| Metric           | Value   |
|------------------|---------|
| Total Complexity | 48      |
| Total Lines      | 242     |
| Duplicated Lines | 40.91 % |
| Coverage         | 0 %     |
| Changes          | 0       |
Duplicate code is one of the most pungent code smells. A commonly used rule of thumb is to restructure code once it is duplicated in three or more places. Common duplication problems have well-known solutions, most of them variants of extracting the repeated logic into a shared method or class.

Complex classes like utils.rule_dir_stats often do a lot of different things. To break such a class down, we need to identify a cohesive component within it. A common way to find such a component is to look for fields or methods that share the same prefixes or suffixes.

Once you have determined which fields belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
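Since the duplication flagged in this file lives in four nearly identical module-level `process_*` functions rather than in the fields of a class, the closest analogue to Extract Class here is pulling the shared walk-and-report logic into a single parameterized helper (an Extract Function refactoring). The sketch below is illustrative only: `report_walk_result` and its `labels` argument are hypothetical names, and it assumes the six-element result layout of `walk_rules_stats` that the listing below already relies on.

```python
# Illustrative sketch only: report_walk_result and `labels` are hypothetical
# names, not part of the existing script.
import ssg.build_remediations
import ssg.rule_dir_stats as rds


def report_walk_result(args, known_rules, oval_func, remediation_func, labels,
                       remediation_index=2):
    """Run one rule walk and print the Specifics/Summary layout that all four
    process_* functions currently duplicate."""
    result = rds.walk_rules_stats(args, known_rules, oval_func, remediation_func)
    affected_rules = result[0]
    affected_ovals = result[1]
    affected_remediations = result[remediation_index]
    affected_remediations_type = result[4]
    verbose_output = result[5]

    if not args.summary_only:
        print("%s Specifics:" % labels["title"])
        for line in verbose_output:
            print(line)
        print("\n")

    print("%s Summary:" % labels["title"])
    print("Total affected rules: %d" % affected_rules)
    if not args.fixes_only:
        print("%s: %d / %d" % (labels["oval"], affected_ovals, affected_rules))
    if not args.ovals_only:
        print("%s: %d / %d" % (labels["remediation"], affected_remediations, affected_rules))
        for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
            count = affected_remediations_type[r_type]
            print("%s: %d / %d" % (labels["per_type"] % r_type, count, affected_rules))
    print("\n")


def process_two_plus(args, known_rules):
    # Each current reporter becomes a thin wrapper; process_missing would also
    # pass remediation_index=3 to keep its existing behaviour.
    report_walk_result(
        args, known_rules, rds.two_plus_oval, rds.two_plus_remediation,
        labels={
            "title": "Two Plus Objects",
            "oval": "Rules with two or more OVALs",
            "remediation": "Rules with two or more remediations",
            "per_type": "Rules with two or more %s remediations",
        })
```

With such a helper, each of the four reporters shrinks to a single call that supplies its walker callbacks, its labels, and (for `process_missing`) the alternate remediation index.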
The full source under analysis:

```python
#!/usr/bin/env python

"""
Report statistics about rule directories: rules missing OVALs or fixes, rules
with two or more OVALs or fixes, prodtype and product-name mismatches, and
rules without any product association. Reads the JSON written by rule_dir_json.
"""

from __future__ import print_function

import argparse
import os
import sys

import json
import pprint

import ssg.build_yaml
import ssg.oval
import ssg.build_remediations
import ssg.products
import ssg.rule_dir_stats as rds
import ssg.rules
import ssg.yaml


# Repository root, assuming this script lives one directory below it.
SSG_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", type=str, action="store", default="build/rule_dirs.json",
                        help="File to read json output of rule_dir_json from (defaults to build/rule_dirs.json)")

    parser.add_argument("-p", "--products", type=str, action="store", default="all",
                        help="Products to inquire about, as a comma separated list")
    parser.add_argument("-t", "--strict", action="store_true",
                        help="Enforce strict --products checking against rule.yml prodtype only")
    parser.add_argument("-q", "--query", type=str, action="store", default=None,
                        help="Limit actions to only act on a comma separated list of rule_ids")

    parser.add_argument("-m", "--missing", action="store_true",
                        help="List rules which are missing OVALs or fixes")
    parser.add_argument("-2", "--two-plus", action="store_true",
                        help="List rules which have two or more OVALs or fixes")
    parser.add_argument("-r", "--prodtypes", action="store_true",
                        help="List rules which have different YAML prodtypes from checks+fix prodtypes")
    parser.add_argument("-n", "--product-names", action="store_true",
                        help="List rules which have product specific objects with broader accepted products")
    parser.add_argument("-?", "--introspect", action="store_true",
                        help="Dump raw objects for explicitly queried rule_ids")
    parser.add_argument("-u", "--unassociated", action="store_true",
                        help="Search for rules without any product association")

    parser.add_argument("-o", "--ovals-only", action="store_true",
                        help="Only output information about OVALs")
    parser.add_argument("-f", "--fixes-only", action="store_true",
                        help="Only output information about fixes")

    parser.add_argument("-s", "--summary-only", action="store_true",
                        help="Only output summary information")

    return parser.parse_args()


# Flagged by the duplication report: the four process_* functions below share
# the same walk-and-report structure and differ only in the walker callbacks,
# the result index used for remediations, and the printed labels.
def process_missing(args, known_rules):
    result = rds.walk_rules_stats(args, known_rules, rds.missing_oval, rds.missing_remediation)
    affected_rules = result[0]
    affected_ovals = result[1]
    affected_remediations = result[3]
    affected_remediations_type = result[4]
    verbose_output = result[5]

    if not args.summary_only:
        print("Missing Objects Specifics:")
        for line in verbose_output:
            print(line)
        print("\n")

    print("Missing Objects Summary:")
    print("Total affected rules: %d" % affected_rules)
    if not args.fixes_only:
        print("Rules with no OVALs: %d / %d" % (affected_ovals, affected_rules))
    if not args.ovals_only:
        print("Rules without any remediations: %d / %d" % (affected_remediations, affected_rules))
        for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
            r_missing = affected_remediations_type[r_type]
            print("Rules with no %s remediations: %d / %d" % (r_type, r_missing, affected_rules))
    print("\n")


def process_two_plus(args, known_rules):
    result = rds.walk_rules_stats(args, known_rules, rds.two_plus_oval, rds.two_plus_remediation)
    affected_rules = result[0]
    affected_ovals = result[1]
    affected_remediations = result[2]
    affected_remediations_type = result[4]
    verbose_output = result[5]

    if not args.summary_only:
        print("Two Plus Object Specifics:")
        for line in verbose_output:
            print(line)
        print("\n")

    print("Two Plus Objects Summary:")
    print("Total affected rules: %d" % affected_rules)
    if not args.fixes_only:
        print("Rules with two or more OVALs: %d / %d" % (affected_ovals, affected_rules))
    if not args.ovals_only:
        print("Rules with two or more remediations: %d / %d" % (affected_remediations, affected_rules))
        for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
            r_missing = affected_remediations_type[r_type]
            print("Rules with two or more %s remediations: %d / %d" % (r_type, r_missing, affected_rules))

    print("\n")


def process_prodtypes(args, known_rules):
    result = rds.walk_rules_stats(args, known_rules, rds.prodtypes_oval, rds.prodtypes_remediation)
    affected_rules = result[0]
    affected_ovals = result[1]
    affected_remediations = result[2]
    affected_remediations_type = result[4]
    verbose_output = result[5]

    if not args.summary_only:
        print("Prodtypes Object Specifics:")
        for line in verbose_output:
            print(line)
        print("\n")

    print("Prodtypes Objects Summary:")
    print("Total affected rules: %d" % affected_rules)
    if not args.fixes_only:
        print("Rules with differing prodtypes between YAML and OVALs: %d / %d" % (affected_ovals, affected_rules))
    if not args.ovals_only:
        print("Rules with differing prodtypes between YAML and remediations: %d / %d" % (affected_remediations, affected_rules))
        for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
            r_missing = affected_remediations_type[r_type]
            print("Rules with differing prodtypes between YAML and %s remediations: %d / %d" % (r_type, r_missing, affected_rules))

    print("\n")


def process_product_names(args, known_rules):
    result = rds.walk_rules_stats(args, known_rules, rds.product_names_oval, rds.product_names_remediation)
    affected_rules = result[0]
    affected_ovals = result[1]
    affected_remediations = result[2]
    affected_remediations_type = result[4]
    verbose_output = result[5]

    if not args.summary_only:
        print("Product Names Specifics:")
        for line in verbose_output:
            print(line)
        print("\n")

    print("Product Names Summary:")
    print("Total affected rules: %d" % affected_rules)
    if not args.fixes_only:
        print("Rules with differing products and OVAL names: %d / %d" % (affected_ovals, affected_rules))
    if not args.ovals_only:
        print("Rules with differing product and remediation names: %d / %d" % (affected_remediations, affected_rules))
        for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
            r_missing = affected_remediations_type[r_type]
            print("Rules with differing product and %s remediation names: %d / %d" % (r_type, r_missing, affected_rules))

    print("\n")


def process_introspection(args, known_rules):
    # Dump the raw rule objects for the explicitly queried rule_ids.
    for rule_id in args.query:
        if not args.summary_only:
            pprint.pprint(known_rules[rule_id])
            print("\n")
        else:
            print(rule_id)


def process_unassociated(args, known_rules, all_products):
    # Temporarily widen the search so that every product and both OVALs and
    # fixes are considered when looking for product associations.
    save_ovals_only = args.ovals_only
    save_fixes_only = args.fixes_only
    save_strict = args.strict

    args.ovals_only = False
    args.fixes_only = False
    args.strict = False

    for rule_id in known_rules:
        rule_obj = known_rules[rule_id]
        affected_products = rds.get_all_affected_products(args, rule_obj)
        if affected_products.intersection(all_products):
            continue

        print("Unassociated Rule: rule_id:%s" % rule_id)

    # Restore the saved flags.
    args.ovals_only = save_ovals_only
    args.fixes_only = save_fixes_only
    args.strict = save_strict


def main():
    args = parse_args()

    linux_products, other_products = ssg.products.get_all(SSG_ROOT)
    all_products = linux_products.union(other_products)

    with open(args.input, 'r') as json_file:
        known_rules = json.load(json_file)

    # Resolve the --products argument into a set of product identifiers.
    if args.products.lower() == 'all':
        args.products = all_products
    elif args.products.lower() == 'linux':
        args.products = linux_products
    elif args.products.lower() == 'other':
        args.products = other_products
    else:
        args.products = args.products.split(',')
    args.products = set(args.products)

    args.query = rds.filter_rule_ids(set(known_rules), args.query)

    # When no report is requested explicitly, run the three default reports.
    if not args.missing and not args.two_plus and not args.prodtypes and not args.introspect and not args.unassociated and not args.product_names:
        args.missing = True
        args.two_plus = True
        args.prodtypes = True

    print("Total number of known rule directories: %d" % len(known_rules))
    print("Total number of queried rules: %d\n" % len(args.query))

    if args.missing:
        process_missing(args, known_rules)
    if args.two_plus:
        process_two_plus(args, known_rules)
    if args.prodtypes:
        process_prodtypes(args, known_rules)
    if args.product_names:
        process_product_names(args, known_rules)
    if args.introspect and args.query:
        process_introspection(args, known_rules)
    if args.unassociated:
        process_unassociated(args, known_rules, all_products)


if __name__ == "__main__":
    main()
```
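The metrics above also report 0 % test coverage. A minimal pytest sketch along the following lines could exercise one of the reporters without real rule data; the import name `rule_dir_stats`, the fake six-element result tuple, and the patched remediation map are assumptions for illustration, not part of the project's test suite.

```python
# Hypothetical pytest sketch; assumes the file above is importable as
# rule_dir_stats and that pytest's capsys fixture is available.
import argparse
from unittest import mock

import rule_dir_stats as script  # assumed import name for the script above


def test_process_two_plus_summary(capsys):
    # Fake the six-element tuple that the process_* functions index into.
    fake_result = (
        3,                          # result[0]: total affected rules
        2,                          # result[1]: rules with two or more OVALs
        1,                          # result[2]: rules with two or more remediations
        0,                          # result[3]: unused by process_two_plus
        {"bash": 1, "ansible": 0},  # result[4]: per-remediation-type counts
        ["verbose line"],           # result[5]: per-rule verbose output
    )
    args = argparse.Namespace(summary_only=True, fixes_only=False, ovals_only=False)

    with mock.patch.object(script.rds, "walk_rules_stats", return_value=fake_result), \
         mock.patch.object(script.ssg.build_remediations, "REMEDIATION_TO_EXT_MAP",
                           {"bash": ".sh", "ansible": ".yml"}):
        script.process_two_plus(args, known_rules={})

    out = capsys.readouterr().out
    assert "Total affected rules: 3" in out
    assert "Rules with two or more OVALs: 2 / 3" in out
    assert "Rules with two or more bash remediations: 1 / 3" in out
```

Because the reporters take a plain namespace and print their results, they can be tested in isolation once `walk_rules_stats` is stubbed out, which would also make the duplication refactoring sketched earlier easier to verify.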