| Total Complexity | 48 |
| Total Lines | 241 |
| Duplicated Lines | 41.08 % |
| Coverage | 0% |
| Changes | 0 |
Duplicate code is one of the most pungent code smells. A rule that is often used is to re-structure code once it is duplicated in three or more places.
Common duplication problems, and corresponding solutions are:
Complex classes like rule_dir_stats often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
#!/usr/bin/env python

from __future__ import print_function

import argparse
import json
import os
import pprint
import sys

import ssg.build_remediations
import ssg.build_yaml
import ssg.oval
import ssg.products
import ssg.rule_dir_stats as rds
import ssg.rules
import ssg.yaml
# Absolute path to the repository root: one directory up from this script.
_THIS_DIR = os.path.dirname(__file__)
SSG_ROOT = os.path.abspath(os.path.join(_THIS_DIR, ".."))
| 21 | |||
| 22 | |||
def parse_args():
    """Build and parse this script's command line, returning a Namespace."""
    parser = argparse.ArgumentParser()

    # Input source and rule/product filtering.
    parser.add_argument(
        "-i", "--input", type=str, action="store",
        default="build/rule_dirs.json",
        help="File to read json output of rule_dir_json from "
             "(defaults to build/rule_dirs.json)")

    parser.add_argument(
        "-p", "--products", type=str, action="store", default="all",
        help="Products to inquire about, as a comma separated list")
    parser.add_argument(
        "-t", "--strict", action="store_true",
        help="Enforce strict --products checking against rule.yml prodtype only")
    parser.add_argument(
        "-q", "--query", type=str, action="store", default=None,
        help="Limit actions to only act on a comma separated list of rule_ids")

    # Which reports to run.
    parser.add_argument(
        "-m", "--missing", action="store_true",
        help="List rules which are missing OVALs or fixes")
    parser.add_argument(
        "-2", "--two-plus", action="store_true",
        help="List rules which have two or more OVALs or fixes")
    parser.add_argument(
        "-r", "--prodtypes", action="store_true",
        help="List rules which have different YAML prodtypes from checks+fix prodtypes")
    parser.add_argument(
        "-n", "--product-names", action="store_true",
        help="List rules which have product specific objects with broader accepted products")
    parser.add_argument(
        "-?", "--introspect", action="store_true",
        help="Dump raw objects for explicitly queried rule_ids")
    parser.add_argument(
        "-u", "--unassociated", action="store_true",
        help="Search for rules without any product association")

    # Output scoping.
    parser.add_argument(
        "-o", "--ovals-only", action="store_true",
        help="Only output information about OVALs")
    parser.add_argument(
        "-f", "--fixes-only", action="store_true",
        help="Only output information about fixes")

    parser.add_argument(
        "-s", "--summary-only", action="store_true",
        help="Only output summary information")

    return parser.parse_args()
| 57 | |||
| 58 | |||
def process_missing(args, known_rules):
    """Report rules that are missing OVAL checks and/or remediations.

    Walks known_rules with the rds "missing" predicates, prints the
    per-rule details (unless --summary-only) and then summary counts.
    """
    stats = rds.walk_rules_stats(args, known_rules,
                                 rds.missing_oval, rds.missing_remediation)
    # NOTE(review): this function reads stats[3] for the remediation count
    # while the other process_* functions in this file read index 2 --
    # presumably one of the two is wrong; confirm against the tuple
    # returned by ssg.rule_dir_stats.walk_rules_stats.
    num_rules = stats[0]
    num_no_oval = stats[1]
    num_no_remediation = stats[3]
    per_type_counts = stats[4]
    detail_lines = stats[5]

    if not args.summary_only:
        print("Missing Objects Specifics:")
        for detail in detail_lines:
            print(detail)
        print("\n")

    print("Missing Objects Summary:")
    print("Total affected rules: %d" % num_rules)
    if not args.fixes_only:
        print("Rules with no OVALs: %d / %d" % (num_no_oval, num_rules))
    if not args.ovals_only:
        print("Rules without any remediations: %d / %d" % (num_no_remediation, num_rules))
        for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
            print("Rules with no %s remediations: %d / %d"
                  % (r_type, per_type_counts[r_type], num_rules))
    print("\n")
| 83 | |||
| 84 | |||
def process_two_plus(args, known_rules):
    """Report rules that carry two or more OVALs or remediations.

    Walks known_rules with the rds "two_plus" predicates, prints the
    per-rule details (unless --summary-only) and then summary counts.
    """
    stats = rds.walk_rules_stats(args, known_rules,
                                 rds.two_plus_oval, rds.two_plus_remediation)
    # Index meanings assumed from usage; stats[3] is unused here but is
    # read by process_missing -- verify against walk_rules_stats.
    num_rules, num_multi_oval = stats[0], stats[1]
    num_multi_remediation = stats[2]
    per_type_counts = stats[4]
    detail_lines = stats[5]

    if not args.summary_only:
        print("Two Plus Object Specifics:")
        for detail in detail_lines:
            print(detail)
        print("\n")

    print("Two Plus Objects Summary:")
    print("Total affected rules: %d" % num_rules)
    if not args.fixes_only:
        print("Rules with two or more OVALs: %d / %d" % (num_multi_oval, num_rules))
    if not args.ovals_only:
        print("Rules with two or more remediations: %d / %d" % (num_multi_remediation, num_rules))
        for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
            print("Rules with two or more %s remediations: %d / %d"
                  % (r_type, per_type_counts[r_type], num_rules))

    print("\n")
| 110 | |||
| 111 | |||
def process_prodtypes(args, known_rules):
    """Report rules whose YAML prodtype disagrees with check/fix prodtypes.

    Walks known_rules with the rds "prodtypes" predicates, prints the
    per-rule details (unless --summary-only) and then summary counts.
    """
    stats = rds.walk_rules_stats(args, known_rules,
                                 rds.prodtypes_oval, rds.prodtypes_remediation)
    # Index meanings assumed from usage; stats[3] is unused here but is
    # read by process_missing -- verify against walk_rules_stats.
    num_rules, num_oval_mismatch = stats[0], stats[1]
    num_remediation_mismatch = stats[2]
    per_type_counts = stats[4]
    detail_lines = stats[5]

    if not args.summary_only:
        print("Prodtypes Object Specifics:")
        for detail in detail_lines:
            print(detail)
        print("\n")

    print("Prodtypes Objects Summary:")
    print("Total affected rules: %d" % num_rules)
    if not args.fixes_only:
        print("Rules with differing prodtypes between YAML and OVALs: %d / %d"
              % (num_oval_mismatch, num_rules))
    if not args.ovals_only:
        print("Rules with differing prodtypes between YAML and remediations: %d / %d"
              % (num_remediation_mismatch, num_rules))
        for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
            print("Rules with differing prodtypes between YAML and %s remediations: %d / %d"
                  % (r_type, per_type_counts[r_type], num_rules))

    print("\n")
| 137 | |||
| 138 | |||
def process_product_names(args, known_rules):
    """Report rules whose product-specific objects use broader product names.

    Walks known_rules with the rds "product_names" predicates, prints the
    per-rule details (unless --summary-only) and then summary counts.
    """
    stats = rds.walk_rules_stats(args, known_rules,
                                 rds.product_names_oval, rds.product_names_remediation)
    # Index meanings assumed from usage; stats[3] is unused here but is
    # read by process_missing -- verify against walk_rules_stats.
    num_rules, num_oval_mismatch = stats[0], stats[1]
    num_remediation_mismatch = stats[2]
    per_type_counts = stats[4]
    detail_lines = stats[5]

    if not args.summary_only:
        print("Product Names Specifics:")
        for detail in detail_lines:
            print(detail)
        print("\n")

    print("Product Names Summary:")
    print("Total affected rules: %d" % num_rules)
    if not args.fixes_only:
        print("Rules with differing products and OVAL names: %d / %d"
              % (num_oval_mismatch, num_rules))
    if not args.ovals_only:
        print("Rules with differing product and remediation names: %d / %d"
              % (num_remediation_mismatch, num_rules))
        for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
            print("Rules with differing product and %s remediation names: %d / %d"
                  % (r_type, per_type_counts[r_type], num_rules))

    print("\n")
| 164 | |||
| 165 | |||
def process_introspection(args, known_rules):
    """Dump the stored rule object for each explicitly queried rule id.

    With --summary-only only the rule ids are echoed; otherwise the full
    raw object from known_rules is pretty-printed.
    """
    for rule_id in args.query:
        if args.summary_only:
            print(rule_id)
        else:
            pprint.pprint(known_rules[rule_id])
            print("\n")
| 173 | |||
| 174 | |||
def process_unassociated(args, known_rules, all_products):
    """Print every rule that is not associated with any known product.

    Temporarily clears the --ovals-only/--fixes-only/--strict flags so
    rds.get_all_affected_products considers every object type, then
    restores the saved values afterwards (in a finally block, so the
    flags survive an exception too).
    """
    save_ovals_only = args.ovals_only
    save_fixes_only = args.fixes_only
    save_strict = args.strict

    args.ovals_only = False
    args.fixes_only = False
    args.strict = False

    try:
        for rule_id, rule_obj in known_rules.items():
            affected_products = rds.get_all_affected_products(args, rule_obj)
            if affected_products.intersection(all_products):
                continue

            print("Unassociated Rule: rule_id:%s" % rule_id)
    finally:
        args.ovals_only = save_ovals_only
        args.fixes_only = save_fixes_only
        # Bug fix: was "args.stict = save_strict", which left args.strict
        # clobbered to False and dangled a bogus "stict" attribute.
        args.strict = save_strict
| 195 | |||
| 196 | |||
def main():
    """Entry point: load the rule_dirs.json database and run the requested reports."""
    args = parse_args()

    # Requires "import ssg.products" at the top of the file.
    linux_products, other_products = ssg.products.get_all(SSG_ROOT)
    all_products = linux_products.union(other_products)

    # Bug fix: the original opened the file and never closed it; the
    # with-statement releases the handle as soon as the JSON is loaded.
    with open(args.input, 'r') as json_file:
        known_rules = json.load(json_file)

    # Resolve --products into a concrete set: a named group or an
    # explicit comma-separated list.
    if args.products.lower() == 'all':
        args.products = all_products
    elif args.products.lower() == 'linux':
        args.products = linux_products
    elif args.products.lower() == 'other':
        args.products = other_products
    else:
        args.products = args.products.split(',')
    args.products = set(args.products)

    args.query = rds.filter_rule_ids(set(known_rules), args.query)

    # When no action flag was given, default to the three main reports.
    if not (args.missing or args.two_plus or args.prodtypes
            or args.introspect or args.unassociated or args.product_names):
        args.missing = True
        args.two_plus = True
        args.prodtypes = True

    print("Total number of known rule directories: %d" % len(known_rules))
    print("Total number of queried rules: %d\n" % len(args.query))

    if args.missing:
        process_missing(args, known_rules)
    if args.two_plus:
        process_two_plus(args, known_rules)
    if args.prodtypes:
        process_prodtypes(args, known_rules)
    if args.product_names:
        process_product_names(args, known_rules)
    if args.introspect and args.query:
        process_introspection(args, known_rules)
    if args.unassociated:
        process_unassociated(args, known_rules, all_products)


if __name__ == "__main__":
    main()