#!/usr/bin/python3

from __future__ import print_function

import json
import argparse
import jinja2
import os
import os.path
import sys

try:
    import ssg.build_profile
    import ssg.constants
    import ssg.environment
    import ssg.xml
    import ssg.build_yaml
    from ssg.utils import mkdir_p
except ImportError:
    print("The ssg module could not be found.")
    print("Run .pyenv.sh available in the project root directory,"
          " or add it to PYTHONPATH manually.")
    print("$ source .pyenv.sh")
    exit(1)


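# Example invocations (the script name and input files below are illustrative;
# all subcommands and options are defined in parse_args()):
#   $ python3 profile_tool.py stats --benchmark ssg-rhel8-ds.xml --missing
#   $ python3 profile_tool.py sub --build-config-yaml build/build_config.yml \
#       --ssg-root ~/scap-security-guide --product fedora \
#       --profile1 first.profile --profile2 second.profile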
def parse_args():
    script_desc = \
        "Obtains and displays XCCDF profile statistics. Namely the number " + \
        "of rules in the profile, how many of these rules have their OVAL " + \
        "check implemented, how many have a remediation available, ..."

    parser = argparse.ArgumentParser(description="Profile statistics and utilities tool")
    subparsers = parser.add_subparsers(title='subcommands', dest="subcommand")
    parser_stats = subparsers.add_parser("stats", description=script_desc,
                                         help="Show profile statistics")
    parser_stats.add_argument("--profile", "-p",
                              action="store",
                              help="Show statistics for this XCCDF Profile only. If "
                                   "not provided, the script will show stats for all "
                                   "available profiles.")
    parser_stats.add_argument(
        "--benchmark", "-b", required=True, action="store",
        help="Specify an XCCDF file or a SCAP source data stream file to act on.")
    parser_stats.add_argument("--implemented-ovals", default=False,
                              action="store_true", dest="implemented_ovals",
                              help="Show IDs of implemented OVAL checks.")
    parser_stats.add_argument("--implemented-sces", default=False,
                              action="store_true", dest="implemented_sces",
                              help="Show IDs of implemented SCE checks.")
    parser_stats.add_argument("--missing-stig-ids", default=False,
                              action="store_true", dest="missing_stig_ids",
                              help="Show rules in STIG profiles that don't have STIG IDs.")
    parser_stats.add_argument("--missing-cis-refs", default=False,
                              action="store_true", dest="missing_cis_refs",
                              help="Show rules in CIS profiles that don't have CIS references.")
    parser_stats.add_argument("--missing-hipaa-refs", default=False,
                              action="store_true", dest="missing_hipaa_refs",
                              help="Show rules in HIPAA profiles that don't have HIPAA references.")
    parser_stats.add_argument("--missing-anssi-refs", default=False,
                              action="store_true", dest="missing_anssi_refs",
                              help="Show rules in ANSSI profiles that don't have ANSSI references.")
    parser_stats.add_argument("--missing-ospp-refs", default=False,
                              action="store_true", dest="missing_ospp_refs",
                              help="Show rules in OSPP profiles that don't have OSPP references.")
    parser_stats.add_argument("--missing-cui-refs", default=False,
                              action="store_true", dest="missing_cui_refs",
                              help="Show rules in CUI profiles that don't have CUI references.")
    parser_stats.add_argument("--missing-ovals", default=False,
                              action="store_true", dest="missing_ovals",
                              help="Show IDs of unimplemented OVAL checks.")
    parser_stats.add_argument("--missing-sces", default=False,
                              action="store_true", dest="missing_sces",
                              help="Show IDs of unimplemented SCE checks.")
    parser_stats.add_argument("--implemented-fixes", default=False,
                              action="store_true", dest="implemented_fixes",
                              help="Show IDs of implemented remediations.")
    parser_stats.add_argument("--missing-fixes", default=False,
                              action="store_true", dest="missing_fixes",
                              help="Show IDs of unimplemented remediations.")
    parser_stats.add_argument("--assigned-cces", default=False,
                              action="store_true", dest="assigned_cces",
                              help="Show IDs of rules that have a CCE assigned.")
    parser_stats.add_argument("--missing-cces", default=False,
                              action="store_true", dest="missing_cces",
                              help="Show IDs of rules that are missing a CCE element.")
    parser_stats.add_argument("--implemented", default=False,
                              action="store_true",
                              help="Equivalent of --implemented-ovals, "
                                   "--implemented-fixes and --assigned-cces "
                                   "all being set.")
    parser_stats.add_argument("--missing", default=False,
                              action="store_true",
                              help="Equivalent of all --missing-* options "
                                   "being set.")
    parser_stats.add_argument("--ansible-parity",
                              action="store_true",
                              help="Show IDs of rules that have a Bash fix but lack an Ansible fix."
                                   " Rules missing both Bash and Ansible fixes are not shown.")
    parser_stats.add_argument("--all", default=False,
                              action="store_true", dest="all",
                              help="Show all available statistics.")
    parser_stats.add_argument("--product", action="store", dest="product",
                              help="Product directory to evaluate XCCDF under "
                                   "(e.g., ~/scap-security-guide/rhel8)")
    parser_stats.add_argument("--skip-stats", default=False,
                              action="store_true", dest="skip_overall_stats",
                              help="Do not show overall statistics.")
    parser_stats.add_argument("--format", default="plain",
                              choices=["plain", "json", "csv", "html"],
                              help="Which format to use for output.")
    parser_stats.add_argument("--output",
                              help="If defined, statistics will be stored under this directory.")

    subtracted_profile_desc = \
        "Subtract rules and variable selections from profile1 based on rules present in " + \
        "profile2. As a result, a new profile is generated. It doesn't support profile " + \
        "inheritance, which means that only rules explicitly " + \
        "listed in the profiles will be taken into account."

    parser_sub = subparsers.add_parser("sub", description=subtracted_profile_desc,
                                       help=("Subtract rules and variables from profile1 "
                                             "based on selections present in profile2."))
    parser_sub.add_argument(
        "--build-config-yaml", required=True,
        help="YAML file with information about the build configuration, "
             "e.g. ~/scap-security-guide/build/build_config.yml; "
             "needed for autodetection of the profile root."
    )
    parser_sub.add_argument(
        "--ssg-root", required=True,
        help="Directory containing the source tree, "
             "e.g. ~/scap-security-guide/"
    )
    parser_sub.add_argument(
        "--product", required=True,
        help="ID of the product for which we are building Playbooks, "
             "e.g. 'fedora'"
    )
    parser_sub.add_argument('--profile1', type=str, dest="profile1",
                            required=True, help='YAML profile')
    parser_sub.add_argument('--profile2', type=str, dest="profile2",
                            required=True, help='YAML profile')

    args = parser.parse_args()

    if not args.subcommand:
        parser.print_help()
        exit(0)

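    # Expand the aggregate --all / --implemented / --missing switches into the
    # individual per-statistic flags they imply.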
    if args.subcommand == "stats":
        if args.all:
            args.implemented = True
            args.missing = True
            args.ansible_parity = True

        if args.implemented:
            args.implemented_ovals = True
            args.implemented_fixes = True
            args.assigned_cces = True

        if args.missing:
            args.missing_ovals = True
            args.missing_sces = True
            args.missing_fixes = True
            args.missing_cces = True
            args.missing_stig_ids = True
            args.missing_cis_refs = True
            args.missing_hipaa_refs = True
            args.missing_anssi_refs = True
            args.missing_ospp_refs = True
            args.missing_cui_refs = True

    return args


def main():
    args = parse_args()

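    # "sub" subcommand: load both YAML profiles and write out a new profile
    # containing only the selections present in profile1 but not in profile2.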
    if args.subcommand == "sub":
        product_yaml = os.path.join(args.ssg_root, "products", args.product, "product.yml")
        env_yaml = ssg.environment.open_environment(args.build_config_yaml, product_yaml)
        try:
            profile1 = ssg.build_yaml.Profile.from_yaml(args.profile1, env_yaml)
            profile2 = ssg.build_yaml.Profile.from_yaml(args.profile2, env_yaml)
        except jinja2.exceptions.TemplateNotFound as e:
            print("Error: Profile {} could not be found.".format(str(e)))
            exit(1)

        subtracted_profile = profile1 - profile2

        exclusive_rules = len(subtracted_profile.get_rule_selectors())
        exclusive_vars = len(subtracted_profile.get_variable_selectors())
        if exclusive_rules > 0:
            print("{} rules were left after subtraction.".format(exclusive_rules))
        if exclusive_vars > 0:
            print("{} variables were left after subtraction.".format(exclusive_vars))

        if exclusive_rules > 0 or exclusive_vars > 0:
            profile1_basename = os.path.splitext(
                os.path.basename(args.profile1))[0]
            profile2_basename = os.path.splitext(
                os.path.basename(args.profile2))[0]

            subtracted_profile_filename = "{}_sub_{}.profile".format(
                profile1_basename, profile2_basename)
            print("Creating a new profile containing the exclusive selections: {}".format(
                subtracted_profile_filename))

            subtracted_profile.title = profile1.title + " subtracted by " + profile2.title
            subtracted_profile.dump_yaml(subtracted_profile_filename)
            print("Profile {} was created successfully.".format(
                subtracted_profile_filename))
        else:
            print("Subtraction would produce an empty profile. No new profile was generated.")
        exit(0)

    benchmark = ssg.build_profile.XCCDFBenchmark(args.benchmark, args.product)
    ret = []
    if args.profile:
        ret.append(benchmark.show_profile_stats(args.profile, args))
    else:
        ret.extend(benchmark.show_all_profile_stats(args))

    if args.format == "json":
        print(json.dumps(ret, indent=4))
    if args.format == "html":
        from json2html import json2html
        filtered_output = []
        output_path = "./"
        if args.output:
            output_path = args.output
        mkdir_p(output_path)

        content_path = os.path.join(output_path, "content")
        mkdir_p(content_path)

        content_list = [
            'rules',
            'missing_stig_ids',
            'missing_cis_refs',
            'missing_hipaa_refs',
            'missing_anssi_refs',
            'missing_ospp_refs',
            'missing_cui_refs',
            'missing_ovals',
            'missing_sces',
            'missing_bash_fixes',
            'missing_ansible_fixes',
            'missing_ignition_fixes',
            'missing_kubernetes_fixes',
            'missing_puppet_fixes',
            'missing_anaconda_fixes',
            'missing_cces',
            'ansible_parity',
            'implemented_checks',
            'implemented_fixes',
            'missing_checks',
            'missing_fixes'
        ]
        link = """<a href="{}"><div style="height:100%;width:100%">{}</div></a>"""

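        # For every ID list collected for a profile, write the IDs to a per-profile
        # text file under content/ and turn the corresponding count in the HTML table
        # into a hyperlink pointing at that file.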
        for profile in ret:
            bash_fixes_count = profile['rules_count'] - profile['missing_bash_fixes_count']
            for content in content_list:
                content_file = "{}_{}.txt".format(profile['profile_id'], content)
                content_filepath = os.path.join("content", content_file)
                count = len(profile[content])
                if count > 0:
                    if content == "ansible_parity":
                        # Custom text link for Ansible parity.
                        count = link.format(
                            content_filepath,
                            "{} out of {} ({}%)".format(
                                bash_fixes_count - count, bash_fixes_count,
                                int(((bash_fixes_count - count) / bash_fixes_count) * 100)))
                    count_href_element = link.format(content_filepath, count)
                    profile['{}_count'.format(content)] = count_href_element
                    with open(os.path.join(content_path, content_file), 'w+') as f:
                        f.write('\n'.join(profile[content]))
                else:
                    profile['{}_count'.format(content)] = count

                del profile[content]
            filtered_output.append(profile)

        with open(os.path.join(output_path, "statistics.html"), 'w+') as f:
            f.write(json2html.convert(json=json.dumps(filtered_output), escape=False))

    elif args.format == "csv":
        # We can assume ret has at least one element.
        # CSV header
        print(",".join(ret[0].keys()))
        for line in ret:
            print(",".join([str(value) for value in line.values()]))


if __name__ == '__main__':
    main()