#!/usr/bin/env python2
from __future__ import print_function

import logging
import os.path
import re
import collections
import xml.etree.ElementTree
import json
import datetime
import socket
import sys
import time

from ssg.constants import OSCAP_PROFILE_ALL_ID

from ssg_test_suite.log import LogHelper
from ssg_test_suite import test_env
from ssg_test_suite import common

from ssg.shims import input_func

# Needed for compatibility as there is no TimeoutError in python2.
if sys.version_info[0] < 3:
    TimeoutException = socket.timeout
else:
    TimeoutException = TimeoutError

logging.getLogger(__name__).addHandler(logging.NullHandler())

_CONTEXT_RETURN_CODES = {'pass': 0,
                         'fail': 2,
                         'error': 1,
                         'notapplicable': 0,
                         'fixed': 0}

_ANSIBLE_TEMPLATE = 'urn:xccdf:fix:script:ansible'
_BASH_TEMPLATE = 'urn:xccdf:fix:script:sh'
_XCCDF_NS = 'http://checklists.nist.gov/xccdf/1.2'


PROFILE_ALL_ID_SINGLE_QUOTED = False


def analysis_to_serializable(analysis):
    result = dict(analysis)
    for key, value in analysis.items():
        if isinstance(value, set):
            result[key] = tuple(value)
    return result
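# Example of the conversion above (illustrative values): an analysis entry
# {"pass": {"rule_a", "rule_b"}} becomes {"pass": ("rule_a", "rule_b")},
# because json.dump() cannot serialize Python sets.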


def save_analysis_to_json(analysis, output_fname):
    analysis2 = analysis_to_serializable(analysis)
    with open(output_fname, "w") as f:
        json.dump(analysis2, f)


def triage_xml_results(fname):
    tree = xml.etree.ElementTree.parse(fname)
    all_xml_results = tree.findall(".//{%s}rule-result" % _XCCDF_NS)

    triaged = collections.defaultdict(set)
    for result in list(all_xml_results):
        idref = result.get("idref")
        status = result.find("{%s}result" % _XCCDF_NS).text
        triaged[status].add(idref)

    return triaged
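# The function above assumes XCCDF result files containing elements of
# roughly this shape (namespaces elided for brevity):
#   <rule-result idref="xccdf_org...rule_something">
#     <result>pass</result>
#   </rule-result>
# and triages them into a mapping such as
#   {"pass": {"xccdf_org...rule_something", ...}, "fail": {...}}.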


def send_files_remote(verbose_path, remote_dir, domain_ip, *files):
    """Upload files to VM."""
    # files is a list of absolute paths on the host
    success = True
    destination = 'root@{0}:{1}'.format(domain_ip, remote_dir)
    files_string = ' '.join(files)

    logging.debug('Uploading files {0} to {1}'.format(files_string,
                                                      destination))
    command = ['scp'] + list(common.SSH_ADDITIONAL_OPTS) + list(files) + [destination]
    if common.run_cmd_local(command, verbose_path)[0] != 0:
        logging.error('Failed to upload files {0}'.format(files_string))
        success = False
    return success


def get_file_remote(test_env, verbose_path, local_dir, remote_path):
    """Download a file from VM."""
    # remote_path is an absolute path of a file on the remote machine
    success = True
    logging.debug('Downloading remote file {0} to {1}'
                  .format(remote_path, local_dir))
    with open(verbose_path, "a") as log_file:
        try:
            test_env.scp_download_file(remote_path, local_dir, log_file)
        except Exception:
            logging.error('Failed to download file {0}'.format(remote_path))
            success = False
    return success


def find_result_id_in_output(output):
    match = re.search('result id.*$', output, re.IGNORECASE | re.MULTILINE)
    if match is None:
        return None
    # Return the rightmost word of the match, which is the result ID.
    return match.group(0).split()[-1]
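# This relies on 'oscap info' printing a line resembling (exact formatting
# may vary between OpenSCAP versions):
#   Result ID: xccdf_org.open-scap_testresult_..._profile_something
# The last whitespace-separated token of that line is taken as the result ID.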


def get_result_id_from_arf(arf_path, verbose_path):
    command = ['oscap', 'info', arf_path]
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    if returncode != 0:
        raise RuntimeError('{0} returned exit code {1}'.
                           format(command_string, returncode))
    res_id = find_result_id_in_output(output)
    if res_id is None:
        raise RuntimeError('Failed to find result ID in {0}'
                           .format(arf_path))
    return res_id


def single_quote_string(input_string):
    """Strip any quote characters and wrap the result in single quotes."""
    result = input_string
    for char in "\"'":
        result = result.replace(char, "")
    return "'{}'".format(result)
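# Example: single_quote_string('--profile "foo"') returns "'--profile foo'",
# so an argument cannot break out of the quoting in the assembled remote
# shell command.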


def generate_fixes_remotely(test_env, formatting, verbose_path):
    command_base = ['oscap', 'xccdf', 'generate', 'fix']
    command_options = [
        '--benchmark-id', formatting['benchmark_id'],
        '--profile', formatting['profile'],
        '--template', formatting['output_template'],
        '--output', '/{output_file}'.format(**formatting),
    ]
    command_operands = ['/{arf_file}'.format(**formatting)]
    if 'result_id' in formatting:
        command_options.extend(['--result-id', formatting['result_id']])

    command_components = command_base + command_options + command_operands
    command_string = ' '.join([single_quote_string(c) for c in command_components])
    with open(verbose_path, "a") as log_file:
        test_env.execute_ssh_command(command_string, log_file)
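# The assembled remote command then looks along these lines (illustrative
# values):
#   'oscap' 'xccdf' 'generate' 'fix' '--benchmark-id' '...' '--profile' '...'
#   '--template' 'urn:xccdf:fix:script:sh' '--output' '/profile.sh' '/arf.xml'
# with every component single-quoted by single_quote_string() above.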


def run_stage_remediation_ansible(run_type, test_env, formatting, verbose_path):
    """
    Return False on error, True on a successful Ansible playbook run.
    """
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    if not send_arf_to_remote_machine_and_generate_remediations_there(
            run_type, test_env, formatting, verbose_path):
        return False
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False
    command = (
        'ansible-playbook', '-v', '-i', '{0},'.format(formatting['domain_ip']),
        '-u', 'root', '--ssh-common-args={0}'.format(' '.join(test_env.ssh_additional_options)),
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    # Append the output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'ab') as f:
        f.write('Stdout of "{}":'.format(command_string).encode("utf-8"))
        f.write(output.encode("utf-8"))
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
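# The resulting local invocation looks roughly like (illustrative values):
#   ansible-playbook -v -i 192.168.122.10, -u root \
#       --ssh-common-args=... /path/to/logs/profile.yml
# The trailing comma in the inventory argument makes ansible-playbook treat
# the value as a literal host list instead of an inventory file path.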


def run_stage_remediation_bash(run_type, test_env, formatting, verbose_path):
    """
    Return False on error, True on a successful bash script run.
    """
    formatting['output_template'] = _BASH_TEMPLATE
    if not send_arf_to_remote_machine_and_generate_remediations_there(
            run_type, test_env, formatting, verbose_path):
        return False
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False

    command_string = '/bin/bash -x /{output_file}'.format(**formatting)

    with open(verbose_path, "a") as log_file:
        try:
            test_env.execute_ssh_command(command_string, log_file)
        except Exception as exc:
            msg = (
                'Bash script remediation run has exited with return code {} '
                'instead of expected 0'.format(exc.returncode))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
            return False
    return True


def send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path):
    if run_type == 'rule':
        try:
            res_id = get_result_id_from_arf(formatting['arf'], verbose_path)
        except Exception as exc:
            logging.error(str(exc))
            return False
        formatting['result_id'] = res_id

    with open(verbose_path, "a") as log_file:
        try:
            test_env.scp_upload_file(formatting["arf"], "/", log_file)
        except Exception:
            return False

    try:
        generate_fixes_remotely(test_env, formatting, verbose_path)
    except Exception as exc:
        logging.error(str(exc))
        return False
    return True


def is_virtual_oscap_profile(profile):
    """ Test whether the profile belongs to the "virtual" category of
        OpenSCAP profiles. Currently that is the (all) profile, but any
        other ID we might come up with in the future qualifies as well;
        it just needs to be enclosed in parentheses, e.g. "(custom_profile)".
    """
    if profile is not None:
        if profile == OSCAP_PROFILE_ALL_ID:
            return True
        if profile.startswith("(") and profile.endswith(")"):
            return True
    return False


def process_profile_id(profile):
    # Detect whether the profile is virtual and wrap it in single quotes if needed.
    if is_virtual_oscap_profile(profile) and PROFILE_ALL_ID_SINGLE_QUOTED:
        return "'{}'".format(profile)
    return profile
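# Illustrative behavior, assuming OSCAP_PROFILE_ALL_ID == "(all)":
#   is_virtual_oscap_profile("(all)")              -> True
#   is_virtual_oscap_profile("(custom_profile)")   -> True
#   is_virtual_oscap_profile("xccdf_org..._pci-dss") -> False
# process_profile_id() adds the quotes only when PROFILE_ALL_ID_SINGLE_QUOTED
# is set to True.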


class GenericRunner(object):
    def __init__(self, environment, profile, datastream, benchmark_id):
        self.environment = environment
        self.profile = profile
        self.datastream = datastream
        self.benchmark_id = benchmark_id

        self.arf_file = ''
        self.arf_path = ''
        self.verbose_path = ''
        self.report_path = ''
        self.results_path = ''
        self.stage = 'undefined'

        self.clean_files = False
        self.manual_debug = False
        self._filenames_to_clean_afterwards = set()

        self.command_base = []
        self.command_options = []
        self.command_operands = []
        # Number of seconds to sleep after a reboot of the VM to let the
        # system finish startup; there were problems with temporary files
        # created by Dracut during image generation interfering with the scan.
        self.time_to_finish_startup = 30

    def _make_arf_path(self):
        self.arf_file = self._get_arf_file()
        self.arf_path = os.path.join(LogHelper.LOG_DIR, self.arf_file)

    def _get_arf_file(self):
        raise NotImplementedError()

    def _make_verbose_path(self):
        verbose_file = self._get_verbose_file()
        verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_file)
        self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')

    def _get_verbose_file(self):
        raise NotImplementedError()

    def _make_report_path(self):
        report_file = self._get_report_file()
        report_path = os.path.join(LogHelper.LOG_DIR, report_file)
        self.report_path = LogHelper.find_name(report_path, '.html')

    def _get_report_file(self):
        raise NotImplementedError()

    def _make_results_path(self):
        results_file = self._get_results_file()
        results_path = os.path.join(LogHelper.LOG_DIR, results_file)
        self.results_path = LogHelper.find_name(results_path, '.xml')

    def _get_results_file(self):
        raise NotImplementedError()

    def _generate_report_file(self):
        self.command_options.extend([
            '--report', self.report_path,
        ])
        self._filenames_to_clean_afterwards.add(self.report_path)

    def _wait_for_continue(self):
        """ In case the user requests to leave the machine in a failed state
        for hands-on debugging, wait for a keypress to continue."""
        input_func("Paused for manual debugging. Continue by pressing return.")

    def prepare_online_scanning_arguments(self):
        self.command_options.extend([
            '--benchmark-id', self.benchmark_id,
            '--profile', self.profile,
            '--progress', '--oval-results',
        ])
        self.command_operands.append(self.datastream)
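    # These options and the datastream operand are later handed to
    # self.environment.scan(), which presumably wraps an 'oscap xccdf eval'
    # invocation; the --progress output is what RuleRunner's
    # _find_rule_result_in_output() parses.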

    def run_stage(self, stage):
        self.stage = stage

        self._make_verbose_path()
        self._make_report_path()
        self._make_arf_path()
        self._make_results_path()

        self.command_base = []
        self.command_options = ['--verbose', 'DEVEL']
        self.command_operands = []

        result = None
        if stage == 'initial':
            result = self.initial()
        elif stage == 'remediation':
            result = self.remediation()
        elif stage == 'final':
            result = self.final()
        else:
            raise RuntimeError('Unknown stage: {}.'.format(stage))

        if self.clean_files:
            for fname in tuple(self._filenames_to_clean_afterwards):
                try:
                    os.remove(fname)
                except OSError:
                    logging.error(
                        "Failed to cleanup file '{0}'"
                        .format(fname))
                finally:
                    self._filenames_to_clean_afterwards.remove(fname)

        if result == 1:
            LogHelper.log_preloaded('pass')
            if self.clean_files:
                files_to_remove = [self.verbose_path]
                if stage in ['initial', 'final']:
                    files_to_remove.append(self.results_path)

                for fname in tuple(files_to_remove):
                    try:
                        if os.path.exists(fname):
                            os.remove(fname)
                    except OSError:
                        logging.error(
                            "Failed to cleanup file '{0}'"
                            .format(fname))
        elif result == 2:
            LogHelper.log_preloaded('notapplicable')
        else:
            LogHelper.log_preloaded('fail')
            if self.manual_debug:
                self._wait_for_continue()
        return result
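    # Stage result convention used above: 1 means 'pass', 2 means
    # 'notapplicable', and anything else is logged as 'fail'. These values
    # are produced by _analyze_output_of_oscap_call() in RuleRunner; the
    # profile runners return plain True/False, and True compares equal to 1.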

    @property
    def get_command(self):
        return self.command_base + self.command_options + self.command_operands

    def make_oscap_call(self):
        raise NotImplementedError()

    def initial(self):
        self.command_options += ['--results', self.results_path]
        result = self.make_oscap_call()
        return result

    def remediation(self):
        raise NotImplementedError()

    def final(self):
        self.command_options += ['--results', self.results_path]
        result = self.make_oscap_call()
        return result

    def analyze(self, stage):
        triaged_results = triage_xml_results(self.results_path)
        triaged_results["stage"] = stage
        triaged_results["runner"] = self.__class__.__name__
        return triaged_results

    def _get_formatting_dict_for_remediation(self):
        formatting = {
            'domain_ip': self.environment.domain_ip,
            'profile': self.profile,
            'datastream': self.datastream,
            'benchmark_id': self.benchmark_id,
        }
        formatting['arf'] = self.arf_path
        formatting['arf_file'] = self.arf_file
        return formatting


class ProfileRunner(GenericRunner):
    def _get_arf_file(self):
        return '{0}-initial-arf.xml'.format(self.profile)

    def _get_verbose_file(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_report_file(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_results_file(self):
        return '{0}-{1}-results'.format(self.profile, self.stage)

    def final(self):
        if self.environment.name == 'libvirt-based':
            logging.info("Rebooting domain '{0}' before final scan."
                         .format(self.environment.domain_name))
            self.environment.reboot()
            logging.info("Waiting for {0} seconds to let the system finish startup."
                         .format(self.time_to_finish_startup))
            time.sleep(self.time_to_finish_startup)
        return GenericRunner.final(self)

    def make_oscap_call(self):
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        if returncode not in [0, 2]:
            logging.error(('Profile run should end with return code 0 or 2, '
                           'not "{0}"!').format(returncode))
            return False
        return True


class RuleRunner(GenericRunner):
    def __init__(
            self, environment, profile, datastream, benchmark_id,
            rule_id, script_name, dont_clean, manual_debug):
        super(RuleRunner, self).__init__(
            environment, profile, datastream, benchmark_id,
        )

        self.rule_id = rule_id
        self.context = None
        self.script_name = script_name
        self.clean_files = not dont_clean
        self.manual_debug = manual_debug

        self._oscap_output = ''

    def _get_arf_file(self):
        return '{0}-initial-arf.xml'.format(self.rule_id)

    def _get_verbose_file(self):
        return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)

    def _get_report_file(self):
        return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)

    def _get_results_file(self):
        return '{0}-{1}-{2}-results-{3}'.format(
            self.rule_id, self.script_name, self.profile, self.stage)

    def make_oscap_call(self):
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        self.command_options.extend(
            ['--rule', self.rule_id])
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        return self._analyze_output_of_oscap_call()

    def final(self):
        success = super(RuleRunner, self).final()
        success = success and self._analyze_output_of_oscap_call()

        return success

    def _find_rule_result_in_output(self):
        # The oscap --progress option outputs rule results to stdout in the
        # following format:
        # xccdf_org....rule_accounts_password_minlen_login_defs:pass
        match = re.findall('{0}:(.*)$'.format(self.rule_id),
                           self._oscap_output,
                           re.MULTILINE)

        if not match:
            # When the rule is not selected, it won't show up in the output.
            return "notselected"

        # When --remediate is used, there will be two entries in the
        # progress output, one for fail and one for fixed, e.g.
        # xccdf_org....rule_accounts_password_minlen_login_defs:fail
        # xccdf_org....rule_accounts_password_minlen_login_defs:fixed
        # We are interested in the last one.
        return match[-1]

    def _analyze_output_of_oscap_call(self):
        local_success = 1
        # Check the expected result.
        rule_result = self._find_rule_result_in_output()

        if rule_result == "notapplicable":
            msg = (
                'Rule {0} evaluation resulted in {1}'
                .format(self.rule_id, rule_result))
            LogHelper.preload_log(logging.WARNING, msg, 'notapplicable')
            local_success = 2
            return local_success
        if rule_result != self.context:
            local_success = 0
            if rule_result == 'notselected':
                msg = (
                    'Rule {0} has not been evaluated! '
                    'Wrong profile selected in test scenario?'
                    .format(self.rule_id))
            else:
                msg = (
                    'Rule evaluation resulted in {0} '
                    'instead of expected {1} during the {2} stage'
                    .format(rule_result, self.context, self.stage))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return local_success

    def _get_formatting_dict_for_remediation(self):
        fmt = super(RuleRunner, self)._get_formatting_dict_for_remediation()
        fmt['rule_id'] = self.rule_id

        return fmt

    def run_stage_with_context(self, stage, context):
        self.context = context
        return self.run_stage(stage)
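    # Typical (hypothetical) use by a test scenario: evaluate, remediate and
    # re-evaluate a single rule while stating the expected result of each
    # stage:
    #   runner.run_stage_with_context('initial', 'fail')
    #   runner.run_stage_with_context('remediation', 'fixed')
    #   runner.run_stage_with_context('final', 'pass')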


class OscapProfileRunner(ProfileRunner):
    def remediation(self):
        self.command_options += ['--remediate']
        return self.make_oscap_call()


class AnsibleProfileRunner(ProfileRunner):
    def initial(self):
        self.command_options += ['--results-arf', self.arf_path]
        return super(AnsibleProfileRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.profile)
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
                                              formatting['output_file'])

        return run_stage_remediation_ansible('profile', self.environment,
                                             formatting,
                                             self.verbose_path)


class BashProfileRunner(ProfileRunner):
    def initial(self):
        self.command_options += ['--results-arf', self.arf_path]
        return super(BashProfileRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.profile)

        return run_stage_remediation_bash('profile', self.environment, formatting, self.verbose_path)


class OscapRuleRunner(RuleRunner):
    def remediation(self):
        self.command_options += ['--remediate']
        return self.make_oscap_call()

    def final(self):
        """ There is no need to run the final scan again - the result won't
        differ from what we already have from the remediation step."""
        return True


class BashRuleRunner(RuleRunner):
    def initial(self):
        self.command_options += ['--results-arf', self.arf_path]
        return super(BashRuleRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.rule_id)

        success = run_stage_remediation_bash('rule', self.environment, formatting, self.verbose_path)
        return success


class AnsibleRuleRunner(RuleRunner):
    def initial(self):
        self.command_options += ['--results-arf', self.arf_path]
        return super(AnsibleRuleRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.rule_id)
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
                                              formatting['output_file'])

        success = run_stage_remediation_ansible('rule', self.environment, formatting, self.verbose_path)
        return success


class Checker(object):
    def __init__(self, test_env):
        self.test_env = test_env
        self.executed_tests = 0

        self.datastream = ""
        self.benchmark_id = ""
        self.remediate_using = ""
        self.benchmark_cpes = set()

        now = datetime.datetime.now()
        self.test_timestamp_str = now.strftime("%Y-%m-%d %H:%M")

    def test_target(self, target):
        self.start()
        try:
            self._test_target(target)
        except KeyboardInterrupt:
            logging.info("Terminating the test run due to keyboard interrupt.")
        except RuntimeError as exc:
            logging.error("Terminating due to error: {msg}.".format(msg=str(exc)))
        except TimeoutException as exc:
            logging.error("Terminating due to timeout: {msg}".format(msg=str(exc)))
        finally:
            self.finalize()

    def run_test_for_all_profiles(self, profiles, test_data=None):
        if len(profiles) > 1:
            with test_env.SavedState.create_from_environment(self.test_env, "prepared") as state:
                args_list = [(p, test_data) for p in profiles]
                state.map_on_top(self._run_test, args_list)
        elif profiles:
            self._run_test(profiles[0], test_data)
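    # SavedState presumably snapshots the "prepared" environment so that each
    # profile is tested on top of the same baseline; map_on_top() then applies
    # self._run_test to every (profile, test_data) pair.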

    def _test_target(self, target):
        raise NotImplementedError()

    def _run_test(self, profile, test_data):
        raise NotImplementedError()

    def start(self):
        self.executed_tests = 0

        try:
            self.test_env.start()
        except Exception as exc:
            msg = ("Failed to start test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)

    def finalize(self):
        if not self.executed_tests:
            logging.error("Nothing has been tested!")

        try:
            self.test_env.finalize()
        except Exception as exc:
            msg = ("Failed to finalize test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)


REMEDIATION_PROFILE_RUNNERS = {
    'oscap': OscapProfileRunner,
    'bash': BashProfileRunner,
    'ansible': AnsibleProfileRunner,
}


REMEDIATION_RULE_RUNNERS = {
    'oscap': OscapRuleRunner,
    'bash': BashRuleRunner,
    'ansible': AnsibleRuleRunner,
}


REMEDIATION_RUNNER_TO_REMEDIATION_MEANS = {
    'oscap': 'bash',
    'bash': 'bash',
    'ansible': 'ansible',
}
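# Hypothetical caller-side selection of a runner class by remediation means:
#   runner_class = REMEDIATION_RULE_RUNNERS[remediate_using]
#   runner = runner_class(environment, profile, datastream, benchmark_id,
#                         rule_id, script_name, dont_clean, manual_debug)
# REMEDIATION_RUNNER_TO_REMEDIATION_MEANS maps the chosen runner to the fix
# content it ends up applying; the plain 'oscap' runner maps to 'bash',
# presumably because 'oscap --remediate' executes the bash fixes embedded in
# the datastream.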