Passed
Push — master ( ea7c4a...d61422 )
by Matěj
01:18 queued 12s
created

ssg_test_suite.oscap   F

Complexity

Total Complexity 124

Size/Duplication

Total Lines 714
Duplicated Lines 0 %

Test Coverage

Coverage 0%

Importance

Changes 0
Metric Value
eloc 513
dl 0
loc 714
ccs 0
cts 437
cp 0
rs 2
c 0
b 0
f 0
wmc 124

14 Functions

Rating   Name   Duplication   Size   Complexity  
A is_virtual_oscap_profile() 0 13 5
A single_quote_string() 0 5 2
A process_profile_id() 0 9 3
A send_files_remote() 0 14 2
A save_analysis_to_json() 0 4 2
A run_stage_remediation_bash() 0 26 4
A find_result_id_in_output() 0 6 2
A send_arf_to_remote_machine_and_generate_remediations_there() 0 19 5
A generate_fixes_remotely() 0 20 3
A triage_xml_results() 0 11 2
A get_file_remote() 0 12 2
A analysis_to_serializable() 0 6 3
A get_result_id_from_arf() 0 12 3
A run_stage_remediation_ansible() 0 29 4

55 Methods

Rating   Name   Duplication   Size   Complexity  
A GenericRunner.prepare_online_scanning_arguments() 0 7 1
A GenericRunner._wait_for_continue() 0 4 1
A RuleRunner.run_stage_with_context() 0 3 1
A Checker.start() 0 9 2
A ProfileRunner.final() 0 9 2
A Checker.run_test_for_all_profiles() 0 7 4
A OscapRuleRunner.final() 0 4 1
A Checker.__init__() 0 11 1
A RuleRunner._find_rule_result_in_output() 0 18 2
A Checker.test_target() 0 12 4
A Checker._run_test() 0 2 1
A Checker.finalize() 0 10 3
A Checker._test_target() 0 2 1
A RuleRunner._get_report_file() 0 2 1
A OscapProfileRunner.remediation() 0 3 1
A OscapRuleRunner.remediation() 0 3 1
A BashRuleRunner.remediation() 0 7 1
A GenericRunner.analyze() 0 5 1
A BashProfileRunner.remediation() 0 5 1
A GenericRunner._get_report_file() 0 2 1
A GenericRunner.initial() 0 4 1
A ProfileRunner._get_report_file() 0 2 1
A GenericRunner.make_oscap_call() 0 2 1
A ProfileRunner._get_arf_file() 0 2 1
A AnsibleProfileRunner.initial() 0 3 1
A GenericRunner._make_report_path() 0 4 1
A GenericRunner._get_arf_file() 0 2 1
A BashProfileRunner.initial() 0 3 1
F GenericRunner.run_stage() 0 53 14
A AnsibleProfileRunner.remediation() 0 9 1
A GenericRunner.final() 0 4 1
A GenericRunner._get_results_file() 0 2 1
A RuleRunner.final() 0 5 1
A RuleRunner.make_oscap_call() 0 9 1
A ProfileRunner._get_verbose_file() 0 2 1
A RuleRunner._get_results_file() 0 3 1
A GenericRunner._generate_report_file() 0 5 1
A ProfileRunner.make_oscap_call() 0 11 2
A AnsibleRuleRunner.remediation() 0 8 1
A GenericRunner._get_formatting_dict_for_remediation() 0 10 1
A BashRuleRunner.initial() 0 3 1
A GenericRunner._make_arf_path() 0 3 1
A GenericRunner.get_command() 0 3 1
A RuleRunner._get_formatting_dict_for_remediation() 0 5 1
A RuleRunner._analyze_output_of_oscap_call() 0 20 3
A GenericRunner._get_verbose_file() 0 2 1
A ProfileRunner._get_results_file() 0 2 1
A GenericRunner.remediation() 0 2 1
A GenericRunner._make_results_path() 0 4 1
A RuleRunner._get_verbose_file() 0 2 1
A AnsibleRuleRunner.initial() 0 3 1
A GenericRunner.__init__() 0 25 1
A RuleRunner.__init__() 0 14 1
A GenericRunner._make_verbose_path() 0 4 1
A RuleRunner._get_arf_file() 0 2 1

How to fix   Complexity   

Complexity

Complex classes like ssg_test_suite.oscap often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.

1
#!/usr/bin/env python2
2
from __future__ import print_function
3
4
import logging
5
import os.path
6
import re
7
import collections
8
import xml.etree.ElementTree
9
import json
10
import datetime
11
import socket
12
import sys
13
import time
14
15
from ssg.constants import OSCAP_PROFILE_ALL_ID
16
17
from ssg_test_suite.log import LogHelper
18
from ssg_test_suite import test_env
19
from ssg_test_suite import common
20
21
from ssg.shims import input_func
22
23
# Needed for compatibility as there is no TimeoutError in python2.
if sys.version_info[0] < 3:
    TimeoutException = socket.timeout
else:
    TimeoutException = TimeoutError

# Avoid "No handlers could be found" warnings when the embedding
# application does not configure logging itself.
logging.getLogger(__name__).addHandler(logging.NullHandler())

# Expected oscap exit code for each test-scenario context
# (oscap exits 0 on a clean pass, 1 on error, 2 on failed rules).
_CONTEXT_RETURN_CODES = {'pass': 0,
                         'fail': 2,
                         'error': 1,
                         'notapplicable': 0,
                         'fixed': 0}

# Template URNs passed to `oscap xccdf generate fix --template`.
_ANSIBLE_TEMPLATE = 'urn:xccdf:fix:script:ansible'
_BASH_TEMPLATE = 'urn:xccdf:fix:script:sh'
# XCCDF 1.2 namespace used when parsing result XML documents.
_XCCDF_NS = 'http://checklists.nist.gov/xccdf/1.2'


# Whether virtual profile ids such as "(all)" get wrapped in single
# quotes by process_profile_id().
PROFILE_ALL_ID_SINGLE_QUOTED = False
43
44
45
def analysis_to_serializable(analysis):
    """Return a copy of *analysis* with set values converted to tuples.

    Sets are not JSON-serializable; every set value is replaced by a
    tuple, all other values are kept as-is.  The input mapping is not
    modified.
    """
    result = dict(analysis)
    for key, value in analysis.items():
        # isinstance is the idiomatic type check and also covers set
        # subclasses (the original `type(value) == set` did not).
        if isinstance(value, set):
            result[key] = tuple(value)
    return result
51
52
53
def save_analysis_to_json(analysis, output_fname):
    """Dump the analysis mapping as JSON into *output_fname*."""
    serializable = analysis_to_serializable(analysis)
    with open(output_fname, "w") as out_file:
        json.dump(serializable, out_file)
57
58
59
def triage_xml_results(fname):
    """Group rule ids from an XCCDF result file by their outcome.

    Returns a defaultdict mapping each result status (e.g. "pass",
    "fail") to the set of rule idrefs that ended with that status.
    """
    triaged = collections.defaultdict(set)
    tree = xml.etree.ElementTree.parse(fname)
    for rule_result in tree.findall(".//{%s}rule-result" % _XCCDF_NS):
        status = rule_result.find("{%s}result" % _XCCDF_NS).text
        triaged[status].add(rule_result.get("idref"))
    return triaged
70
71
72
def send_files_remote(verbose_path, remote_dir, domain_ip, *files):
    """Upload files to VM."""
    # files is a list of absolute paths on the host
    destination = 'root@{0}:{1}'.format(domain_ip, remote_dir)
    files_string = ' '.join(files)

    logging.debug('Uploading files {0} to {1}'.format(files_string,
                                                      destination))
    scp_command = ['scp'] + list(common.SSH_ADDITIONAL_OPTS) + list(files) + [destination]
    returncode = common.run_cmd_local(scp_command, verbose_path)[0]
    if returncode == 0:
        return True
    logging.error('Failed to upload files {0}'.format(files_string))
    return False
86
87
88
def get_file_remote(verbose_path, local_dir, domain_ip, remote_path):
    """Download a file from VM."""
    # remote_path is an absolute path of a file on remote machine
    source = 'root@{0}:{1}'.format(domain_ip, remote_path)
    logging.debug('Downloading file {0} to {1}'
                  .format(source, local_dir))
    scp_command = ['scp'] + list(common.SSH_ADDITIONAL_OPTS) + [source, local_dir]
    if common.run_cmd_local(scp_command, verbose_path)[0] == 0:
        return True
    logging.error('Failed to download file {0}'.format(remote_path))
    return False
100
101
102
def find_result_id_in_output(output):
    """Extract the TestResult id from `oscap info` output, or None."""
    found = re.search('result id.*$', output, re.IGNORECASE | re.MULTILINE)
    if found is None:
        return None
    # The id is the last whitespace-separated token on the matched line.
    return found.group(0).split()[-1]
108
109
110
def get_result_id_from_arf(arf_path, verbose_path):
    """Run `oscap info` on an ARF file and return the result id it reports.

    Raises RuntimeError when oscap fails or no result id can be found.
    """
    info_command = ['oscap', 'info', arf_path]
    returncode, output = common.run_cmd_local(info_command, verbose_path)
    if returncode != 0:
        raise RuntimeError('{0} returned {1} exit code'.
                           format(' '.join(info_command), returncode))
    res_id = find_result_id_in_output(output)
    if res_id is None:
        raise RuntimeError('Failed to find result ID in {0}'
                           .format(arf_path))
    return res_id
122
123
124
def single_quote_string(input):
    """Strip any quote characters from *input* and wrap it in single quotes."""
    stripped = input.replace('"', '').replace("'", '')
    return "'{}'".format(stripped)
129
130
131
def generate_fixes_remotely(formatting, verbose_path):
    """Run `oscap xccdf generate fix` on the remote machine.

    The remediation script/playbook is generated from the ARF file that
    was previously uploaded to the remote root directory.  Raises
    RuntimeError when the remote command exits non-zero.
    """
    command_components = [
        'oscap', 'xccdf', 'generate', 'fix',
        '--benchmark-id', formatting['benchmark_id'],
        '--profile', formatting['profile'],
        '--template', formatting['output_template'],
        '--output', '/{output_file}'.format(** formatting),
    ]
    if 'result_id' in formatting:
        command_components.extend(['--result-id', formatting['result_id']])
    command_components.append('/{arf_file}'.format(** formatting))

    # Quote each component so the remote shell sees them verbatim.
    command_string = ' '.join(single_quote_string(c) for c in command_components)
    rc, stdout = common.run_cmd_remote(
        command_string, formatting['domain_ip'], verbose_path)
    if rc != 0:
        msg = ('Command {0} ended with return code {1} (expected 0).'
               .format(command_string, rc))
        raise RuntimeError(msg)
151
152
153
def run_stage_remediation_ansible(run_type, formatting, verbose_path):
    """Generate an Ansible playbook remotely, fetch it, and run it locally.

    Returns False on error, or True in case of successful Ansible playbook
    run.
    """
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False
    command = (
        'ansible-playbook', '-v', '-i', '{0},'.format(formatting['domain_ip']),
        # The missing comma between '-u' and 'root' used to merge the two
        # literals into a single '-uroot' argument via implicit string
        # concatenation; pass the option and its value separately.
        '-u', 'root', '--ssh-common-args={0}'.format(' '.join(list(common.SSH_ADDITIONAL_OPTS))),
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    # Appends output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'ab') as f:
        f.write('Stdout of "{}":'.format(command_string).encode("utf-8"))
        f.write(output.encode("utf-8"))
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
182
183
184
def run_stage_remediation_bash(run_type, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful bash scripts
       run."""
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path)
    # Fetch the generated script so it is preserved alongside the logs.
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False

    command_string = '/bin/bash -x /{output_file}'.format(** formatting)
    returncode, output = common.run_cmd_remote(
        command_string, formatting['domain_ip'], verbose_path)
    # Append the remote script output to the verbose_path file.
    with open(verbose_path, 'ab') as log_file:
        log_file.write('Stdout of "{}":'.format(command_string).encode("utf-8"))
        log_file.write(output.encode("utf-8"))
    if returncode == 0:
        return True
    msg = (
        'Bash script remediation run has exited with return code {} '
        'instead of expected 0'.format(returncode))
    LogHelper.preload_log(logging.ERROR, msg, 'fail')
    return False
210
211
212
def send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path):
    """Upload the ARF file to the remote machine and generate fixes there.

    For rule-level runs the result id is first extracted from the ARF so
    the remediation targets the right TestResult.

    Returns True on success, False on any failure.
    """
    if run_type == 'rule':
        try:
            res_id = get_result_id_from_arf(formatting['arf'], verbose_path)
        except Exception as exc:
            logging.error(str(exc))
            return False
        formatting['result_id'] = res_id

    if not send_files_remote(
            verbose_path, '/', formatting['domain_ip'], formatting['arf']):
        return False

    try:
        generate_fixes_remotely(formatting, verbose_path)
    except Exception as exc:
        logging.error(str(exc))
        return False
    # Make the boolean contract explicit: the failure paths above return
    # False, so the success path must return True (not implicit None).
    return True
231
232
233
def is_virtual_oscap_profile(profile):
    """ Test if the profile belongs to the so called category virtual
        from OpenSCAP available profiles. It can be (all) or other id we
        might come up in the future, it just needs to be encapsulated
        with parenthesis for example "(custom_profile)".
    """
    if profile is None:
        return False
    if profile == OSCAP_PROFILE_ALL_ID:
        return True
    # Any other id wrapped in parentheses also counts as virtual.
    return profile.startswith("(") and profile.endswith(")")
246
247
248
def process_profile_id(profile):
    """Return the profile id, single-quoted when it is a virtual profile
    and quoting is enabled via PROFILE_ALL_ID_SINGLE_QUOTED."""
    # Detect if the profile is virtual and include single quotes if needed.
    if is_virtual_oscap_profile(profile) and PROFILE_ALL_ID_SINGLE_QUOTED:
        return "'{}'".format(profile)
    return profile
257
258
259
class GenericRunner(object):
    """Base class for one oscap scan/remediate cycle against a test VM.

    A runner is driven through run_stage() with one of the stages
    'initial', 'remediation' or 'final'.  Subclasses supply the file
    naming (_get_*_file hooks), the oscap invocation (make_oscap_call)
    and the remediation strategy (remediation).
    """

    def __init__(self, environment, profile, datastream, benchmark_id):
        self.environment = environment
        self.profile = profile
        self.datastream = datastream
        self.benchmark_id = benchmark_id

        # Paths under LogHelper.LOG_DIR, filled in by the _make_*_path
        # helpers at the start of each run_stage() call.
        self.arf_file = ''
        self.arf_path = ''
        self.verbose_path = ''
        self.report_path = ''
        self.results_path = ''
        self.stage = 'undefined'

        # Behavior flags; subclasses/callers may override them.
        self.clean_files = False
        self.manual_debug = False
        self._filenames_to_clean_afterwards = set()

        # Pieces of the oscap command line, rebuilt for every stage.
        self.command_base = []
        self.command_options = []
        self.command_operands = []
        # number of seconds to sleep after reboot of vm to let
        # the system to finish startup, there were problems with
        # temporary files created by Dracut during image generation interfering
        # with the scan
        self.time_to_finish_startup = 30

    def _make_arf_path(self):
        # Resolve the ARF filename via the subclass hook and anchor it
        # in the log directory.
        self.arf_file = self._get_arf_file()
        self.arf_path = os.path.join(LogHelper.LOG_DIR, self.arf_file)

    def _get_arf_file(self):
        # Subclass hook: base name of the ARF results file.
        raise NotImplementedError()

    def _make_verbose_path(self):
        verbose_file = self._get_verbose_file()
        verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_file)
        # find_name picks a non-clashing name with the given suffix.
        self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')

    def _get_verbose_file(self):
        # Subclass hook: base name of the verbose log file.
        raise NotImplementedError()

    def _make_report_path(self):
        report_file = self._get_report_file()
        report_path = os.path.join(LogHelper.LOG_DIR, report_file)
        self.report_path = LogHelper.find_name(report_path, '.html')

    def _get_report_file(self):
        # Subclass hook: base name of the HTML report file.
        raise NotImplementedError()

    def _make_results_path(self):
        results_file = self._get_results_file()
        results_path = os.path.join(LogHelper.LOG_DIR, results_file)
        self.results_path = LogHelper.find_name(results_path, '.xml')

    def _get_results_file(self):
        # Subclass hook: base name of the XML results file.
        raise NotImplementedError()

    def _generate_report_file(self):
        """Request an HTML report and schedule it for later cleanup."""
        self.command_options.extend([
            '--report', self.report_path,
        ])
        self._filenames_to_clean_afterwards.add(self.report_path)

    def _wait_for_continue(self):
        """ In case user requests to leave machine in failed state for hands
        on debugging, ask for keypress to continue."""
        input_func("Paused for manual debugging. Continue by pressing return.")

    def prepare_online_scanning_arguments(self):
        """Add the common scan options and the datastream operand."""
        self.command_options.extend([
            '--benchmark-id', self.benchmark_id,
            '--profile', self.profile,
            '--progress', '--oval-results',
        ])
        self.command_operands.append(self.datastream)

    def run_stage(self, stage):
        """Execute one stage ('initial', 'remediation' or 'final').

        Rebuilds all output paths and the command line, dispatches to the
        stage method, performs optional file cleanup, and logs the
        preloaded pass/fail messages.  Returns the stage's truthy result.
        Raises RuntimeError for an unknown stage name.
        """
        self.stage = stage

        self._make_verbose_path()
        self._make_report_path()
        self._make_arf_path()
        self._make_results_path()

        # Reset the command line; '--verbose DEVEL' gives the most
        # detailed oscap logging.
        self.command_base = []
        self.command_options = ['--verbose', 'DEVEL']
        self.command_operands = []

        result = None
        if stage == 'initial':
            result = self.initial()
        elif stage == 'remediation':
            result = self.remediation()
        elif stage == 'final':
            result = self.final()
        else:
            raise RuntimeError('Unknown stage: {}.'.format(stage))

        if self.clean_files:
            # Iterate a snapshot so the set can be drained in `finally`.
            for fname in tuple(self._filenames_to_clean_afterwards):
                try:
                    os.remove(fname)
                except OSError:
                    logging.error(
                        "Failed to cleanup file '{0}'"
                        .format(fname))
                finally:
                    self._filenames_to_clean_afterwards.remove(fname)

        if result:
            LogHelper.log_preloaded('pass')
            if self.clean_files:
                # On success the verbose log (and, for scan stages, the
                # results file) is no longer interesting.
                files_to_remove = [self.verbose_path]
                if stage in ['initial', 'final']:
                    files_to_remove.append(self.results_path)

                for fname in tuple(files_to_remove):
                    try:
                        if os.path.exists(fname):
                            os.remove(fname)
                    except OSError:
                        logging.error(
                            "Failed to cleanup file '{0}'"
                            .format(fname))
        else:
            LogHelper.log_preloaded('fail')
            if self.manual_debug:
                self._wait_for_continue()
        return result

    @property
    def get_command(self):
        # Full oscap command line as a list of arguments.
        return self.command_base + self.command_options + self.command_operands

    def make_oscap_call(self):
        # Subclass hook: run oscap and return True/False for success.
        raise NotImplementedError()

    def initial(self):
        """First scan of the target; stores results into results_path."""
        self.command_options += ['--results', self.results_path]
        result = self.make_oscap_call()
        return result

    def remediation(self):
        # Subclass hook: apply remediation for the chosen means.
        raise NotImplementedError()

    def final(self):
        """Scan after remediation; stores results into results_path."""
        self.command_options += ['--results', self.results_path]
        result = self.make_oscap_call()
        return result

    def analyze(self, stage):
        """Triage the XML results and tag them with stage/runner metadata."""
        triaged_results = triage_xml_results(self.results_path)
        triaged_results["stage"] = stage
        triaged_results["runner"] = self.__class__.__name__
        return triaged_results

    def _get_formatting_dict_for_remediation(self):
        """Collect the values the remediation helper functions format with."""
        formatting = {
            'domain_ip': self.environment.domain_ip,
            'profile': self.profile,
            'datastream': self.datastream,
            'benchmark_id': self.benchmark_id
        }
        formatting['arf'] = self.arf_path
        formatting['arf_file'] = self.arf_file
        return formatting
426
427
428
class ProfileRunner(GenericRunner):
    """Runner that scans and remediates a whole profile at once."""

    def _get_arf_file(self):
        return self.profile + '-initial-arf.xml'

    def _get_verbose_file(self):
        return '-'.join([self.profile, self.stage])

    def _get_report_file(self):
        return '-'.join([self.profile, self.stage])

    def _get_results_file(self):
        return '-'.join([self.profile, self.stage, 'results'])

    def final(self):
        # A libvirt-based environment is rebooted before the final scan,
        # then given time to settle before scanning.
        if self.environment.name == 'libvirt-based':
            logging.info("Rebooting domain '{0}' before final scan."
                         .format(self.environment.domain_name))
            self.environment.reboot()
            logging.info("Waiting for {0} seconds to let the system finish startup."
                         .format(self.time_to_finish_startup))
            time.sleep(self.time_to_finish_startup)
        return super(ProfileRunner, self).final()

    def make_oscap_call(self):
        """Scan the whole profile; exit codes 0 and 2 count as success."""
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        if returncode in (0, 2):
            return True
        logging.error(('Profile run should end with return code 0 or 2 '
                       'not "{0}" as it did!').format(returncode))
        return False
462
463
464
class RuleRunner(GenericRunner):
    """Runner that scans a single rule within a profile.

    On top of the generic runner it tracks the expected outcome
    ("context") of the test scenario and parses the rule's actual result
    out of the oscap --progress output.
    """

    def __init__(
            self, environment, profile, datastream, benchmark_id,
            rule_id, script_name, dont_clean, manual_debug):
        super(RuleRunner, self).__init__(
            environment, profile, datastream, benchmark_id,
        )

        # Rule under test and the scenario script driving it.
        self.rule_id = rule_id
        # Expected result ('pass', 'fail', ...) set via run_stage_with_context.
        self.context = None
        self.script_name = script_name
        self.clean_files = not dont_clean
        self.manual_debug = manual_debug

        # Raw stdout of the last oscap invocation, parsed for rule results.
        self._oscap_output = ''

    def _get_arf_file(self):
        return '{0}-initial-arf.xml'.format(self.rule_id)

    def _get_verbose_file(self):
        return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)

    def _get_report_file(self):
        return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)

    def _get_results_file(self):
        return '{0}-{1}-{2}-results-{3}'.format(
            self.rule_id, self.script_name, self.profile, self.stage)

    def make_oscap_call(self):
        """Scan only this rule and judge the result against the context."""
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        # Restrict evaluation to the single rule under test.
        self.command_options.extend(
            ['--rule', self.rule_id])
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        return self._analyze_output_of_oscap_call()

    def final(self):
        # The final scan must both succeed and match the expected context.
        success = super(RuleRunner, self).final()
        success = success and self._analyze_output_of_oscap_call()

        return success

    def _find_rule_result_in_output(self):
        # oscap --progress options outputs rule results to stdout in
        # following format:
        # xccdf_org....rule_accounts_password_minlen_login_defs:pass
        match = re.findall('{0}:(.*)$'.format(self.rule_id),
                           self._oscap_output,
                           re.MULTILINE)

        if not match:
            # When the rule is not selected, it won't match in output
            return "notselected"

        # When --remediation is executed, there will be two entries in
        # progress output, one for fail, and one for fixed, e.g.
        # xccdf_org....rule_accounts_password_minlen_login_defs:fail
        # xccdf_org....rule_accounts_password_minlen_login_defs:fixed
        # We are interested in the last one
        return match[-1]

    def _analyze_output_of_oscap_call(self):
        """Compare the parsed rule result with the expected context.

        Returns True when they match; otherwise preloads a 'fail' log
        message and returns False.
        """
        local_success = True
        # check expected result
        rule_result = self._find_rule_result_in_output()

        if rule_result != self.context:
            local_success = False
            if rule_result == 'notselected':
                msg = (
                    'Rule {0} has not been evaluated! '
                    'Wrong profile selected in test scenario?'
                    .format(self.rule_id))
            else:
                msg = (
                    'Rule evaluation resulted in {0}, '
                    'instead of expected {1} during {2} stage '
                    .format(rule_result, self.context, self.stage)
                )
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return local_success

    def _get_formatting_dict_for_remediation(self):
        # Extend the generic formatting dict with the rule id.
        fmt = super(RuleRunner, self)._get_formatting_dict_for_remediation()
        fmt['rule_id'] = self.rule_id

        return fmt

    def run_stage_with_context(self, stage, context):
        """Set the expected result for this stage, then run it."""
        self.context = context
        return self.run_stage(stage)
558
559
560
class OscapProfileRunner(ProfileRunner):
    """Profile runner that lets oscap remediate during the scan."""

    def remediation(self):
        # oscap applies the fixes itself when --remediate is passed.
        self.command_options.append('--remediate')
        return self.make_oscap_call()
564
565
566
class AnsibleProfileRunner(ProfileRunner):
    """Profile runner that remediates via a generated Ansible playbook."""

    def initial(self):
        # Keep the ARF so the playbook can be generated from it later.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(AnsibleProfileRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        output_file = '{0}.yml'.format(self.profile)
        formatting['output_file'] = output_file
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR, output_file)

        return run_stage_remediation_ansible('profile',
                                             formatting,
                                             self.verbose_path)
580
581
582
class BashProfileRunner(ProfileRunner):
    """Profile runner that remediates via a generated bash script."""

    def initial(self):
        # Keep the ARF so the bash script can be generated from it later.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(BashProfileRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.profile)
        return run_stage_remediation_bash('profile', formatting, self.verbose_path)
592
593
594
class OscapRuleRunner(RuleRunner):
    """Rule runner that lets oscap remediate during the scan."""

    def remediation(self):
        self.command_options.append('--remediate')
        return self.make_oscap_call()

    def final(self):
        """ There is no need to run final scan again - result won't be different
        to what we already have in remediation step."""
        return True
603
604
605
class BashRuleRunner(RuleRunner):
    """Rule runner that remediates via a generated bash script."""

    def initial(self):
        # Keep the ARF so the bash script can be generated from it later.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(BashRuleRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.rule_id)
        return run_stage_remediation_bash('rule', formatting, self.verbose_path)
617
618
619
class AnsibleRuleRunner(RuleRunner):
    """Rule runner that remediates via a generated Ansible playbook."""

    def initial(self):
        # Keep the ARF so the playbook can be generated from it later.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(AnsibleRuleRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        output_file = '{0}.yml'.format(self.rule_id)
        formatting['output_file'] = output_file
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR, output_file)
        return run_stage_remediation_ansible('rule', formatting, self.verbose_path)
632
633
634
class Checker(object):
    """Base class driving a test run against a test environment.

    Subclasses implement _test_target() and _run_test(); this class
    handles environment start/finalize and top-level error handling.
    """

    def __init__(self, test_env):
        self.test_env = test_env
        # Number of tests actually executed; finalize() complains if zero.
        self.executed_tests = 0

        # Filled in by subclasses/callers before a run.
        self.datastream = ""
        self.benchmark_id = ""
        self.remediate_using = ""
        self.benchmark_cpes = set()

        # Human-readable timestamp identifying this test run.
        now = datetime.datetime.now()
        self.test_timestamp_str = now.strftime("%Y-%m-%d %H:%M")

    def test_target(self, target):
        """Start the environment, test *target*, and always finalize.

        Interrupts, runtime errors and timeouts terminate the run with a
        log message instead of propagating.
        """
        self.start()
        try:
            self._test_target(target)
        except KeyboardInterrupt:
            logging.info("Terminating the test run due to keyboard interrupt.")
        except RuntimeError as exc:
            logging.error("Terminating due to error: {msg}.".format(msg=str(exc)))
        except TimeoutException as exc:
            logging.error("Terminating due to timeout: {msg}".format(msg=str(exc)))
        finally:
            self.finalize()

    def run_test_for_all_profiles(self, profiles, test_data=None):
        """Run the test for each profile in *profiles*.

        With more than one profile, the prepared environment state is
        saved once and each profile's test is replayed on top of it.
        """
        if len(profiles) > 1:
            with test_env.SavedState.create_from_environment(self.test_env, "prepared") as state:
                args_list = [(p, test_data) for p in profiles]
                state.map_on_top(self._run_test, args_list)
        elif profiles:
            self._run_test(profiles[0], test_data)

    def _test_target(self, target):
        # Subclass hook: perform the actual testing of the target.
        raise NotImplementedError()

    def _run_test(self, profile, test_data):
        # Subclass hook: run one test for the given profile.
        raise NotImplementedError()

    def start(self):
        """Start the test environment, wrapping failures in RuntimeError."""
        self.executed_tests = 0

        try:
            self.test_env.start()
        except Exception as exc:
            msg = ("Failed to start test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)

    def finalize(self):
        """Tear down the test environment; warn when nothing was tested."""
        if not self.executed_tests:
            logging.error("Nothing has been tested!")

        try:
            self.test_env.finalize()
        except Exception as exc:
            msg = ("Failed to finalize test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)
694
695
696
# Maps a remediation means to the runner class that handles whole profiles.
REMEDIATION_PROFILE_RUNNERS = {
    'oscap': OscapProfileRunner,
    'bash': BashProfileRunner,
    'ansible': AnsibleProfileRunner,
}


# Maps a remediation means to the runner class that handles single rules.
REMEDIATION_RULE_RUNNERS = {
    'oscap': OscapRuleRunner,
    'bash': BashRuleRunner,
    'ansible': AnsibleRuleRunner,
}


# Maps a runner choice to the remediation content it ultimately uses;
# per this table, 'oscap' runs use bash remediation content.
REMEDIATION_RUNNER_TO_REMEDIATION_MEANS = {
    'oscap': 'bash',
    'bash': 'bash',
    'ansible': 'ansible',
}
715