tests.ssg_test_suite.oscap   F
last analyzed

Complexity

Total Complexity 141

Size/Duplication

Total Lines 747
Duplicated Lines 0 %

Importance

Changes 0
Metric Value
eloc 537
dl 0
loc 747
rs 2
c 0
b 0
f 0
wmc 141

60 Methods

Rating   Name   Duplication   Size   Complexity  
A GenericRunner._wait_for_continue() 0 4 1
A RuleRunner.make_oscap_call() 0 13 3
A GenericRunner.prepare_online_scanning_arguments() 0 7 1
A GenericRunner.final() 0 5 3
A RuleRunner._get_formatting_dict_for_remediation() 0 5 1
A GenericRunner.analyze() 0 5 1
A Checker.run_test_for_all_profiles() 0 7 4
A Checker.__init__() 0 11 1
A Checker.finalize() 0 10 3
A RuleRunner.run_stage_with_context() 0 3 1
A AnsibleProfileRunner.initial() 0 3 1
A AnsibleRuleRunner.remediation() 0 8 1
A AnsibleRuleRunner.initial() 0 3 1
A ProfileRunner._get_verbose_basename() 0 2 1
A GenericRunner._generate_report_file() 0 5 1
A GenericRunner._remove_files_to_clean() 0 12 5
A GenericRunner._get_formatting_dict_for_remediation() 0 10 1
A ProfileRunner.make_oscap_call() 0 14 3
A GenericRunner.make_oscap_call() 0 2 1
A Checker.test_target() 0 12 4
A GenericRunner.remediation() 0 2 1
A OscapProfileRunner.remediation() 0 3 1
A RuleRunner._get_report_basename() 0 2 1
A GenericRunner.get_command() 0 3 1
A ProfileRunner.final() 0 9 2
A ProfileRunner._get_results_basename() 0 2 1
A RuleRunner.__init__() 0 16 1
A GenericRunner.__init__() 0 26 1
A OscapRuleRunner.final() 0 4 1
A GenericRunner.initial() 0 5 3
A OscapRuleRunner.remediation() 0 4 1
A ProfileRunner._get_report_basename() 0 2 1
A GenericRunner.__exit__() 0 2 1
A Checker._run_test() 0 2 1
A GenericRunner._get_arf_basename() 0 2 1
A GenericRunner._get_report_basename() 0 2 1
A RuleRunner._get_verbose_basename() 0 2 1
A BashRuleRunner.initial() 0 3 1
A ProfileRunner._get_arf_basename() 0 4 2
A RuleRunner._get_results_basename() 0 3 1
A GenericRunner._make_verbose_path() 0 4 1
A RuleRunner._analyze_output_of_oscap_call() 0 27 4
A Checker._test_target() 0 2 1
A GenericRunner._make_results_path() 0 4 1
A Checker.start() 0 9 2
A BashProfileRunner.initial() 0 3 1
C GenericRunner.run_stage() 0 39 9
A GenericRunner._make_arf_path() 0 3 1
A GenericRunner._make_report_path() 0 4 1
A RuleRunner._find_rule_result_in_output() 0 18 2
A RuleRunner.final() 0 5 1
A RuleRunner._get_arf_basename() 0 4 2
A GenericRunner.__enter__() 0 2 1
A BashProfileRunner.remediation() 0 5 1
A GenericRunner._get_verbose_basename() 0 2 1
A AnsibleProfileRunner.remediation() 0 9 1
A RuleRunner._get_initial_arf_path() 0 2 1
A ProfileRunner._get_initial_arf_path() 0 2 1
A BashRuleRunner.remediation() 0 7 1
A GenericRunner._get_results_basename() 0 2 1

14 Functions

Rating   Name   Duplication   Size   Complexity  
B send_arf_to_remote_machine_and_generate_remediations_there() 0 21 6
A get_result_id_from_arf() 0 12 3
A analysis_to_serializable() 0 6 3
A generate_fixes_remotely() 0 16 3
A is_virtual_oscap_profile() 0 13 5
A _get_bash_remediation_error_message_template() 0 17 3
A run_stage_remediation_bash() 0 22 4
A save_analysis_to_json() 0 4 2
A single_quote_string() 0 5 2
A run_stage_remediation_ansible() 0 28 4
A triage_xml_results() 0 11 2
A find_result_id_in_output() 0 6 2
A process_profile_id() 0 9 3
A get_file_remote() 0 13 3

How to fix   Complexity   

Complexity

Complex classes like tests.ssg_test_suite.oscap often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.

1
#!/usr/bin/python3
2
from __future__ import print_function
3
4
import collections
5
import datetime
6
import json
7
import logging
8
import os.path
9
import re
10
import socket
11
import subprocess
12
import sys
13
import time
14
import xml.etree.ElementTree
15
16
from ssg.constants import OSCAP_PROFILE_ALL_ID
17
18
from ssg_test_suite.log import LogHelper
19
from ssg_test_suite import test_env
20
from ssg_test_suite import common
21
22
from ssg.shims import input_func
23
24
# Needed for compatibility as there is no TimeoutError in python2.
if sys.version_info[0] < 3:
    TimeoutException = socket.timeout
else:
    TimeoutException = TimeoutError

# Avoid "No handlers could be found" warnings when the consuming
# application has not configured logging for this library.
logging.getLogger(__name__).addHandler(logging.NullHandler())

# Expected oscap exit code for each test-scenario context.
_CONTEXT_RETURN_CODES = {'pass': 0,
                         'fail': 2,
                         'error': 1,
                         'notapplicable': 0,
                         'fixed': 0}

# Template URNs passed to 'oscap xccdf generate fix' to choose the
# remediation language.
_ANSIBLE_TEMPLATE = 'urn:xccdf:fix:script:ansible'
_BASH_TEMPLATE = 'urn:xccdf:fix:script:sh'
# XML namespace of XCCDF 1.2 documents, used when parsing result files.
_XCCDF_NS = 'http://checklists.nist.gov/xccdf/1.2'


# Whether virtual profile ids such as "(all)" should be wrapped in single
# quotes when used on a command line (see process_profile_id).
PROFILE_ALL_ID_SINGLE_QUOTED = False
44
45
46
def analysis_to_serializable(analysis):
    """Return a shallow copy of *analysis* with set values turned into tuples.

    JSON cannot encode sets, so every set value is replaced by a tuple;
    all other values are kept as-is. The input mapping is not modified.
    """
    result = dict(analysis)
    for key, value in analysis.items():
        # isinstance instead of an exact type comparison so that set
        # subclasses are converted as well.
        if isinstance(value, set):
            result[key] = tuple(value)
    return result
52
53
54
def save_analysis_to_json(analysis, output_fname):
    """Write *analysis* to *output_fname* as JSON, converting sets first."""
    serializable = analysis_to_serializable(analysis)
    with open(output_fname, "w") as out_file:
        json.dump(serializable, out_file)
58
59
60
def triage_xml_results(fname):
    """Group rule idrefs from an XCCDF result file by their result status.

    Returns a defaultdict mapping a status string (e.g. 'pass', 'fail')
    to the set of rule idrefs that ended with that status.
    """
    tree = xml.etree.ElementTree.parse(fname)
    rule_results = tree.findall(".//{%s}rule-result" % _XCCDF_NS)

    by_status = collections.defaultdict(set)
    for rule_result in rule_results:
        status = rule_result.find("{%s}result" % _XCCDF_NS).text
        by_status[status].add(rule_result.get("idref"))

    return by_status
71
72
73
def get_file_remote(test_env, verbose_path, local_dir, remote_path):
    """Download a file from VM."""
    # remote_path is an absolute path of a file on remote machine
    logging.debug('Downloading remote file {0} to {1}'
                  .format(remote_path, local_dir))
    with open(verbose_path, "a") as log_file:
        try:
            test_env.scp_download_file(remote_path, local_dir, log_file)
        except Exception:
            logging.error('Failed to download file {0}'.format(remote_path))
            return False
    return True
86
87
88
def find_result_id_in_output(output):
    """Extract the result ID from 'oscap info' output, or None if absent."""
    search_flags = re.IGNORECASE | re.MULTILINE
    match = re.search('result id.*$', output, search_flags)
    if not match:
        return None
    # The result id is the last whitespace-separated token on the line.
    return match.group(0).split()[-1]
94
95
96
def get_result_id_from_arf(arf_path, verbose_path):
    """Run 'oscap info' on an ARF file and return the result ID found there.

    Raises RuntimeError when the command fails or no result ID is present.
    """
    command = ['oscap', 'info', arf_path]
    returncode, output = common.run_cmd_local(command, verbose_path)
    if returncode != 0:
        raise RuntimeError('{0} returned {1} exit code'.
                           format(' '.join(command), returncode))
    res_id = find_result_id_in_output(output)
    if res_id is None:
        raise RuntimeError('Failed to find result ID in {0}'
                           .format(arf_path))
    return res_id
108
109
110
def single_quote_string(input):
    """Wrap *input* in single quotes, dropping any quote characters in it."""
    sanitized = input.replace('"', "").replace("'", "")
    return "'{}'".format(sanitized)
115
116
117
def generate_fixes_remotely(test_env, formatting, verbose_path):
    """Run 'oscap xccdf generate fix' on the remote machine over SSH."""
    command_components = [
        'oscap', 'xccdf', 'generate', 'fix',
        '--benchmark-id', formatting['benchmark_id'],
        '--profile', formatting['profile'],
        '--template', formatting['output_template'],
        '--output', '/{output_file}'.format(** formatting),
    ]
    # A result id is only known for rule runs (see the caller chain).
    if 'result_id' in formatting:
        command_components.extend(['--result-id', formatting['result_id']])
    command_components.append('/{source_arf_basename}'.format(** formatting))

    command_string = ' '.join(
        single_quote_string(component) for component in command_components)
    with open(verbose_path, "a") as log_file:
        test_env.execute_ssh_command(command_string, log_file)
133
134
135
def run_stage_remediation_ansible(run_type, test_env, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful Ansible playbook
       run."""
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False
    command = (
        'ansible-playbook', '-vvv', '-i', '{0},'.format(formatting['domain_ip']),
        # Fixed a missing comma: '-u' 'root' concatenated to the single
        # argument '-uroot'; pass the option and its value separately.
        '-u', 'root', '--ssh-common-args={0}'.format(' '.join(test_env.ssh_additional_options)),
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    # Appends output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'ab') as f:
        f.write('Stdout of "{}":'.format(command_string).encode("utf-8"))
        f.write(output.encode("utf-8"))
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
163
164
165
def _get_bash_remediation_error_message_template(formatting):
166
    if "rule_id" in formatting:
167
        result = (
168
            'Bash remediation for rule {rule_id} '.format(** formatting) +
169
            'has exited with these errors:\n{stderr}'
170
        )
171
    elif "profile" in formatting:
172
        result = (
173
            'Bash remediation for profile {profile} '.format(** formatting) +
174
            'has exited with these errors:\n{stderr}'
175
        )
176
    else:
177
        msg = (
178
            "There was an error during remediation, but the remediation context "
179
            "is unknown, which indicates a problem in the test suite.")
180
        raise RuntimeError(msg)
181
    return result
182
183
184
def run_stage_remediation_bash(run_type, test_env, formatting, verbose_path):
    """
       Returns False on error, or True in case of successful bash scripts
       run."""
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    remote_script = '/' + formatting['output_file']
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           remote_script):
        return False

    command_string = '/bin/bash -x /{output_file}'.format(** formatting)

    with open(verbose_path, "a") as log_file:
        error_msg_template = _get_bash_remediation_error_message_template(formatting)
        try:
            test_env.execute_ssh_command(
                command_string, log_file, error_msg_template=error_msg_template)
        except Exception as exc:
            # Any SSH/remediation failure is logged and reported as failure.
            LogHelper.preload_log(logging.ERROR, str(exc), 'fail')
            return False
    return True
206
207
208
def send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path):
    """Upload the source ARF to the VM and generate remediations from it.

    For rule runs, the scan result ID is first extracted from the ARF so
    the generated fix targets the right result.
    Returns True on success, False on error. (Previously the function fell
    through returning None on success while returning False on error;
    it now has a consistent boolean contract. Callers that ignored the
    return value are unaffected.)
    """
    if run_type == 'rule':
        try:
            res_id = get_result_id_from_arf(formatting['source_arf'], verbose_path)
        except Exception as exc:
            logging.error(str(exc))
            return False
        formatting['result_id'] = res_id

    with open(verbose_path, "a") as log_file:
        try:
            test_env.scp_upload_file(formatting["source_arf"], "/", log_file)
        except Exception:
            # Previously swallowed silently; log so the failure is traceable.
            logging.error(
                'Failed to upload ARF {0}'.format(formatting["source_arf"]))
            return False

    try:
        generate_fixes_remotely(test_env, formatting, verbose_path)
    except Exception as exc:
        logging.error(str(exc))
        return False
    return True
229
230
231
def is_virtual_oscap_profile(profile):
    """ Test if the profile belongs to the so called category virtual
        from OpenSCAP available profiles. It can be (all) or other id we
        might come up in the future, it just needs to be encapsulated
        with parenthesis for example "(custom_profile)".
    """
    if profile is None:
        return False
    if profile == OSCAP_PROFILE_ALL_ID:
        return True
    return profile.startswith("(") and profile.endswith(")")
244
245
246
def process_profile_id(profile):
    """Return the profile id, single-quoting virtual profiles when the
    PROFILE_ALL_ID_SINGLE_QUOTED module switch is enabled."""
    if is_virtual_oscap_profile(profile) and PROFILE_ALL_ID_SINGLE_QUOTED:
        return "'{}'".format(profile)
    return profile
255
256
257
class GenericRunner(object):
    """Base class driving one oscap scan/remediation life cycle.

    Subclasses supply the concrete oscap invocation (make_oscap_call),
    the remediation strategy (remediation) and the naming scheme for the
    produced files (the _get_*_basename methods).
    """

    def __init__(self, environment, profile, datastream, benchmark_id):
        # Test environment wrapper used to run scans and convert reports.
        self.environment = environment
        self.profile = profile
        self.datastream = datastream
        self.benchmark_id = benchmark_id

        # Artifact paths for the current stage; filled in by run_stage()
        # via the _make_*_path() helpers below.
        self.arf_basename = ''
        self.arf_path = ''
        self.verbose_path = ''
        self.report_path = ''
        self.results_path = ''
        self.stage = 'undefined'

        # Behavior toggles; callers/subclasses may override after construction.
        self.clean_files = False
        self.create_reports = True
        self.manual_debug = False
        # Files scheduled for removal by _remove_files_to_clean().
        self._filenames_to_clean_afterwards = set()

        # Components of the oscap command line; combined by get_command.
        self.command_base = []
        self.command_options = []
        self.command_operands = []
        # number of seconds to sleep after reboot of vm to let
        # the system to finish startup, there were problems with
        # temporary files created by Dracut during image generation interfering
        # with the scan
        self.time_to_finish_startup = 30

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Best-effort cleanup of generated artifacts (honors clean_files).
        self._remove_files_to_clean()

    def _make_arf_path(self):
        """Compute the ARF basename and its path under the log directory."""
        self.arf_basename = self._get_arf_basename()
        self.arf_path = os.path.join(LogHelper.LOG_DIR, self.arf_basename)

    def _get_arf_basename(self):
        # Subclasses define the naming scheme for ARF files.
        raise NotImplementedError()

    def _make_verbose_path(self):
        """Pick a non-clashing path for the verbose oscap log."""
        verbose_basename = self._get_verbose_basename()
        verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_basename)
        self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')

    def _get_verbose_basename(self):
        # Subclasses define the naming scheme for verbose logs.
        raise NotImplementedError()

    def _make_report_path(self):
        """Pick a non-clashing path for the HTML report."""
        report_basename = self._get_report_basename()
        report_path = os.path.join(LogHelper.LOG_DIR, report_basename)
        self.report_path = LogHelper.find_name(report_path, '.html')

    def _get_report_basename(self):
        # Subclasses define the naming scheme for HTML reports.
        raise NotImplementedError()

    def _make_results_path(self):
        """Pick a non-clashing path for the XML results file."""
        results_basename = self._get_results_basename()
        results_path = os.path.join(LogHelper.LOG_DIR, results_basename)
        self.results_path = LogHelper.find_name(results_path, '.xml')

    def _get_results_basename(self):
        # Subclasses define the naming scheme for XML result files.
        raise NotImplementedError()

    def _generate_report_file(self):
        """Request an HTML report from oscap and schedule it for cleanup."""
        self.command_options.extend([
            '--report', self.report_path,
        ])
        self._filenames_to_clean_afterwards.add(self.report_path)

    def _wait_for_continue(self):
        """ In case user requests to leave machine in failed state for hands
        on debugging, ask for keypress to continue."""
        input_func("Paused for manual debugging. Continue by pressing return.")

    def prepare_online_scanning_arguments(self):
        """Add the common options and the datastream operand for a scan."""
        self.command_options.extend([
            '--benchmark-id', self.benchmark_id,
            '--profile', self.profile,
            '--progress', '--oval-results',
        ])
        self.command_operands.append(self.datastream)

    def _remove_files_to_clean(self):
        """Remove scheduled files (only when clean_files is enabled).

        Each filename is dropped from the schedule even if removal fails,
        so errors are reported once, not repeatedly.
        """
        if self.clean_files:
            # Iterate over a snapshot: the set is mutated inside the loop.
            for fname in tuple(self._filenames_to_clean_afterwards):
                try:
                    if os.path.exists(fname):
                        os.remove(fname)
                except OSError:
                    logging.error(
                        "Failed to cleanup file '{0}'"
                        .format(fname))
                finally:
                    self._filenames_to_clean_afterwards.remove(fname)

    def run_stage(self, stage):
        """Run one stage ('initial', 'remediation' or 'final').

        Rebuilds artifact paths and the command-line skeleton, dispatches
        to the stage method, then logs pass/notapplicable/fail based on
        the stage result (1 = pass, 2 = notapplicable, anything else = fail).
        Returns the stage result.
        """
        self.stage = stage

        self._make_verbose_path()
        self._make_report_path()
        self._make_arf_path()
        self._make_results_path()

        # Reset the command skeleton for this stage.
        self.command_base = []
        self.command_options = ['--verbose', 'DEVEL']
        self.command_operands = []

        result = None
        if stage == 'initial':
            result = self.initial()
        elif stage == 'remediation':
            result = self.remediation()
        elif stage == 'final':
            result = self.final()
        else:
            raise RuntimeError('Unknown stage: {}.'.format(stage))

        self._remove_files_to_clean()

        if result == 1:
            LogHelper.log_preloaded('pass')
            if self.clean_files:
                self._filenames_to_clean_afterwards.add(self.verbose_path)
                # NOTE(review): this condition covers every valid stage, so the
                # ARF is always scheduled despite the comment below - confirm
                # whether 'initial' was meant to be excluded.
                if stage in ['initial', 'remediation', 'final']:
                    # We need the initial ARF so we can generate the remediation out of it later
                    self._filenames_to_clean_afterwards.add(self.arf_path)

        elif result == 2:
            LogHelper.log_preloaded('notapplicable')
        else:
            LogHelper.log_preloaded('fail')
            if self.manual_debug:
                self._wait_for_continue()
        return result

    @property
    def get_command(self):
        """The full oscap command as a list (base + options + operands)."""
        return self.command_base + self.command_options + self.command_operands

    def make_oscap_call(self):
        # Subclasses perform the actual scan here.
        raise NotImplementedError()

    def initial(self):
        """Initial scan: request an ARF result and run the scan."""
        if self.create_reports and "--results-arf" not in self.command_options:
            self.command_options += ['--results-arf', self.arf_path]
        result = self.make_oscap_call()
        return result

    def remediation(self):
        # Subclasses implement their remediation strategy.
        raise NotImplementedError()

    def final(self):
        """Final scan after remediation: same shape as the initial scan."""
        if self.create_reports and "--results-arf" not in self.command_options:
            self.command_options += ['--results-arf', self.arf_path]
        result = self.make_oscap_call()
        return result

    def analyze(self, stage):
        """Triage the XML results and tag them with stage and runner name."""
        triaged_results = triage_xml_results(self.results_path)
        triaged_results["stage"] = stage
        triaged_results["runner"] = self.__class__.__name__
        return triaged_results

    def _get_formatting_dict_for_remediation(self):
        """Build the dict consumed by the remediation helper functions."""
        formatting = {
            'domain_ip': self.environment.domain_ip,
            'profile': self.profile,
            'datastream': self.datastream,
            'benchmark_id': self.benchmark_id
        }
        formatting['source_arf'] = self._get_initial_arf_path()
        formatting['source_arf_basename'] = os.path.basename(formatting['source_arf'])
        return formatting
432
433
434
class ProfileRunner(GenericRunner):
    """Runner that scans and remediates an entire profile at once."""

    def _get_arf_basename(self, stage=None):
        stage_name = self.stage if stage is None else stage
        return '{0}-{1}-arf.xml'.format(self.profile, stage_name)

    def _get_initial_arf_path(self):
        return os.path.join(LogHelper.LOG_DIR, self._get_arf_basename("initial"))

    def _get_verbose_basename(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_report_basename(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_results_basename(self):
        return '{0}-{1}-results'.format(self.profile, self.stage)

    def final(self):
        # Libvirt-based domains are rebooted before the final scan and
        # given time_to_finish_startup seconds to finish booting.
        if self.environment.name == 'libvirt-based':
            logging.info("Rebooting domain '{0}' before final scan."
                         .format(self.environment.domain_name))
            self.environment.reboot()
            logging.info("Waiting for {0} seconds to let the system finish startup."
                         .format(self.time_to_finish_startup))
            time.sleep(self.time_to_finish_startup)
        return super(ProfileRunner, self).final()

    def make_oscap_call(self):
        """Run the profile scan; True when oscap exits with 0 or 2."""
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        scan_arguments = self.command_options + self.command_operands
        returncode, self._oscap_output = self.environment.scan(
            scan_arguments, self.verbose_path)

        if self.create_reports:
            self.environment.arf_to_html(self.arf_path)

        if returncode in (0, 2):
            return True
        logging.error(('Profile run should end with return code 0 or 2 '
                       'not "{0}" as it did!').format(returncode))
        return False
476
477
478
class RuleRunner(GenericRunner):
    """Runner that scans and remediates a single rule within a profile.

    The expected result for a stage is stored in self.context and checked
    against the actual result parsed out of oscap's --progress output.
    """

    def __init__(
            self, environment, profile, datastream, benchmark_id,
            rule_id, script_name, dont_clean, no_reports, manual_debug):
        super(RuleRunner, self).__init__(
            environment, profile, datastream, benchmark_id,
        )

        self.rule_id = rule_id
        # Short form used in artifact filenames: the rule id without the
        # leading '...content_rule_' prefix.
        self.short_rule_id = re.sub(r'.*content_rule_', '', self.rule_id)
        # Expected result for the current stage; set via run_stage_with_context.
        self.context = None
        self.script_name = script_name
        self.clean_files = not dont_clean
        self.create_reports = not no_reports
        self.manual_debug = manual_debug

        # Raw stdout of the last oscap run; parsed for the rule result.
        self._oscap_output = ''

    def _get_arf_basename(self, stage=None):
        if stage is None:
            stage = self.stage
        return '{0}-{1}-{2}-arf.xml'.format(self.short_rule_id, self.script_name, stage)

    def _get_initial_arf_path(self):
        return os.path.join(LogHelper.LOG_DIR, self._get_arf_basename("initial"))

    def _get_verbose_basename(self):
        return '{0}-{1}-{2}'.format(self.short_rule_id, self.script_name, self.stage)

    def _get_report_basename(self):
        return '{0}-{1}-{2}'.format(self.short_rule_id, self.script_name, self.stage)

    def _get_results_basename(self):
        return '{0}-{1}-{2}-results-{3}'.format(
            self.short_rule_id, self.script_name, self.profile, self.stage)

    def make_oscap_call(self):
        """Scan just this rule and analyze oscap's progress output."""
        self.prepare_online_scanning_arguments()
        if self.create_reports:
            self._generate_report_file()
        # Restrict the scan to the single rule under test.
        self.command_options.extend(
            ['--rule', self.rule_id])
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        if self.create_reports:
            self.environment.arf_to_html(self.arf_path)

        return self._analyze_output_of_oscap_call()

    def final(self):
        """Final scan; succeeds only if the rule result matches the context."""
        success = super(RuleRunner, self).final()
        success = success and self._analyze_output_of_oscap_call()

        return success

    def _find_rule_result_in_output(self):
        """Return the last reported result for this rule, or 'notselected'."""
        # oscap --progress options outputs rule results to stdout in
        # following format:
        # xccdf_org....rule_accounts_password_minlen_login_defs:pass
        match = re.findall('{0}:(.*)$'.format(self.rule_id),
                           self._oscap_output,
                           re.MULTILINE)

        if not match:
            # When the rule is not selected, it won't match in output
            return "notselected"

        # When --remediation is executed, there will be two entries in
        # progress output, one for fail, and one for fixed, e.g.
        # xccdf_org....rule_accounts_password_minlen_login_defs:fail
        # xccdf_org....rule_accounts_password_minlen_login_defs:fixed
        # We are interested in the last one
        return match[-1]

    def _analyze_output_of_oscap_call(self):
        """Compare the parsed rule result against the expected context.

        Returns 1 (pass), 2 (notapplicable) or 0 (fail); these codes are
        what GenericRunner.run_stage() interprets when logging the outcome.
        """
        local_success = 1
        # check expected result
        rule_result = self._find_rule_result_in_output()

        if rule_result == "notapplicable":
            msg = (
                'Rule {0} evaluation resulted in {1}'
                .format(self.rule_id, rule_result))
            LogHelper.preload_log(logging.WARNING, msg, 'notapplicable')
            local_success = 2
            return local_success
        if rule_result != self.context:
            local_success = 0
            if rule_result == 'notselected':
                msg = (
                    'Rule {0} has not been evaluated! '
                    'Wrong profile selected in test scenario?'
                    .format(self.rule_id))
            else:
                msg = (
                    'Rule evaluation resulted in {0}, '
                    'instead of expected {1} during {2} stage '
                    .format(rule_result, self.context, self.stage)
                )
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return local_success

    def _get_formatting_dict_for_remediation(self):
        """Extend the base formatting dict with this rule's id."""
        fmt = super(RuleRunner, self)._get_formatting_dict_for_remediation()
        fmt['rule_id'] = self.rule_id

        return fmt

    def run_stage_with_context(self, stage, context):
        """Run *stage* expecting *context* (e.g. 'fail', 'fixed') as result."""
        self.context = context
        return self.run_stage(stage)
590
591
592
class OscapProfileRunner(ProfileRunner):
    """Profile runner that uses oscap's built-in online remediation."""

    def remediation(self):
        """Re-run the scan with --remediate so oscap applies fixes itself."""
        self.command_options.append('--remediate')
        return self.make_oscap_call()
596
597
598
class AnsibleProfileRunner(ProfileRunner):
    """Profile runner that remediates with a generated Ansible playbook."""

    def initial(self):
        # The ARF from the initial scan is the input for playbook generation.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(AnsibleProfileRunner, self).initial()

    def remediation(self):
        """Generate and run a per-profile Ansible playbook."""
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.profile)
        formatting['playbook'] = os.path.join(
            LogHelper.LOG_DIR, formatting['output_file'])

        return run_stage_remediation_ansible(
            'profile', self.environment, formatting, self.verbose_path)
612
613
614
class BashProfileRunner(ProfileRunner):
    """Profile runner that remediates with a generated bash script."""

    def initial(self):
        # The ARF from the initial scan is the input for script generation.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(BashProfileRunner, self).initial()

    def remediation(self):
        """Generate and run a per-profile bash remediation script."""
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.profile)

        return run_stage_remediation_bash(
            'profile', self.environment, formatting, self.verbose_path)
624
625
626
class OscapRuleRunner(RuleRunner):
    """Rule runner that uses oscap's built-in online remediation."""

    def remediation(self):
        """Re-run the scan with --remediate, keeping the ARF output."""
        self.command_options.extend(['--remediate', '--results-arf', self.arf_path])
        return self.make_oscap_call()

    def final(self):
        """ There is no need to run final scan again - result won't be different
        to what we already have in remediation step."""
        return True
636
637
638
class BashRuleRunner(RuleRunner):
    """Rule runner that remediates with a generated bash script."""

    def initial(self):
        # The ARF from the initial scan is the input for script generation.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(BashRuleRunner, self).initial()

    def remediation(self):
        """Generate and run a per-rule bash remediation script."""
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.rule_id)

        return run_stage_remediation_bash(
            'rule', self.environment, formatting, self.verbose_path)
650
651
652
class AnsibleRuleRunner(RuleRunner):
    """Rule runner that remediates with a generated Ansible playbook."""

    def initial(self):
        # The ARF from the initial scan is the input for playbook generation.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(AnsibleRuleRunner, self).initial()

    def remediation(self):
        """Generate and run a per-rule Ansible playbook."""
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.rule_id)
        formatting['playbook'] = os.path.join(
            LogHelper.LOG_DIR, formatting['output_file'])

        return run_stage_remediation_ansible(
            'rule', self.environment, formatting, self.verbose_path)
665
666
667
class Checker(object):
    """Base class for test executors.

    Manages the test environment life cycle around a run:
    start() -> _test_target() -> finalize(). Subclasses implement
    _test_target() and _run_test().
    """

    def __init__(self, test_env):
        # NOTE(review): this parameter shadows the imported ssg_test_suite
        # test_env module within __init__ only; other methods (e.g.
        # run_test_for_all_profiles) still refer to the module.
        self.test_env = test_env
        self.executed_tests = 0

        # Scan inputs; expected to be filled in by subclasses/callers.
        self.datastream = ""
        self.benchmark_id = ""
        self.remediate_using = ""
        self.benchmark_cpes = set()

        # Timestamp string identifying this test run.
        now = datetime.datetime.now()
        self.test_timestamp_str = now.strftime("%Y-%m-%d %H:%M")

    def test_target(self):
        """Run the whole test sequence, always finalizing the environment.

        Keyboard interrupts, runtime errors and timeouts terminate the run
        gracefully (logged, not propagated).
        """
        self.start()
        try:
            self._test_target()
        except KeyboardInterrupt:
            logging.info("Terminating the test run due to keyboard interrupt.")
        except RuntimeError as exc:
            logging.error("Terminating due to error: {msg}.".format(msg=str(exc)))
        except TimeoutException as exc:
            logging.error("Terminating due to timeout: {msg}".format(msg=str(exc)))
        finally:
            self.finalize()

    def run_test_for_all_profiles(self, profiles, test_data=None):
        """Run the test for each profile.

        With more than one profile, the environment state is saved once and
        restored between profiles so each test starts from "prepared".
        """
        if len(profiles) > 1:
            with test_env.SavedState.create_from_environment(self.test_env, "prepared") as state:
                args_list = [(p, test_data) for p in profiles]
                state.map_on_top(self._run_test, args_list)
        elif profiles:
            self._run_test(profiles[0], test_data)

    def _test_target(self):
        # Subclasses implement the actual test sequence.
        raise NotImplementedError()

    def _run_test(self, profile, test_data):
        # Subclasses implement a single profile's test run.
        raise NotImplementedError()

    def start(self):
        """Start the test environment; raises RuntimeError on failure."""
        self.executed_tests = 0

        try:
            self.test_env.start()
        except Exception as exc:
            msg = ("Failed to start test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)

    def finalize(self):
        """Tear down the test environment; raises RuntimeError on failure."""
        if not self.executed_tests:
            logging.warning("Nothing has been tested!")

        try:
            self.test_env.finalize()
        except Exception as exc:
            msg = ("Failed to finalize test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)
727
728
729
# Maps a remediation means name to the ProfileRunner subclass implementing it.
REMEDIATION_PROFILE_RUNNERS = {
    'oscap': OscapProfileRunner,
    'bash': BashProfileRunner,
    'ansible': AnsibleProfileRunner,
}


# Maps a remediation means name to the RuleRunner subclass implementing it.
REMEDIATION_RULE_RUNNERS = {
    'oscap': OscapRuleRunner,
    'bash': BashRuleRunner,
    'ansible': AnsibleRuleRunner,
}


# Maps the chosen runner to the remediation language it effectively uses
# (presumably 'oscap' maps to 'bash' because oscap --remediate applies
# the bash fixes embedded in the datastream - confirm).
REMEDIATION_RUNNER_TO_REMEDIATION_MEANS = {
    'oscap': 'bash',
    'bash': 'bash',
    'ansible': 'ansible',
}
748