Test Failed
Push — master ( 05831d...bcc24a ) by Jan (02:33, queued 12s)

get_result_id_from_arf() (rated A)

Complexity:     Conditions 3
Size:           Total Lines 12, Code Lines 12
Duplication:    Lines 0, Ratio 0 %
Code Coverage:  Tests 0, CRAP Score 12
Importance:     Changes 0

Raw metrics:

    Metric  Value
    cc      3
    eloc    12
    nop     2
    dl      0
    loc     12
    ccs     0
    cts     10
    cp      0
    crap    12
    rs      9.8
    c       0
    b       0
    f       0
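
The CRAP score above follows directly from complexity and coverage. A minimal
sketch, assuming the widely used Savoia/Evans formula and that "cc" and "cp"
above denote cyclomatic complexity and coverage percentage:

    # CRAP(m) = comp(m)**2 * (1 - cov(m)/100)**3 + comp(m)
    def crap_score(complexity, coverage_percent):
        uncovered = 1.0 - coverage_percent / 100.0
        return complexity ** 2 * uncovered ** 3 + complexity

    print(crap_score(3, 0))  # 12.0 -- matches the reported CRAP Score of 12

With zero coverage the penalty term is maximal, which is why this 12-line,
complexity-3 helper already scores 12.
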
#!/usr/bin/env python
from __future__ import print_function

import logging
import os.path
import re
import collections
import xml.etree.ElementTree
import json
import datetime
import socket
import sys
import time
import subprocess

from ssg.constants import OSCAP_PROFILE_ALL_ID

from ssg_test_suite.log import LogHelper
from ssg_test_suite import test_env
from ssg_test_suite import common

from ssg.shims import input_func

# Needed for compatibility as there is no TimeoutError in python2.
if sys.version_info[0] < 3:
    TimeoutException = socket.timeout
else:
    TimeoutException = TimeoutError

logging.getLogger(__name__).addHandler(logging.NullHandler())

_CONTEXT_RETURN_CODES = {'pass': 0,
                         'fail': 2,
                         'error': 1,
                         'notapplicable': 0,
                         'fixed': 0}

_ANSIBLE_TEMPLATE = 'urn:xccdf:fix:script:ansible'
_BASH_TEMPLATE = 'urn:xccdf:fix:script:sh'
_XCCDF_NS = 'http://checklists.nist.gov/xccdf/1.2'


PROFILE_ALL_ID_SINGLE_QUOTED = False


def analysis_to_serializable(analysis):
    result = dict(analysis)
    for key, value in analysis.items():
        if isinstance(value, set):
            result[key] = tuple(value)
    return result


def save_analysis_to_json(analysis, output_fname):
    analysis2 = analysis_to_serializable(analysis)
    with open(output_fname, "w") as f:
        json.dump(analysis2, f)
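
# Illustrative example, not part of the original module (data below is
# hypothetical): sets are turned into tuples so that the analysis dict
# becomes JSON-serializable, e.g.
#   analysis_to_serializable({"stage": "initial", "fail": {"rule_a"}})
#   returns {"stage": "initial", "fail": ("rule_a",)}
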
def triage_xml_results(fname):
    tree = xml.etree.ElementTree.parse(fname)
    all_xml_results = tree.findall(".//{%s}rule-result" % _XCCDF_NS)

    triaged = collections.defaultdict(set)
    for result in list(all_xml_results):
        idref = result.get("idref")
        status = result.find("{%s}result" % _XCCDF_NS).text
        triaged[status].add(idref)

    return triaged
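
# Illustrative example, not part of the original module (rule IDs are
# hypothetical): for a result file with one passing and one failing rule,
# triage_xml_results() returns a defaultdict keyed by status, e.g.
#   {"pass": {"xccdf_org...rule_a"}, "fail": {"xccdf_org...rule_b"}}
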
def send_files_remote(verbose_path, remote_dir, domain_ip, *files):
    """Upload files to the VM."""
    # files is a list of absolute paths on the host
    success = True
    destination = 'root@{0}:{1}'.format(domain_ip, remote_dir)
    files_string = ' '.join(files)

    logging.debug('Uploading files {0} to {1}'.format(files_string,
                                                      destination))
    command = ['scp'] + list(common.SSH_ADDITIONAL_OPTS) + list(files) + [destination]
    if common.run_cmd_local(command, verbose_path)[0] != 0:
        logging.error('Failed to upload files {0}'.format(files_string))
        success = False
    return success


def get_file_remote(test_env, verbose_path, local_dir, remote_path):
    """Download a file from the VM."""
    # remote_path is an absolute path of a file on the remote machine
    success = True
    logging.debug('Downloading remote file {0} to {1}'
                  .format(remote_path, local_dir))
    with open(verbose_path, "a") as log_file:
        try:
            test_env.scp_download_file(remote_path, local_dir, log_file)
        except Exception:
            logging.error('Failed to download file {0}'.format(remote_path))
            success = False
    return success


def find_result_id_in_output(output):
    match = re.search('result id.*$', output, re.IGNORECASE | re.MULTILINE)
    if match is None:
        return None
    # Return the rightmost word of the match, which is the result ID.
    return match.group(0).split()[-1]
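
# Illustrative example, not part of the original module: `oscap info` output
# for an ARF typically contains a line such as (the ID below is hypothetical)
#   Result ID: xccdf_org.open-scap_testresult_example
# from which the function extracts the last whitespace-separated word:
#   find_result_id_in_output("Result ID: xccdf_org.open-scap_testresult_example")
#   returns "xccdf_org.open-scap_testresult_example"
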
def get_result_id_from_arf(arf_path, verbose_path):
    command = ['oscap', 'info', arf_path]
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    if returncode != 0:
        raise RuntimeError('{0} returned exit code {1}'
                           .format(command_string, returncode))
    res_id = find_result_id_in_output(output)
    if res_id is None:
        raise RuntimeError('Failed to find result ID in {0}'
                           .format(arf_path))
    return res_id


def single_quote_string(input_string):
    # Strip embedded quotes (rather than escaping them), then wrap the whole
    # string in single quotes.
    result = input_string
    for char in "\"'":
        result = result.replace(char, "")
    return "'{}'".format(result)
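
# Illustrative example, not part of the original module:
#   single_quote_string('say "hi"')  returns  "'say hi'"
# Note that embedded quotes are removed, not escaped.
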
def generate_fixes_remotely(test_env, formatting, verbose_path):
    command_base = ['oscap', 'xccdf', 'generate', 'fix']
    command_options = [
        '--benchmark-id', formatting['benchmark_id'],
        '--profile', formatting['profile'],
        '--template', formatting['output_template'],
        '--output', '/{output_file}'.format(**formatting),
    ]
    command_operands = ['/{arf_file}'.format(**formatting)]
    if 'result_id' in formatting:
        command_options.extend(['--result-id', formatting['result_id']])

    command_components = command_base + command_options + command_operands
    command_string = ' '.join([single_quote_string(c) for c in command_components])
    with open(verbose_path, "a") as log_file:
        test_env.execute_ssh_command(command_string, log_file)
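
# Illustrative example, not part of the original module: after quoting, the
# assembled remote command looks like (IDs and paths hypothetical)
#   'oscap' 'xccdf' 'generate' 'fix' '--benchmark-id' '<benchmark>'
#   '--profile' '<profile>' '--template' 'urn:xccdf:fix:script:sh'
#   '--output' '/remediation.sh' '--result-id' '<result id>' '/scan-arf.xml'
# all on a single line.
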
def run_stage_remediation_ansible(run_type, test_env, formatting, verbose_path):
    """Return False on error, True on a successful Ansible playbook run."""
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    if not send_arf_to_remote_machine_and_generate_remediations_there(
            run_type, test_env, formatting, verbose_path):
        return False
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False
    command = (
        'ansible-playbook', '-v', '-i', '{0},'.format(formatting['domain_ip']),
        '-u', 'root', '--ssh-common-args={0}'.format(' '.join(test_env.ssh_additional_options)),
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    # Append the output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'ab') as f:
        f.write('Stdout of "{}":'.format(command_string).encode("utf-8"))
        f.write(output.encode("utf-8"))
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True


def run_stage_remediation_bash(run_type, test_env, formatting, verbose_path):
    """Return False on error, True on a successful bash script run."""
    formatting['output_template'] = _BASH_TEMPLATE
    if not send_arf_to_remote_machine_and_generate_remediations_there(
            run_type, test_env, formatting, verbose_path):
        return False
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False

    command_string = '/bin/bash -x /{output_file}'.format(**formatting)

    with open(verbose_path, "a") as log_file:
        try:
            test_env.execute_ssh_command(command_string, log_file)
        except Exception as exc:
            msg = (
                'Bash script remediation run has exited with return code {} '
                'instead of expected 0'.format(exc.returncode))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
            return False
    return True


def send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path):
    if run_type == 'rule':
        try:
            res_id = get_result_id_from_arf(formatting['arf'], verbose_path)
        except Exception as exc:
            logging.error(str(exc))
            return False
        formatting['result_id'] = res_id

    with open(verbose_path, "a") as log_file:
        try:
            test_env.scp_upload_file(formatting["arf"], "/", log_file)
        except Exception:
            return False

    try:
        generate_fixes_remotely(test_env, formatting, verbose_path)
    except Exception as exc:
        logging.error(str(exc))
        return False
    return True
def is_virtual_oscap_profile(profile):
    """Test whether the profile belongs to the so-called "virtual" category
    of OpenSCAP profiles. Currently this means the (all) profile, but any
    other ID we might come up with in the future qualifies as well; it just
    needs to be wrapped in parentheses, for example "(custom_profile)".
    """
    if profile is not None:
        if profile == OSCAP_PROFILE_ALL_ID:
            return True
        if profile.startswith("(") and profile.endswith(")"):
            return True
    return False


def process_profile_id(profile):
    # If the profile is virtual, wrap it in single quotes when configured to.
    if is_virtual_oscap_profile(profile) and PROFILE_ALL_ID_SINGLE_QUOTED:
        return "'{}'".format(profile)
    return profile
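
# Illustrative examples, not part of the original module:
#   is_virtual_oscap_profile("(all)")                  returns True
#   is_virtual_oscap_profile("(custom_profile)")       returns True
#   is_virtual_oscap_profile("xccdf_org...profile_x")  returns False
# With PROFILE_ALL_ID_SINGLE_QUOTED set to True, process_profile_id("(all)")
# returns "'(all)'" instead of "(all)".
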
class GenericRunner(object):
    def __init__(self, environment, profile, datastream, benchmark_id):
        self.environment = environment
        self.profile = profile
        self.datastream = datastream
        self.benchmark_id = benchmark_id

        self.arf_file = ''
        self.arf_path = ''
        self.verbose_path = ''
        self.report_path = ''
        self.results_path = ''
        self.stage = 'undefined'

        self.clean_files = False
        self.create_reports = True
        self.manual_debug = False
        self._filenames_to_clean_afterwards = set()

        self.command_base = []
        self.command_options = []
        self.command_operands = []
        # Number of seconds to sleep after a reboot of the VM, to let the
        # system finish startup. There were problems with temporary files
        # created by Dracut during image generation interfering with the scan.
        self.time_to_finish_startup = 30

    def _make_arf_path(self):
        self.arf_file = self._get_arf_file()
        self.arf_path = os.path.join(LogHelper.LOG_DIR, self.arf_file)

    def _get_arf_file(self):
        raise NotImplementedError()

    def _make_verbose_path(self):
        verbose_file = self._get_verbose_file()
        verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_file)
        self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')

    def _get_verbose_file(self):
        raise NotImplementedError()

    def _make_report_path(self):
        report_file = self._get_report_file()
        report_path = os.path.join(LogHelper.LOG_DIR, report_file)
        self.report_path = LogHelper.find_name(report_path, '.html')

    def _get_report_file(self):
        raise NotImplementedError()

    def _make_results_path(self):
        results_file = self._get_results_file()
        results_path = os.path.join(LogHelper.LOG_DIR, results_file)
        self.results_path = LogHelper.find_name(results_path, '.xml')

    def _get_results_file(self):
        raise NotImplementedError()

    def _generate_report_file(self):
        self.command_options.extend([
            '--report', self.report_path,
        ])
        self._filenames_to_clean_afterwards.add(self.report_path)

    def _wait_for_continue(self):
        """In case the user requests to leave the machine in a failed state
        for hands-on debugging, wait for a keypress to continue."""
        input_func("Paused for manual debugging. Continue by pressing return.")

    def prepare_online_scanning_arguments(self):
        self.command_options.extend([
            '--benchmark-id', self.benchmark_id,
            '--profile', self.profile,
            '--progress', '--oval-results',
        ])
        self.command_operands.append(self.datastream)

    def run_stage(self, stage):
        self.stage = stage

        self._make_verbose_path()
        self._make_report_path()
        self._make_arf_path()
        self._make_results_path()

        self.command_base = []
        self.command_options = ['--verbose', 'DEVEL']
        self.command_operands = []

        # Stage callbacks return 1/True (pass), 2 (notapplicable),
        # or 0/False (fail).
        result = None
        if stage == 'initial':
            result = self.initial()
        elif stage == 'remediation':
            result = self.remediation()
        elif stage == 'final':
            result = self.final()
        else:
            raise RuntimeError('Unknown stage: {}.'.format(stage))

        if self.clean_files:
            for fname in tuple(self._filenames_to_clean_afterwards):
                try:
                    os.remove(fname)
                except OSError:
                    logging.error(
                        "Failed to cleanup file '{0}'"
                        .format(fname))
                finally:
                    self._filenames_to_clean_afterwards.remove(fname)

        if result == 1:
            LogHelper.log_preloaded('pass')
            if self.clean_files:
                files_to_remove = [self.verbose_path]
                if stage in ['initial', 'remediation', 'final']:
                    files_to_remove.append(self.arf_path)

                for fname in tuple(files_to_remove):
                    try:
                        if os.path.exists(fname):
                            os.remove(fname)
                    except OSError:
                        logging.error(
                            "Failed to cleanup file '{0}'"
                            .format(fname))
        elif result == 2:
            LogHelper.log_preloaded('notapplicable')
        else:
            LogHelper.log_preloaded('fail')
            if self.manual_debug:
                self._wait_for_continue()
        return result

    @property
    def get_command(self):
        return self.command_base + self.command_options + self.command_operands

    def make_oscap_call(self):
        raise NotImplementedError()

    def initial(self):
        if self.create_reports:
            self.command_options += ['--results-arf', self.arf_path]
        result = self.make_oscap_call()
        return result

    def remediation(self):
        raise NotImplementedError()

    def final(self):
        if self.create_reports:
            self.command_options += ['--results-arf', self.arf_path]
        result = self.make_oscap_call()
        return result

    def analyze(self, stage):
        triaged_results = triage_xml_results(self.results_path)
        triaged_results["stage"] = stage
        triaged_results["runner"] = self.__class__.__name__
        return triaged_results

    def _get_formatting_dict_for_remediation(self):
        formatting = {
            'domain_ip': self.environment.domain_ip,
            'profile': self.profile,
            'datastream': self.datastream,
            'benchmark_id': self.benchmark_id
        }
        formatting['arf'] = self.arf_path
        formatting['arf_file'] = self.arf_file
        return formatting


class ProfileRunner(GenericRunner):
    def _get_arf_file(self):
        return '{0}-{1}-arf.xml'.format(self.profile, self.stage)

    def _get_verbose_file(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_report_file(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_results_file(self):
        return '{0}-{1}-results'.format(self.profile, self.stage)

    def final(self):
        if self.environment.name == 'libvirt-based':
            logging.info("Rebooting domain '{0}' before final scan."
                         .format(self.environment.domain_name))
            self.environment.reboot()
            logging.info("Waiting for {0} seconds to let the system finish startup."
                         .format(self.time_to_finish_startup))
            time.sleep(self.time_to_finish_startup)
        return GenericRunner.final(self)

    def make_oscap_call(self):
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        if self.create_reports:
            self.environment.arf_to_html(self.arf_path)

        if returncode not in [0, 2]:
            logging.error(('Profile run should end with return code 0 or 2, '
                           'not "{0}" as it did!').format(returncode))
            return False
        return True


class RuleRunner(GenericRunner):
    def __init__(
            self, environment, profile, datastream, benchmark_id,
            rule_id, script_name, dont_clean, no_reports, manual_debug):
        super(RuleRunner, self).__init__(
            environment, profile, datastream, benchmark_id,
        )

        self.rule_id = rule_id
        self.short_rule_id = re.sub(r'.*content_rule_', '', self.rule_id)
        self.context = None
        self.script_name = script_name
        self.clean_files = not dont_clean
        self.create_reports = not no_reports
        self.manual_debug = manual_debug

        self._oscap_output = ''

    def _get_arf_file(self):
        return '{0}-{1}-{2}-arf.xml'.format(self.short_rule_id, self.script_name, self.stage)

    def _get_verbose_file(self):
        return '{0}-{1}-{2}'.format(self.short_rule_id, self.script_name, self.stage)

    def _get_report_file(self):
        return '{0}-{1}-{2}'.format(self.short_rule_id, self.script_name, self.stage)

    def _get_results_file(self):
        return '{0}-{1}-{2}-results-{3}'.format(
            self.short_rule_id, self.script_name, self.profile, self.stage)

    def make_oscap_call(self):
        self.prepare_online_scanning_arguments()
        if self.create_reports:
            self._generate_report_file()
        self.command_options.extend(
            ['--rule', self.rule_id])
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        if self.create_reports:
            self.environment.arf_to_html(self.arf_path)

        return self._analyze_output_of_oscap_call()

    def final(self):
        success = super(RuleRunner, self).final()
        success = success and self._analyze_output_of_oscap_call()

        return success

    def _find_rule_result_in_output(self):
        # The oscap --progress option outputs rule results to stdout in the
        # following format:
        # xccdf_org....rule_accounts_password_minlen_login_defs:pass
        match = re.findall('{0}:(.*)$'.format(self.rule_id),
                           self._oscap_output,
                           re.MULTILINE)

        if not match:
            # When the rule is not selected, it won't appear in the output.
            return "notselected"

        # When --remediate is used, there will be two entries in the
        # progress output, one for fail and one for fixed, e.g.
        # xccdf_org....rule_accounts_password_minlen_login_defs:fail
        # xccdf_org....rule_accounts_password_minlen_login_defs:fixed
        # We are interested in the last one.
        return match[-1]

    def _analyze_output_of_oscap_call(self):
        local_success = 1
        # Check the expected result.
        rule_result = self._find_rule_result_in_output()

        if rule_result == "notapplicable":
            msg = (
                'Rule {0} evaluation resulted in {1}'
                .format(self.rule_id, rule_result))
            LogHelper.preload_log(logging.WARNING, msg, 'notapplicable')
            local_success = 2
            return local_success
        if rule_result != self.context:
            local_success = 0
            if rule_result == 'notselected':
                msg = (
                    'Rule {0} has not been evaluated! '
                    'Wrong profile selected in test scenario?'
                    .format(self.rule_id))
            else:
                msg = (
                    'Rule evaluation resulted in {0} '
                    'instead of expected {1} during the {2} stage.'
                    .format(rule_result, self.context, self.stage))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return local_success

    def _get_formatting_dict_for_remediation(self):
        fmt = super(RuleRunner, self)._get_formatting_dict_for_remediation()
        fmt['rule_id'] = self.rule_id

        return fmt

    def run_stage_with_context(self, stage, context):
        self.context = context
        return self.run_stage(stage)


class OscapProfileRunner(ProfileRunner):
    def remediation(self):
        self.command_options += ['--remediate']
        return self.make_oscap_call()


class AnsibleProfileRunner(ProfileRunner):
    def initial(self):
        self.command_options += ['--results-arf', self.arf_path]
        return super(AnsibleProfileRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.profile)
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
                                              formatting['output_file'])

        return run_stage_remediation_ansible('profile', self.environment,
                                             formatting,
                                             self.verbose_path)


class BashProfileRunner(ProfileRunner):
    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.profile)

        return run_stage_remediation_bash('profile', self.environment, formatting, self.verbose_path)


class OscapRuleRunner(RuleRunner):
    def remediation(self):
        self.command_options += ['--remediate']
        self.command_options += ['--results-arf', self.arf_path]
        return self.make_oscap_call()

    def final(self):
        """There is no need to run the final scan again - the result won't
        differ from what we already have from the remediation step."""
        return True


class BashRuleRunner(RuleRunner):
    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.rule_id)

        success = run_stage_remediation_bash('rule', self.environment, formatting, self.verbose_path)
        return success


class AnsibleRuleRunner(RuleRunner):
    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.rule_id)
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
                                              formatting['output_file'])

        success = run_stage_remediation_ansible('rule', self.environment, formatting, self.verbose_path)
        return success


class Checker(object):
    def __init__(self, test_env):
        self.test_env = test_env
        self.executed_tests = 0

        self.datastream = ""
        self.benchmark_id = ""
        self.remediate_using = ""
        self.benchmark_cpes = set()

        now = datetime.datetime.now()
        self.test_timestamp_str = now.strftime("%Y-%m-%d %H:%M")

    def test_target(self, target):
        self.start()
        try:
            self._test_target(target)
        except KeyboardInterrupt:
            logging.info("Terminating the test run due to keyboard interrupt.")
        except RuntimeError as exc:
            logging.error("Terminating due to error: {msg}.".format(msg=str(exc)))
        except TimeoutException as exc:
            logging.error("Terminating due to timeout: {msg}".format(msg=str(exc)))
        finally:
            self.finalize()

    def run_test_for_all_profiles(self, profiles, test_data=None):
        if len(profiles) > 1:
            with test_env.SavedState.create_from_environment(self.test_env, "prepared") as state:
                args_list = [(p, test_data) for p in profiles]
                state.map_on_top(self._run_test, args_list)
        elif profiles:
            self._run_test(profiles[0], test_data)

    def _test_target(self, target):
        raise NotImplementedError()

    def _run_test(self, profile, test_data):
        raise NotImplementedError()

    def start(self):
        self.executed_tests = 0

        try:
            self.test_env.start()
        except Exception as exc:
            msg = ("Failed to start test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)

    def finalize(self):
        if not self.executed_tests:
            logging.warning("Nothing has been tested!")

        try:
            self.test_env.finalize()
        except Exception as exc:
            msg = ("Failed to finalize test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)


REMEDIATION_PROFILE_RUNNERS = {
    'oscap': OscapProfileRunner,
    'bash': BashProfileRunner,
    'ansible': AnsibleProfileRunner,
}


REMEDIATION_RULE_RUNNERS = {
    'oscap': OscapRuleRunner,
    'bash': BashRuleRunner,
    'ansible': AnsibleRuleRunner,
}


REMEDIATION_RUNNER_TO_REMEDIATION_MEANS = {
    'oscap': 'bash',
    'bash': 'bash',
    'ansible': 'ansible',
}
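
# Illustrative usage sketch, not part of the original module; the environment,
# datastream and ID values below are hypothetical. A bash rule remediation
# test is driven through the runner like this:
#   runner_cls = REMEDIATION_RULE_RUNNERS['bash']
#   runner = runner_cls(env, profile, 'ds.xml', benchmark_id, rule_id,
#                       'correct.pass.sh', dont_clean=False,
#                       no_reports=False, manual_debug=False)
#   runner.run_stage_with_context('initial', 'fail')
#   runner.run_stage_with_context('remediation', 'fixed')
#   runner.run_stage_with_context('final', 'pass')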