Test Failed
Pull Request — master (#8025)
by Matěj
02:25
created

ProfileRunner._get_report_basename()   A

Complexity

Conditions 1

Size

Total Lines 2
Code Lines 2

Duplication

Lines 0
Ratio 0 %

Code Coverage

Tests 0
CRAP Score 2

Importance

Changes 0
Metric Value
cc 1
eloc 2
nop 1
dl 0
loc 2
ccs 0
cts 2
cp 0
crap 2
rs 10
c 0
b 0
f 0
1
#!/usr/bin/env python
2
from __future__ import print_function
3
4
import logging
5
import os.path
6
import re
7
import collections
8
import xml.etree.ElementTree
9
import json
10
import datetime
11
import socket
12
import sys
13
import time
14
import subprocess
15
16
from ssg.constants import OSCAP_PROFILE_ALL_ID
17
18
from ssg_test_suite.log import LogHelper
19
from ssg_test_suite import test_env
20
from ssg_test_suite import common
21
22
from ssg.shims import input_func
23
24
# Python 2 has no built-in TimeoutError, so fall back to socket.timeout
# there; on Python 3 the built-in exception is used directly.
if sys.version_info[0] >= 3:
    TimeoutException = TimeoutError
else:
    TimeoutException = socket.timeout

# Library-style logging setup: emit nothing unless the application
# configures its own handlers.
logging.getLogger(__name__).addHandler(logging.NullHandler())

# Maps a scan-result context string to the exit code it corresponds to.
_CONTEXT_RETURN_CODES = {
    'pass': 0,
    'fail': 2,
    'error': 1,
    'notapplicable': 0,
    'fixed': 0,
}

# Template URNs understood by `oscap xccdf generate fix`.
_ANSIBLE_TEMPLATE = 'urn:xccdf:fix:script:ansible'
_BASH_TEMPLATE = 'urn:xccdf:fix:script:sh'
# XCCDF 1.2 XML namespace used when parsing result documents.
_XCCDF_NS = 'http://checklists.nist.gov/xccdf/1.2'


# Whether virtual profile IDs such as "(all)" should be wrapped in single
# quotes when placed on a command line.
PROFILE_ALL_ID_SINGLE_QUOTED = False
44
45
46
def analysis_to_serializable(analysis):
    """Return a copy of *analysis* with JSON-unserializable set values
    converted to tuples.

    json.dump cannot encode set objects, so every set-like value is
    replaced with a tuple; all other values are kept as-is.
    """
    result = dict(analysis)
    for key, value in analysis.items():
        # isinstance also covers frozenset and set subclasses, unlike the
        # exact type() comparison it replaces.
        if isinstance(value, (set, frozenset)):
            result[key] = tuple(value)
    return result
52
53
54
def save_analysis_to_json(analysis, output_fname):
    """Serialize the analysis mapping to *output_fname* as JSON.

    Set values are converted to tuples first, as JSON cannot encode sets.
    """
    serializable = analysis_to_serializable(analysis)
    with open(output_fname, "w") as outfile:
        json.dump(serializable, outfile)
58
59
60
def triage_xml_results(fname):
    """Group rule IDs from an XCCDF result file by their evaluation status.

    Returns a defaultdict mapping a status string (e.g. 'pass', 'fail')
    to the set of rule idrefs that finished with that status.
    """
    document = xml.etree.ElementTree.parse(fname)
    triaged = collections.defaultdict(set)
    for rule_result in document.findall(".//{%s}rule-result" % _XCCDF_NS):
        status = rule_result.find("{%s}result" % _XCCDF_NS).text
        triaged[status].add(rule_result.get("idref"))
    return triaged
71
72
73
def send_files_remote(verbose_path, remote_dir, domain_ip, *files):
    """Upload files to VM.

    *files* are absolute paths on the host.  Returns True on success,
    False when the scp invocation fails.
    """
    destination = 'root@{0}:{1}'.format(domain_ip, remote_dir)
    files_string = ' '.join(files)

    logging.debug('Uploading files {0} to {1}'.format(files_string,
                                                      destination))
    command = ['scp'] + list(common.SSH_ADDITIONAL_OPTS) + list(files) + [destination]
    if common.run_cmd_local(command, verbose_path)[0] == 0:
        return True
    logging.error('Failed to upload files {0}'.format(files_string))
    return False
87
88
89
def get_file_remote(test_env, verbose_path, local_dir, remote_path):
    """Download a file from VM.

    *remote_path* is the absolute path of a file on the remote machine.
    Returns True on success, False when the download fails.
    """
    logging.debug('Downloading remote file {0} to {1}'
                  .format(remote_path, local_dir))
    downloaded = True
    with open(verbose_path, "a") as log_file:
        try:
            test_env.scp_download_file(remote_path, local_dir, log_file)
        except Exception:
            logging.error('Failed to download file {0}'.format(remote_path))
            downloaded = False
    return downloaded
102
103
104
def find_result_id_in_output(output):
    """Extract the TestResult ID from `oscap info` output, or None.

    The ID is the right-most whitespace-separated token of the line
    that mentions "result id" (case-insensitive).
    """
    match = re.search(r'result id.*$', output, re.IGNORECASE | re.MULTILINE)
    if match is not None:
        return match.group(0).split()[-1]
    return None
110
111
112
def get_result_id_from_arf(arf_path, verbose_path):
    """Run `oscap info` on an ARF file and return its TestResult ID.

    Raises RuntimeError when oscap exits non-zero or when no result ID
    can be found in its output.
    """
    command = ['oscap', 'info', arf_path]
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    if returncode != 0:
        raise RuntimeError('{0} returned {1} exit code'.
                           format(command_string, returncode))
    res_id = find_result_id_in_output(output)
    if res_id is not None:
        return res_id
    raise RuntimeError('Failed to find result ID in {0}'
                       .format(arf_path))
124
125
126
def single_quote_string(input):
    """Return *input* wrapped in single quotes, with any quote characters
    stripped out so the result is safe to splice into a shell command."""
    sanitized = input.replace('"', '').replace("'", '')
    return "'{}'".format(sanitized)
131
132
133
def generate_fixes_remotely(test_env, formatting, verbose_path):
    """Run `oscap xccdf generate fix` over SSH on the test environment.

    *formatting* supplies the benchmark ID, profile, template, output file
    name, source ARF basename and (for rule runs) the result ID.
    """
    components = [
        'oscap', 'xccdf', 'generate', 'fix',
        '--benchmark-id', formatting['benchmark_id'],
        '--profile', formatting['profile'],
        '--template', formatting['output_template'],
        '--output', '/{output_file}'.format(** formatting),
    ]
    if 'result_id' in formatting:
        components += ['--result-id', formatting['result_id']]
    components.append('/{source_arf_basename}'.format(** formatting))

    # Quote each component so the remote shell sees one token per argument.
    command_string = ' '.join(single_quote_string(part) for part in components)
    with open(verbose_path, "a") as log_file:
        test_env.execute_ssh_command(command_string, log_file)
149
150
151
def run_stage_remediation_ansible(run_type, test_env, formatting, verbose_path):
    """Remediate via a generated Ansible playbook.

    Generates the playbook on the remote machine, downloads it, and runs
    ansible-playbook against the target domain.  Returns False on error,
    or True in case of a successful Ansible playbook run.
    """
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False
    command = (
        'ansible-playbook', '-v', '-i', '{0},'.format(formatting['domain_ip']),
        # Fixed: `'-u' 'root'` was an accidental implicit string
        # concatenation yielding the single token '-uroot'; pass the
        # option and its value as separate argv items.
        '-u', 'root',
        '--ssh-common-args={0}'.format(' '.join(test_env.ssh_additional_options)),
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    # Appends output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'ab') as f:
        f.write('Stdout of "{}":'.format(command_string).encode("utf-8"))
        f.write(output.encode("utf-8"))
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True
179
180
181
def run_stage_remediation_bash(run_type, test_env, formatting, verbose_path):
    """Remediate via a generated bash script.

    Generates the fix script on the remote machine, downloads it, and
    executes it over SSH.  Returns False on error, or True in case of a
    successful bash scripts run.
    """
    formatting['output_template'] = _BASH_TEMPLATE
    send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path)
    if not get_file_remote(test_env, verbose_path, LogHelper.LOG_DIR,
                           '/' + formatting['output_file']):
        return False

    command_string = '/bin/bash -x /{output_file}'.format(** formatting)

    with open(verbose_path, "a") as log_file:
        try:
            test_env.execute_ssh_command(command_string, log_file)
        except Exception as exc:
            # Not every exception type carries a `returncode` attribute;
            # fall back to the exception text so this handler cannot itself
            # raise AttributeError and mask the original failure.
            returncode = getattr(exc, 'returncode', str(exc))
            msg = (
                'Bash script remediation run has exited with return code {} '
                'instead of expected 0'.format(returncode))
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
            return False
    return True
204
205
206
def send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, test_env, formatting, verbose_path):
    """Upload the source ARF to the VM and generate fix scripts from it.

    For rule runs, the TestResult ID is first extracted from the ARF and
    recorded in *formatting*.  Returns True on success and False on any
    failure (previously the function implicitly returned None on success,
    making the boolean contract inconsistent).
    """
    if run_type == 'rule':
        try:
            res_id = get_result_id_from_arf(formatting['source_arf'], verbose_path)
        except Exception as exc:
            logging.error(str(exc))
            return False
        formatting['result_id'] = res_id

    with open(verbose_path, "a") as log_file:
        try:
            test_env.scp_upload_file(formatting["source_arf"], "/", log_file)
        except Exception:
            return False

    try:
        generate_fixes_remotely(test_env, formatting, verbose_path)
    except Exception as exc:
        logging.error(str(exc))
        return False
    return True
227
228
229
def is_virtual_oscap_profile(profile):
    """Test if the profile belongs to the so-called "virtual" category of
    OpenSCAP profiles.

    A virtual profile is either the special (all) profile ID or any other
    ID encapsulated in parentheses, for example "(custom_profile)".
    """
    if profile is None:
        return False
    if profile == OSCAP_PROFILE_ALL_ID:
        return True
    return profile[:1] == "(" and profile[-1:] == ")"
242
243
244
def process_profile_id(profile):
    """Return the profile ID, single-quoted when it is a virtual profile
    and quoting is enabled via PROFILE_ALL_ID_SINGLE_QUOTED."""
    if is_virtual_oscap_profile(profile) and PROFILE_ALL_ID_SINGLE_QUOTED:
        return "'{}'".format(profile)
    return profile
253
254
255
class GenericRunner(object):
    """Base class for oscap scan/remediation runners.

    Subclasses supply the basenames for the produced artifacts (ARF,
    verbose log, HTML report, results XML) and implement
    make_oscap_call() plus remediation(); this class drives the
    initial/remediation/final stage machinery and the cleanup of
    generated files.  Usable as a context manager: files marked for
    cleanup are removed on exit.
    """

    def __init__(self, environment, profile, datastream, benchmark_id):
        # environment is a test-environment object (scan, reboot, scp, ...);
        # profile/datastream/benchmark_id identify what gets evaluated.
        self.environment = environment
        self.profile = profile
        self.datastream = datastream
        self.benchmark_id = benchmark_id

        # Paths of artifacts produced during a stage; filled in by the
        # _make_*_path helpers at the start of run_stage().
        self.arf_basename = ''
        self.arf_path = ''
        self.verbose_path = ''
        self.report_path = ''
        self.results_path = ''
        self.stage = 'undefined'

        # Behavior toggles, typically overridden by subclasses or callers.
        self.clean_files = False
        self.create_reports = True
        self.manual_debug = False
        # Files queued for removal by _remove_files_to_clean().
        self._filenames_to_clean_afterwards = set()

        # Pieces of the oscap command line, rebuilt for every stage.
        self.command_base = []
        self.command_options = []
        self.command_operands = []
        # number of seconds to sleep after reboot of vm to let
        # the system to finish startup, there were problems with
        # temporary files created by Dracut during image generation interfering
        # with the scan
        self.time_to_finish_startup = 30

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Best-effort cleanup of generated files on context exit.
        self._remove_files_to_clean()

    def _make_arf_path(self):
        """Compute the ARF artifact path for the current stage."""
        self.arf_basename = self._get_arf_basename()
        self.arf_path = os.path.join(LogHelper.LOG_DIR, self.arf_basename)

    def _get_arf_basename(self):
        # Subclass responsibility.
        raise NotImplementedError()

    def _make_verbose_path(self):
        """Compute a unique verbose-log path for the current stage."""
        verbose_basename = self._get_verbose_basename()
        verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_basename)
        self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')

    def _get_verbose_basename(self):
        # Subclass responsibility.
        raise NotImplementedError()

    def _make_report_path(self):
        """Compute a unique HTML report path for the current stage."""
        report_basename = self._get_report_basename()
        report_path = os.path.join(LogHelper.LOG_DIR, report_basename)
        self.report_path = LogHelper.find_name(report_path, '.html')

    def _get_report_basename(self):
        # Subclass responsibility.
        raise NotImplementedError()

    def _make_results_path(self):
        """Compute a unique results-XML path for the current stage."""
        results_basename = self._get_results_basename()
        results_path = os.path.join(LogHelper.LOG_DIR, results_basename)
        self.results_path = LogHelper.find_name(results_path, '.xml')

    def _get_results_basename(self):
        # Subclass responsibility.
        raise NotImplementedError()

    def _generate_report_file(self):
        """Request an HTML report and queue it for later cleanup."""
        self.command_options.extend([
            '--report', self.report_path,
        ])
        self._filenames_to_clean_afterwards.add(self.report_path)

    def _wait_for_continue(self):
        """ In case user requests to leave machine in failed state for hands
        on debugging, ask for keypress to continue."""
        input_func("Paused for manual debugging. Continue by pressing return.")

    def prepare_online_scanning_arguments(self):
        """Add the standard scan options and the datastream operand."""
        self.command_options.extend([
            '--benchmark-id', self.benchmark_id,
            '--profile', self.profile,
            '--progress', '--oval-results',
        ])
        self.command_operands.append(self.datastream)

    def _remove_files_to_clean(self):
        """Delete queued files if cleanup is enabled; the queue is always
        drained, even when a removal fails."""
        if self.clean_files:
            # Iterate over a snapshot since the set is mutated inside.
            for fname in tuple(self._filenames_to_clean_afterwards):
                try:
                    if os.path.exists(fname):
                        os.remove(fname)
                except OSError:
                    logging.error(
                        "Failed to cleanup file '{0}'"
                        .format(fname))
                finally:
                    self._filenames_to_clean_afterwards.remove(fname)

    def run_stage(self, stage):
        """Execute one stage ('initial', 'remediation' or 'final').

        Rebuilds artifact paths and the oscap command line, dispatches to
        the stage method, then logs the preloaded outcome.  Returns the
        stage result; 1 (or True - note True == 1) means pass, 2 means
        notapplicable, anything else is logged as fail.  Raises
        RuntimeError for an unknown stage name.
        """
        self.stage = stage

        self._make_verbose_path()
        self._make_report_path()
        self._make_arf_path()
        self._make_results_path()

        self.command_base = []
        self.command_options = ['--verbose', 'DEVEL']
        self.command_operands = []

        result = None
        if stage == 'initial':
            result = self.initial()
        elif stage == 'remediation':
            result = self.remediation()
        elif stage == 'final':
            result = self.final()
        else:
            raise RuntimeError('Unknown stage: {}.'.format(stage))

        self._remove_files_to_clean()

        if result == 1:
            LogHelper.log_preloaded('pass')
            if self.clean_files:
                self._filenames_to_clean_afterwards.add(self.verbose_path)
                if stage in ['initial', 'remediation', 'final']:
                    # We need the initial ARF so we can generate the remediation out of it later
                    self._filenames_to_clean_afterwards.add(self.arf_path)

        elif result == 2:
            LogHelper.log_preloaded('notapplicable')
        else:
            LogHelper.log_preloaded('fail')
            if self.manual_debug:
                self._wait_for_continue()
        return result

    @property
    def get_command(self):
        """The full oscap command line for the current stage, as a list."""
        return self.command_base + self.command_options + self.command_operands

    def make_oscap_call(self):
        # Subclass responsibility: run the scan and report its outcome.
        raise NotImplementedError()

    def initial(self):
        """Run the pre-remediation scan, recording results into the ARF."""
        if self.create_reports and "--results-arf" not in self.command_options:
            self.command_options += ['--results-arf', self.arf_path]
        result = self.make_oscap_call()
        return result

    def remediation(self):
        # Subclass responsibility: apply fixes between the scans.
        raise NotImplementedError()

    def final(self):
        """Run the post-remediation scan, recording results into the ARF."""
        if self.create_reports and "--results-arf" not in self.command_options:
            self.command_options += ['--results-arf', self.arf_path]
        result = self.make_oscap_call()
        return result

    def analyze(self, stage):
        """Triage the stage's results XML and tag it with stage/runner."""
        triaged_results = triage_xml_results(self.results_path)
        triaged_results["stage"] = stage
        triaged_results["runner"] = self.__class__.__name__
        return triaged_results

    def _get_formatting_dict_for_remediation(self):
        """Build the formatting dict consumed by the remediation helpers
        (run_stage_remediation_* and generate_fixes_remotely)."""
        formatting = {
            'domain_ip': self.environment.domain_ip,
            'profile': self.profile,
            'datastream': self.datastream,
            'benchmark_id': self.benchmark_id
        }
        formatting['source_arf'] = self._get_initial_arf_path()
        formatting['source_arf_basename'] = os.path.basename(formatting['source_arf'])
        return formatting
430
431
432
class ProfileRunner(GenericRunner):
    """Runner that scans and remediates a whole profile.

    Artifact names are derived from the profile name and current stage.
    """

    def _get_arf_basename(self, stage=None):
        """ARF basename for *stage* (defaults to the current stage)."""
        if stage is None:
            stage = self.stage
        return '{0}-{1}-arf.xml'.format(self.profile, stage)

    def _get_initial_arf_path(self):
        """Path of the ARF produced by the 'initial' stage."""
        return os.path.join(LogHelper.LOG_DIR, self._get_arf_basename("initial"))

    def _get_verbose_basename(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_report_basename(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_results_basename(self):
        return '{0}-{1}-results'.format(self.profile, self.stage)

    def final(self):
        """Run the final scan; on libvirt-based environments the domain is
        rebooted first and given time to settle (see
        time_to_finish_startup on the base class)."""
        if self.environment.name == 'libvirt-based':
            logging.info("Rebooting domain '{0}' before final scan."
                         .format(self.environment.domain_name))
            self.environment.reboot()
            logging.info("Waiting for {0} seconds to let the system finish startup."
                         .format(self.time_to_finish_startup))
            time.sleep(self.time_to_finish_startup)
        return GenericRunner.final(self)

    def make_oscap_call(self):
        """Run the profile scan.

        Returns True when oscap exits with 0 or 2 (2 is a valid 'some
        rules failed' exit), False otherwise.  True compares equal to 1
        in run_stage(), so a truthy return counts as 'pass'.
        """
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        if self.create_reports:
            self.environment.arf_to_html(self.arf_path)

        if returncode not in [0, 2]:
            logging.error(('Profile run should end with return code 0 or 2 '
                           'not "{0}" as it did!').format(returncode))
            return False
        return True
474
475
476
class RuleRunner(GenericRunner):
    """Runner that scans and remediates a single rule within a profile.

    Besides running the scan, it parses oscap's --progress output to
    compare the rule's actual result against an expected context
    (e.g. 'fail' before remediation, 'fixed' after).
    """

    def __init__(
            self, environment, profile, datastream, benchmark_id,
            rule_id, script_name, dont_clean, no_reports, manual_debug):
        super(RuleRunner, self).__init__(
            environment, profile, datastream, benchmark_id,
        )

        self.rule_id = rule_id
        # Rule id with the 'xccdf_..._content_rule_' prefix stripped,
        # used to keep artifact file names short.
        self.short_rule_id = re.sub(r'.*content_rule_', '', self.rule_id)
        # Expected result for the current stage; set via run_stage_with_context.
        self.context = None
        self.script_name = script_name
        # Note the inverted flags: callers pass dont_clean/no_reports.
        self.clean_files = not dont_clean
        self.create_reports = not no_reports
        self.manual_debug = manual_debug

        # Raw stdout of the last oscap scan; parsed for the rule result.
        self._oscap_output = ''

    def _get_arf_basename(self, stage=None):
        """ARF basename for *stage* (defaults to the current stage)."""
        if stage is None:
            stage = self.stage
        return '{0}-{1}-{2}-arf.xml'.format(self.short_rule_id, self.script_name, stage)

    def _get_initial_arf_path(self):
        """Path of the ARF produced by the 'initial' stage."""
        return os.path.join(LogHelper.LOG_DIR, self._get_arf_basename("initial"))

    def _get_verbose_basename(self):
        return '{0}-{1}-{2}'.format(self.short_rule_id, self.script_name, self.stage)

    def _get_report_basename(self):
        return '{0}-{1}-{2}'.format(self.short_rule_id, self.script_name, self.stage)

    def _get_results_basename(self):
        return '{0}-{1}-{2}-results-{3}'.format(
            self.short_rule_id, self.script_name, self.profile, self.stage)

    def make_oscap_call(self):
        """Scan only this rule (--rule) and analyze the progress output.

        Returns the code from _analyze_output_of_oscap_call():
        1 = matched the expected context, 2 = notapplicable, 0 = mismatch.
        """
        self.prepare_online_scanning_arguments()
        if self.create_reports:
            self._generate_report_file()
        self.command_options.extend(
            ['--rule', self.rule_id])
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        if self.create_reports:
            self.environment.arf_to_html(self.arf_path)

        return self._analyze_output_of_oscap_call()

    def final(self):
        """Run the final scan and re-check the rule result against the
        expected context."""
        success = super(RuleRunner, self).final()
        success = success and self._analyze_output_of_oscap_call()

        return success

    def _find_rule_result_in_output(self):
        """Return this rule's result string parsed from the scan output,
        or "notselected" when the rule never appeared."""
        # oscap --progress options outputs rule results to stdout in
        # following format:
        # xccdf_org....rule_accounts_password_minlen_login_defs:pass
        match = re.findall('{0}:(.*)$'.format(self.rule_id),
                           self._oscap_output,
                           re.MULTILINE)

        if not match:
            # When the rule is not selected, it won't match in output
            return "notselected"

        # When --remediation is executed, there will be two entries in
        # progress output, one for fail, and one for fixed, e.g.
        # xccdf_org....rule_accounts_password_minlen_login_defs:fail
        # xccdf_org....rule_accounts_password_minlen_login_defs:fixed
        # We are interested in the last one
        return match[-1]

    def _analyze_output_of_oscap_call(self):
        """Compare the parsed rule result with the expected context.

        Returns 1 on match, 2 when the rule was not applicable, and 0 on
        mismatch (preloading an explanatory log message in that case).
        """
        local_success = 1
        # check expected result
        rule_result = self._find_rule_result_in_output()

        if rule_result == "notapplicable":
            msg = (
                'Rule {0} evaluation resulted in {1}'
                .format(self.rule_id, rule_result))
            LogHelper.preload_log(logging.WARNING, msg, 'notapplicable')
            local_success = 2
            return local_success
        if rule_result != self.context:
            local_success = 0
            if rule_result == 'notselected':
                msg = (
                    'Rule {0} has not been evaluated! '
                    'Wrong profile selected in test scenario?'
                    .format(self.rule_id))
            else:
                msg = (
                    'Rule evaluation resulted in {0}, '
                    'instead of expected {1} during {2} stage '
                    .format(rule_result, self.context, self.stage)
                )
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return local_success

    def _get_formatting_dict_for_remediation(self):
        """Extend the base formatting dict with this rule's id."""
        fmt = super(RuleRunner, self)._get_formatting_dict_for_remediation()
        fmt['rule_id'] = self.rule_id

        return fmt

    def run_stage_with_context(self, stage, context):
        """Run *stage* expecting the rule to finish with result *context*."""
        self.context = context
        return self.run_stage(stage)
588
589
590
class OscapProfileRunner(ProfileRunner):
    """Profile runner that remediates with oscap's built-in --remediate."""

    def remediation(self):
        # A single oscap invocation both scans and applies fixes.
        self.command_options.append('--remediate')
        return self.make_oscap_call()
594
595
596
class AnsibleProfileRunner(ProfileRunner):
    """Profile runner that remediates with a generated Ansible playbook."""

    def initial(self):
        # Record the ARF so the playbook can be generated from it later.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(AnsibleProfileRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.profile)
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
                                              formatting['output_file'])

        return run_stage_remediation_ansible('profile', self.environment,
                                             formatting,
                                             self.verbose_path)
610
611
612
class BashProfileRunner(ProfileRunner):
    """Profile runner that remediates with a generated bash script."""

    def initial(self):
        # Record the ARF so the fix script can be generated from it later.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(BashProfileRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.profile)

        return run_stage_remediation_bash('profile', self.environment, formatting, self.verbose_path)
622
623
624
class OscapRuleRunner(RuleRunner):
    """Rule runner that remediates with oscap's built-in --remediate."""

    def remediation(self):
        # One oscap invocation scans, remediates and records the ARF.
        self.command_options.extend(
            ['--remediate', '--results-arf', self.arf_path])
        return self.make_oscap_call()

    def final(self):
        """There is no need to run the final scan again - the result won't
        differ from what the remediation step already produced."""
        return True
634
635
636
class BashRuleRunner(RuleRunner):
    """Rule runner that remediates with a generated bash script."""

    def initial(self):
        # Record the ARF so the fix script can be generated from it later.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(BashRuleRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.rule_id)

        return run_stage_remediation_bash(
            'rule', self.environment, formatting, self.verbose_path)
648
649
650
class AnsibleRuleRunner(RuleRunner):
    """Rule runner that remediates with a generated Ansible playbook."""

    def initial(self):
        # Record the ARF so the playbook can be generated from it later.
        self.command_options.extend(['--results-arf', self.arf_path])
        return super(AnsibleRuleRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.rule_id)
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
                                              formatting['output_file'])

        return run_stage_remediation_ansible(
            'rule', self.environment, formatting, self.verbose_path)
663
664
665
class Checker(object):
    """Base driver of a whole test run against one test environment.

    Subclasses implement _test_target() and _run_test(); this class
    handles environment startup/teardown and top-level error handling.
    """

    def __init__(self, test_env):
        self.test_env = test_env
        # Counter used by finalize() to warn when nothing ran.
        self.executed_tests = 0

        self.datastream = ""
        self.benchmark_id = ""
        self.remediate_using = ""
        self.benchmark_cpes = set()

        # Timestamp identifying this test run, used presumably for
        # report labeling - set once at construction.
        now = datetime.datetime.now()
        self.test_timestamp_str = now.strftime("%Y-%m-%d %H:%M")

    def test_target(self, target):
        """Start the environment, test *target*, and always finalize.

        Keyboard interrupts, RuntimeErrors and timeouts terminate the run
        with a log message instead of propagating.
        """
        self.start()
        try:
            self._test_target(target)
        except KeyboardInterrupt:
            logging.info("Terminating the test run due to keyboard interrupt.")
        except RuntimeError as exc:
            logging.error("Terminating due to error: {msg}.".format(msg=str(exc)))
        except TimeoutException as exc:
            logging.error("Terminating due to timeout: {msg}".format(msg=str(exc)))
        finally:
            self.finalize()

    def run_test_for_all_profiles(self, profiles, test_data=None):
        """Run _run_test for each profile.

        With more than one profile, a saved "prepared" environment state
        is restored between runs so profiles don't affect each other.
        """
        if len(profiles) > 1:
            with test_env.SavedState.create_from_environment(self.test_env, "prepared") as state:
                args_list = [(p, test_data) for p in profiles]
                state.map_on_top(self._run_test, args_list)
        elif profiles:
            self._run_test(profiles[0], test_data)

    def _test_target(self, target):
        # Subclass responsibility.
        raise NotImplementedError()

    def _run_test(self, profile, test_data):
        # Subclass responsibility.
        raise NotImplementedError()

    def start(self):
        """Start the test environment; wrap any failure in RuntimeError."""
        self.executed_tests = 0

        try:
            self.test_env.start()
        except Exception as exc:
            msg = ("Failed to start test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)

    def finalize(self):
        """Tear down the test environment; wrap any failure in RuntimeError.

        Warns when no test was executed during the run.
        """
        if not self.executed_tests:
            logging.warning("Nothing has been tested!")

        try:
            self.test_env.finalize()
        except Exception as exc:
            msg = ("Failed to finalize test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)
725
726
727
# Maps a remediation backend name to the runner class used for
# whole-profile test runs.
REMEDIATION_PROFILE_RUNNERS = {
    'oscap': OscapProfileRunner,
    'bash': BashProfileRunner,
    'ansible': AnsibleProfileRunner,
}


# Maps a remediation backend name to the runner class used for
# per-rule test runs.
REMEDIATION_RULE_RUNNERS = {
    'oscap': OscapRuleRunner,
    'bash': BashRuleRunner,
    'ansible': AnsibleRuleRunner,
}


# Maps a remediation backend name to the fix language it applies —
# presumably 'oscap' maps to 'bash' because oscap's built-in remediation
# executes bash fix scripts; confirm against callers.
REMEDIATION_RUNNER_TO_REMEDIATION_MEANS = {
    'oscap': 'bash',
    'bash': 'bash',
    'ansible': 'ansible',
}
746