Test Failed
Push — master (c00d48...56fe87) by Matěj
03:05 (queued 11s)

ssg_test_suite.oscap.process_profile_id()    Grade: A

Complexity
    Conditions     3

Size
    Total Lines    9
    Code Lines     6

Duplication
    Lines          0
    Ratio          0 %

Code Coverage
    Tests          0
    CRAP Score     12

Importance
    Changes        0

Metric    Value
cc        3
eloc      6
nop       1
dl        0
loc       9
ccs       0
cts       6
cp        0
crap      12
rs        10
c         0
b         0
f         0
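
As a sanity check on the CRAP score above (an added illustration, not part of the report): the metric is conventionally computed as comp(m)**2 * (1 - cov(m))**3 + comp(m), i.e. cyclomatic complexity penalized by missing test coverage. With the table's values:

# cc = 3, coverage = 0 %  ->  3**2 * (1 - 0.0)**3 + 3 == 12
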
#!/usr/bin/env python2
from __future__ import print_function

import logging
import os.path
import re
import collections
import xml.etree.ElementTree
import json
import datetime


from ssg.constants import OSCAP_PROFILE_ALL_ID

from ssg_test_suite.log import LogHelper
from ssg_test_suite import test_env
from ssg_test_suite import common

from ssg.shims import input_func

logging.getLogger(__name__).addHandler(logging.NullHandler())

_CONTEXT_RETURN_CODES = {'pass': 0,
                         'fail': 2,
                         'error': 1,
                         'notapplicable': 0,
                         'fixed': 0}

_ANSIBLE_TEMPLATE = 'urn:xccdf:fix:script:ansible'
_BASH_TEMPLATE = 'urn:xccdf:fix:script:sh'
_XCCDF_NS = 'http://checklists.nist.gov/xccdf/1.2'


PROFILE_ALL_ID_SINGLE_QUOTED = False


def analysis_to_serializable(analysis):
    result = dict(analysis)
    for key, value in analysis.items():
        if isinstance(value, set):
            result[key] = tuple(value)
    return result


def save_analysis_to_json(analysis, output_fname):
    analysis2 = analysis_to_serializable(analysis)
    with open(output_fname, "w") as f:
        json.dump(analysis2, f)


def triage_xml_results(fname):
    tree = xml.etree.ElementTree.parse(fname)
    all_xml_results = tree.findall(".//{%s}rule-result" % _XCCDF_NS)

    triaged = collections.defaultdict(set)
    for result in list(all_xml_results):
        idref = result.get("idref")
        status = result.find("{%s}result" % _XCCDF_NS).text
        triaged[status].add(idref)

    return triaged
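
# Illustrative usage (not part of the original module): triage_xml_results
# maps each XCCDF result status to the set of rule idrefs that ended with
# that status, so a hypothetical caller could summarize a scan like this:
#
#     triaged = triage_xml_results("ssg-results-arf.xml")
#     for status in ("pass", "fail", "error"):
#         print(status, len(triaged[status]))
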
def send_files_remote(verbose_path, remote_dir, domain_ip, *files):
    """Upload files to VM."""
    # files is a list of absolute paths on the host
    success = True
    destination = 'root@{0}:{1}'.format(domain_ip, remote_dir)
    files_string = ' '.join(files)

    logging.debug('Uploading files {0} to {1}'.format(files_string,
                                                      destination))
    command = ['scp'] + list(common.SSH_ADDITIONAL_OPTS) + list(files) + [destination]
    if common.run_cmd_local(command, verbose_path)[0] != 0:
        logging.error('Failed to upload files {0}'.format(files_string))
        success = False
    return success
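
# For illustration (hypothetical values, not from the source): with
# domain_ip = "192.168.122.10" and remote_dir = "/", the command list above
# expands to roughly
#     scp <SSH_ADDITIONAL_OPTS...> /host/path/file.xml root@192.168.122.10:/
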
def get_file_remote(verbose_path, local_dir, domain_ip, remote_path):
    """Download a file from VM."""
    # remote_path is an absolute path of a file on the remote machine
    success = True
    source = 'root@{0}:{1}'.format(domain_ip, remote_path)
    logging.debug('Downloading file {0} to {1}'
                  .format(source, local_dir))
    command = ['scp'] + list(common.SSH_ADDITIONAL_OPTS) + [source, local_dir]
    if common.run_cmd_local(command, verbose_path)[0] != 0:
        logging.error('Failed to download file {0}'.format(remote_path))
        success = False
    return success


def find_result_id_in_output(output):
    match = re.search('result id.*$', output, re.IGNORECASE | re.MULTILINE)
    if match is None:
        return None
    # Return the rightmost word of the match, which is the result id.
    return match.group(0).split()[-1]
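
# Example (illustrative): given an output line such as
#     Result ID: xccdf_org.open-scap_testresult_some_profile
# the regex matches the whole line, and the rightmost whitespace-separated
# word, i.e. the result id itself, is returned.
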
def get_result_id_from_arf(arf_path, verbose_path):
    command = ['oscap', 'info', arf_path]
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    if returncode != 0:
        raise RuntimeError('{0} returned exit code {1}'
                           .format(command_string, returncode))
    res_id = find_result_id_in_output(output)
    if res_id is None:
        raise RuntimeError('Failed to find result ID in {0}'
                           .format(arf_path))
    return res_id
def single_quote_string(input):
    # Drop any existing quote characters, then wrap the result in single
    # quotes so it survives the remote shell.
    result = input
    for char in "\"'":
        result = result.replace(char, "")
    return "'{}'".format(result)


def generate_fixes_remotely(formatting, verbose_path):
    command_base = ['oscap', 'xccdf', 'generate', 'fix']
    command_options = [
        '--benchmark-id', formatting['benchmark_id'],
        '--profile', formatting['profile'],
        '--template', formatting['output_template'],
        '--output', '/{output_file}'.format(**formatting),
    ]
    command_operands = ['/{arf_file}'.format(**formatting)]
    if 'result_id' in formatting:
        command_options.extend(['--result-id', formatting['result_id']])

    command_components = command_base + command_options + command_operands
    command_string = ' '.join([single_quote_string(c) for c in command_components])
    rc, stdout = common.run_cmd_remote(
        command_string, formatting['domain_ip'], verbose_path)
    if rc != 0:
        msg = ('Command {0} ended with return code {1} (expected 0).'
               .format(command_string, rc))
        raise RuntimeError(msg)
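
# For illustration (placeholder values are hypothetical): the assembled
# remote command string looks roughly like
#     'oscap' 'xccdf' 'generate' 'fix' '--benchmark-id' '<benchmark>'
#     '--profile' '<profile>' '--template' 'urn:xccdf:fix:script:sh'
#     '--output' '/<output_file>' '--result-id' '<result_id>' '/<arf_file>'
# with every component single-quoted by single_quote_string().
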

def run_stage_remediation_ansible(run_type, formatting, verbose_path):
    """
       Returns False on error, or True in case of a successful Ansible
       playbook run."""
    formatting['output_template'] = _ANSIBLE_TEMPLATE
    if not send_arf_to_remote_machine_and_generate_remediations_there(
            run_type, formatting, verbose_path):
        return False
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False
    command = (
        'ansible-playbook', '-v', '-i', '{0},'.format(formatting['domain_ip']),
        '-u', 'root', '--ssh-common-args={0}'.format(' '.join(list(common.SSH_ADDITIONAL_OPTS))),
        formatting['playbook'])
    command_string = ' '.join(command)
    returncode, output = common.run_cmd_local(command, verbose_path)
    # Append the output of ansible-playbook to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = (
            'Ansible playbook remediation run has '
            'exited with return code {} instead of expected 0'
            .format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True

def run_stage_remediation_bash(run_type, formatting, verbose_path):
    """
       Returns False on error, or True in case of a successful bash script
       run."""
    formatting['output_template'] = _BASH_TEMPLATE
    if not send_arf_to_remote_machine_and_generate_remediations_there(
            run_type, formatting, verbose_path):
        return False
    if not get_file_remote(verbose_path, LogHelper.LOG_DIR,
                           formatting['domain_ip'],
                           '/' + formatting['output_file']):
        return False

    command_string = '/bin/bash -x /{output_file}'.format(**formatting)
    returncode, output = common.run_cmd_remote(
        command_string, formatting['domain_ip'], verbose_path)
    # Append the output of the script execution to the verbose_path file.
    with open(verbose_path, 'a') as f:
        f.write('Stdout of "{}":'.format(command_string))
        f.write(output)
    if returncode != 0:
        msg = (
            'Bash script remediation run has exited with return code {} '
            'instead of expected 0'.format(returncode))
        LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return False
    return True

def send_arf_to_remote_machine_and_generate_remediations_there(
        run_type, formatting, verbose_path):
    if run_type == 'rule':
        try:
            res_id = get_result_id_from_arf(formatting['arf'], verbose_path)
        except Exception as exc:
            logging.error(str(exc))
            return False
        formatting['result_id'] = res_id

    if not send_files_remote(
            verbose_path, '/', formatting['domain_ip'], formatting['arf']):
        return False

    try:
        generate_fixes_remotely(formatting, verbose_path)
    except Exception as exc:
        logging.error(str(exc))
        return False
    return True

def is_virtual_oscap_profile(profile):
    """ Test whether the profile belongs to the so-called "virtual" category
        of OpenSCAP profiles. Currently that is "(all)", but it can be any
        other id we might come up with in the future; it just needs to be
        enclosed in parentheses, for example "(custom_profile)".
    """
    if profile is not None:
        if profile == OSCAP_PROFILE_ALL_ID:
            return True
        if profile.startswith("(") and profile.endswith(")"):
            return True
    return False
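
# Examples (illustrative):
#     is_virtual_oscap_profile("(all)")              -> True
#     is_virtual_oscap_profile("(custom_profile)")   -> True
#     is_virtual_oscap_profile("xccdf_..._profile")  -> False
#     is_virtual_oscap_profile(None)                 -> False
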
def process_profile_id(profile):
    # Detect if the profile is virtual and include single quotes if needed.
    if is_virtual_oscap_profile(profile):
        if PROFILE_ALL_ID_SINGLE_QUOTED:
            return "'{}'".format(profile)
        else:
            return profile
    else:
        return profile
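
# Example (illustrative): with PROFILE_ALL_ID_SINGLE_QUOTED set to True,
# process_profile_id("(all)") returns "'(all)'"; with the default False,
# and for any non-virtual profile, the id is passed through unchanged.
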

class GenericRunner(object):
    def __init__(self, environment, profile, datastream, benchmark_id):
        self.environment = environment
        self.profile = profile
        self.datastream = datastream
        self.benchmark_id = benchmark_id

        self.arf_file = ''
        self.arf_path = ''
        self.verbose_path = ''
        self.report_path = ''
        self.results_path = ''
        self.stage = 'undefined'

        self.clean_files = False
        self.manual_debug = False
        self._filenames_to_clean_afterwards = set()

        self.command_base = []
        self.command_options = []
        self.command_operands = []

    def _make_arf_path(self):
        self.arf_file = self._get_arf_file()
        self.arf_path = os.path.join(LogHelper.LOG_DIR, self.arf_file)

    def _get_arf_file(self):
        raise NotImplementedError()

    def _make_verbose_path(self):
        verbose_file = self._get_verbose_file()
        verbose_path = os.path.join(LogHelper.LOG_DIR, verbose_file)
        self.verbose_path = LogHelper.find_name(verbose_path, '.verbose.log')

    def _get_verbose_file(self):
        raise NotImplementedError()

    def _make_report_path(self):
        report_file = self._get_report_file()
        report_path = os.path.join(LogHelper.LOG_DIR, report_file)
        self.report_path = LogHelper.find_name(report_path, '.html')

    def _get_report_file(self):
        raise NotImplementedError()

    def _make_results_path(self):
        results_file = self._get_results_file()
        results_path = os.path.join(LogHelper.LOG_DIR, results_file)
        self.results_path = LogHelper.find_name(results_path, '.xml')

    def _get_results_file(self):
        raise NotImplementedError()

    def _generate_report_file(self):
        self.command_options.extend([
            '--report', self.report_path,
        ])
        self._filenames_to_clean_afterwards.add(self.report_path)

    def _wait_for_continue(self):
        """ If the user requested to leave the machine in a failed state for
        hands-on debugging, wait for a keypress before continuing."""
        input_func("Paused for manual debugging. Continue by pressing return.")

    def prepare_online_scanning_arguments(self):
        self.command_options.extend([
            '--benchmark-id', self.benchmark_id,
            '--profile', self.profile,
            '--progress', '--oval-results',
        ])
        self.command_operands.append(self.datastream)

    def run_stage(self, stage):
        self.stage = stage

        self._make_verbose_path()
        self._make_report_path()
        self._make_arf_path()
        self._make_results_path()

        self.command_base = []
        self.command_options = ['--verbose', 'DEVEL']
        self.command_operands = []

        result = None
        if stage == 'initial':
            result = self.initial()
        elif stage == 'remediation':
            result = self.remediation()
        elif stage == 'final':
            result = self.final()
        else:
            raise RuntimeError('Unknown stage: {}.'.format(stage))

        if self.clean_files:
            for fname in tuple(self._filenames_to_clean_afterwards):
                try:
                    os.remove(fname)
                except OSError:
                    logging.error(
                        "Failed to cleanup file '{0}'"
                        .format(fname))
                finally:
                    self._filenames_to_clean_afterwards.remove(fname)

        if result:
            LogHelper.log_preloaded('pass')
        else:
            LogHelper.log_preloaded('fail')
            if self.manual_debug:
                self._wait_for_continue()
        return result

    @property
    def get_command(self):
        return self.command_base + self.command_options + self.command_operands

    def make_oscap_call(self):
        raise NotImplementedError()

    def initial(self):
        self.command_options += ['--results', self.results_path]
        result = self.make_oscap_call()
        return result

    def remediation(self):
        raise NotImplementedError()

    def final(self):
        self.command_options += ['--results', self.results_path]
        result = self.make_oscap_call()
        return result

    def analyze(self, stage):
        triaged_results = triage_xml_results(self.results_path)
        triaged_results["stage"] = stage
        triaged_results["runner"] = self.__class__.__name__
        return triaged_results

    def _get_formatting_dict_for_remediation(self):
        formatting = {
            'domain_ip': self.environment.domain_ip,
            'profile': self.profile,
            'datastream': self.datastream,
            'benchmark_id': self.benchmark_id
        }
        formatting['arf'] = self.arf_path
        formatting['arf_file'] = self.arf_file
        return formatting
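
# Typical lifecycle sketch (illustrative; `runner` is a concrete subclass
# instance, stage names are the ones accepted by run_stage):
#
#     runner.run_stage('initial')      # baseline scan, writes results/report
#     runner.run_stage('remediation')  # subclass-specific remediation
#     runner.run_stage('final')        # re-scan to verify the fix
#
# Each call rebuilds the oscap command from command_base, command_options
# and command_operands, then logs 'pass' or 'fail' via LogHelper.
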

class ProfileRunner(GenericRunner):
    def _get_arf_file(self):
        return '{0}-initial-arf.xml'.format(self.profile)

    def _get_verbose_file(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_report_file(self):
        return '{0}-{1}'.format(self.profile, self.stage)

    def _get_results_file(self):
        return '{0}-{1}-results'.format(self.profile, self.stage)

    def make_oscap_call(self):
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        if returncode not in [0, 2]:
            logging.error(('Profile run should end with return code 0 or 2, '
                           'not "{0}" as it did!').format(returncode))
            return False
        return True

class RuleRunner(GenericRunner):
    def __init__(
            self, environment, profile, datastream, benchmark_id,
            rule_id, script_name, dont_clean, manual_debug):
        super(RuleRunner, self).__init__(
            environment, profile, datastream, benchmark_id,
        )

        self.rule_id = rule_id
        self.context = None
        self.script_name = script_name
        self.clean_files = not dont_clean
        self.manual_debug = manual_debug

        self._oscap_output = ''

    def _get_arf_file(self):
        return '{0}-initial-arf.xml'.format(self.rule_id)

    def _get_verbose_file(self):
        return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)

    def _get_report_file(self):
        return '{0}-{1}-{2}'.format(self.rule_id, self.script_name, self.stage)

    def _get_results_file(self):
        return '{0}-{1}-{2}-results-{3}'.format(
            self.rule_id, self.script_name, self.profile, self.stage)

    def make_oscap_call(self):
        self.prepare_online_scanning_arguments()
        self._generate_report_file()
        self.command_options.extend(
            ['--rule', self.rule_id])
        returncode, self._oscap_output = self.environment.scan(
            self.command_options + self.command_operands, self.verbose_path)

        return self._analyze_output_of_oscap_call()

    def final(self):
        success = super(RuleRunner, self).final()
        success = success and self._analyze_output_of_oscap_call()

        return success

    def _find_rule_result_in_output(self):
        # The oscap --progress option outputs rule results to stdout in the
        # following format:
        # xccdf_org....rule_accounts_password_minlen_login_defs:pass
        match = re.findall('{0}:(.*)$'.format(self.rule_id),
                           self._oscap_output,
                           re.MULTILINE)

        if not match:
            # When the rule is not selected, it won't appear in the output.
            return "notselected"

        # When --remediate is used, there will be two entries in the
        # progress output, one for fail and one for fixed, e.g.
        # xccdf_org....rule_accounts_password_minlen_login_defs:fail
        # xccdf_org....rule_accounts_password_minlen_login_defs:fixed
        # We are interested in the last one.
        return match[-1]
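
    # Example (illustrative): for rule_id "xccdf_..._rule_foo" and a
    # --progress output containing
    #     xccdf_..._rule_foo:fail
    #     xccdf_..._rule_foo:fixed
    # the method above returns "fixed" -- the last entry wins.
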
    def _analyze_output_of_oscap_call(self):
        local_success = True
        # Check the expected result.
        rule_result = self._find_rule_result_in_output()

        if rule_result != self.context:
            local_success = False
            if rule_result == 'notselected':
                msg = (
                    'Rule {0} has not been evaluated! '
                    'Wrong profile selected in test scenario?'
                    .format(self.rule_id))
            else:
                msg = (
                    'Rule evaluation resulted in {0}, '
                    'instead of the expected {1}, during the {2} stage'
                    .format(rule_result, self.context, self.stage)
                )
            LogHelper.preload_log(logging.ERROR, msg, 'fail')
        return local_success

    def _get_formatting_dict_for_remediation(self):
        fmt = super(RuleRunner, self)._get_formatting_dict_for_remediation()
        fmt['rule_id'] = self.rule_id

        return fmt

    def run_stage_with_context(self, stage, context):
        self.context = context
        return self.run_stage(stage)
class OscapProfileRunner(ProfileRunner):
    def remediation(self):
        self.command_options += ['--remediate']
        return self.make_oscap_call()


class AnsibleProfileRunner(ProfileRunner):
    def initial(self):
        self.command_options += ['--results-arf', self.arf_path]
        return super(AnsibleProfileRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.profile)
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
                                              formatting['output_file'])

        return run_stage_remediation_ansible('profile',
                                             formatting,
                                             self.verbose_path)


class BashProfileRunner(ProfileRunner):
    def initial(self):
        self.command_options += ['--results-arf', self.arf_path]
        return super(BashProfileRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.profile)

        return run_stage_remediation_bash('profile', formatting, self.verbose_path)

class OscapRuleRunner(RuleRunner):
    def remediation(self):
        self.command_options += ['--remediate']
        return self.make_oscap_call()

    def final(self):
        """ There is no need to run the final scan again - the result won't
        differ from what we already have from the remediation step."""
        return True

class BashRuleRunner(RuleRunner):
    def initial(self):
        self.command_options += ['--results-arf', self.arf_path]
        return super(BashRuleRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.sh'.format(self.rule_id)

        success = run_stage_remediation_bash('rule', formatting, self.verbose_path)
        return success


class AnsibleRuleRunner(RuleRunner):
    def initial(self):
        self.command_options += ['--results-arf', self.arf_path]
        return super(AnsibleRuleRunner, self).initial()

    def remediation(self):
        formatting = self._get_formatting_dict_for_remediation()
        formatting['output_file'] = '{0}.yml'.format(self.rule_id)
        formatting['playbook'] = os.path.join(LogHelper.LOG_DIR,
                                              formatting['output_file'])

        success = run_stage_remediation_ansible('rule', formatting, self.verbose_path)
        return success

class Checker(object):
    def __init__(self, test_env):
        self.test_env = test_env
        self.executed_tests = 0

        self.datastream = ""
        self.benchmark_id = ""
        self.remediate_using = ""
        self.benchmark_cpes = set()

        now = datetime.datetime.now()
        self.test_timestamp_str = now.strftime("%Y-%m-%d %H:%M")

    def test_target(self, target):
        self.start()
        try:
            self._test_target(target)
        except KeyboardInterrupt:
            logging.info("Terminating the test run due to keyboard interrupt.")
        except RuntimeError as exc:
            logging.error("Terminating due to error: {msg}.".format(msg=str(exc)))
        finally:
            self.finalize()

    def run_test_for_all_profiles(self, profiles, test_data=None):
        if len(profiles) > 1:
            with test_env.SavedState.create_from_environment(self.test_env, "prepared") as state:
                args_list = [(p, test_data) for p in profiles]
                state.map_on_top(self._run_test, args_list)
        elif profiles:
            self._run_test(profiles[0], test_data)

    def _test_target(self, target):
        raise NotImplementedError()

    def _run_test(self, profile, test_data):
        raise NotImplementedError()

    def start(self):
        self.executed_tests = 0

        try:
            self.test_env.start()
        except Exception as exc:
            msg = ("Failed to start test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)

    def finalize(self):
        if not self.executed_tests:
            logging.error("Nothing has been tested!")

        try:
            self.test_env.finalize()
        except Exception as exc:
            msg = ("Failed to finalize test environment '{0}': {1}"
                   .format(self.test_env.name, str(exc)))
            raise RuntimeError(msg)

REMEDIATION_PROFILE_RUNNERS = {
    'oscap': OscapProfileRunner,
    'bash': BashProfileRunner,
    'ansible': AnsibleProfileRunner,
}


REMEDIATION_RULE_RUNNERS = {
    'oscap': OscapRuleRunner,
    'bash': BashRuleRunner,
    'ansible': AnsibleRuleRunner,
}


REMEDIATION_RUNNER_TO_REMEDIATION_MEANS = {
    'oscap': 'bash',
    'bash': 'bash',
    'ansible': 'ansible',
}
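
# Illustrative lookup (hypothetical caller, not part of the module): a test
# orchestrator would pick a runner class by the requested remediation means:
#
#     runner_cls = REMEDIATION_RULE_RUNNERS['bash']              # -> BashRuleRunner
#     means = REMEDIATION_RUNNER_TO_REMEDIATION_MEANS['oscap']   # -> 'bash'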