Passed: Pull Request, master (#30), by Jan, created 09:14

tests.unit_tests.test_data_structure.get_report() (rated A)

Complexity:   Conditions 1
Size:         Total Lines 3, Code Lines 3
Duplication:  Lines 0, Ratio 0 %
Importance:   Changes 0

Metric  Value
cc      1
eloc    3
nop     0
dl      0
loc     3
rs      10
c       0
b       0
f       0
import pytest

from oscap_report.scap_results_parser.data_structures import Remediation
from oscap_report.scap_results_parser.exceptions import MissingProcessableRules
from tests.unit_tests.test_scap_result_parser import get_parser

from ..constants import PATH_TO_ARF


def get_report():
    # Parse the bundled ARF file into the report object used by all tests below.
    parser = get_parser(PATH_TO_ARF)
    return parser.parse_report()


def remove_all_rules_by_result(report, result=()):
    # Drop every rule whose result is listed in `result` and return the report.
    new_rules = {}
    for rule_id, rule in report.rules.items():
        if rule.result.lower() not in result:
            new_rules[rule_id] = rule
    report.rules = new_rules
    return report


def remove_all_rules_by_severity(report, severity=()):
    # Drop every rule whose severity is listed in `severity` and return the report.
    new_rules = {}
    for rule_id, rule in report.rules.items():
        if rule.severity.lower() not in severity:
            new_rules[rule_id] = rule
    report.rules = new_rules
    return report


@pytest.mark.parametrize("to_remove, result", [
    (
        (),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
    (
        ("fail", "pass", "notchecked", "error", "unknown"),
        {"raise": True}
    ),
    (
        ("fail", "pass"),
        {"fail": 0, "pass": 0, "unknown_error": 0, "other": 69, "sum_of_rules": 69}
    ),
    (
        ("fail", "pass", "notchecked"),
        {"raise": True}
    ),
    (
        ("fail",),
        {"fail": 0, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 260}
    ),
    (
        ("fail", "notchecked"),
        {"fail": 0, "pass": 191, "unknown_error": 0, "other": 0, "sum_of_rules": 191}
    ),
    (
        ("pass",),
        {"fail": 442, "pass": 0, "unknown_error": 0, "other": 69, "sum_of_rules": 511}
    ),
    (
        ("pass", "notchecked"),
        {"fail": 442, "pass": 0, "unknown_error": 0, "other": 0, "sum_of_rules": 442}
    ),
    (
        ("notchecked",),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 0, "sum_of_rules": 633}
    ),
    (
        ("error", "unknown"),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
    (
        ("notselected", "notapplicable"),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
])
def test_report_rule_results_stats(to_remove, result):
    report = remove_all_rules_by_result(get_report(), to_remove)
    if "raise" not in result:
        rule_results_stats = report.get_rule_results_stats()
        for key in result:
            assert result[key] == rule_results_stats[key]
    else:
        # Removing every processable rule must make the stats computation raise.
        with pytest.raises(MissingProcessableRules):
            report.get_rule_results_stats()


@pytest.mark.parametrize("to_remove, result", [
    (
        (),
        {"low": 33, "medium": 351, "high": 25, "unknown": 33, "sum_of_filed_rules": 442}
    ),
    (
        ("low", "medium", "high", "unknown"),
        {"raise": True}
    ),
    (
        ("fail",),
        {"raise": True}
    ),
    (
        ("low",),
        {"low": 0, "medium": 351, "high": 25, "unknown": 33, "sum_of_filed_rules": 409}
    ),
    (
        ("medium",),
        {"low": 33, "medium": 0, "high": 25, "unknown": 33, "sum_of_filed_rules": 91}
    ),
    (
        ("high",),
        {"low": 33, "medium": 351, "high": 0, "unknown": 33, "sum_of_filed_rules": 417}
    ),
    (
        ("unknown",),
        {"low": 33, "medium": 351, "high": 25, "unknown": 0, "sum_of_filed_rules": 409}
    ),
    (
        ("low", "medium"),
        {"low": 0, "medium": 0, "high": 25, "unknown": 33, "sum_of_filed_rules": 58}
    ),
    (
        ("high", "unknown"),
        {"low": 33, "medium": 351, "high": 0, "unknown": 0, "sum_of_filed_rules": 384}
    ),
    (
        ("medium", "high"),
        {"low": 33, "medium": 0, "high": 0, "unknown": 33, "sum_of_filed_rules": 66}
    ),
    (
        ("low", "unknown"),
        {"low": 0, "medium": 351, "high": 25, "unknown": 0, "sum_of_filed_rules": 376}
    ),
    (
        ("low", "medium", "high"),
        {"low": 0, "medium": 0, "high": 0, "unknown": 33, "sum_of_filed_rules": 33}
    ),
    (
        ("low", "medium", "unknown"),
        {"low": 0, "medium": 0, "high": 25, "unknown": 0, "sum_of_filed_rules": 25}
    ),
    (
        ("low", "high", "unknown"),
        {"low": 0, "medium": 351, "high": 0, "unknown": 0, "sum_of_filed_rules": 351}
    ),
    (
        ("medium", "high", "unknown"),
        {"low": 33, "medium": 0, "high": 0, "unknown": 0, "sum_of_filed_rules": 33}
    ),
])
def test_report_severity_of_failed_rules_stats(to_remove, result):
    # "fail" is a rule result, not a severity, so that case goes through the
    # result-based helper; every other case filters by severity.
    if "fail" in to_remove:
        report = remove_all_rules_by_result(get_report(), to_remove)
    else:
        report = remove_all_rules_by_severity(get_report(), to_remove)

    if "raise" not in result:
        severity_of_failed_rules_stats = report.get_severity_of_failed_rules_stats()
        for key in result:
            assert result[key] == severity_of_failed_rules_stats[key]
    else:
        with pytest.raises(MissingProcessableRules):
            report.get_severity_of_failed_rules_stats()


@pytest.mark.parametrize("system, type_of_remediation", [
    ("Unknown_system", "Unknown_system"),
    ("urn:xccdf:fix:script:sh", "Shell script"),
    ("urn:xccdf:fix:script:ansible", "Ansible snippet"),
    ("urn:xccdf:fix:script:puppet", "Puppet snippet"),
    ("urn:redhat:anaconda:pre", "Anaconda snippet"),
    ("urn:xccdf:fix:script:kubernetes", "Kubernetes snippet"),
])
def test_remediation_type(system, type_of_remediation):
    remediation = Remediation(system=system)
    assert remediation.get_type() == type_of_remediation
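For orientation: the two remove_all_rules_by_* helpers only assume that report.rules is a dict mapping rule IDs to objects exposing result and severity attributes. The following minimal sketch is not part of the test suite; FakeRule and FakeReport are hypothetical stand-ins, not classes from oscap_report.

from dataclasses import dataclass, field


@dataclass
class FakeRule:
    # Hypothetical stand-in exposing the two attributes the helpers read.
    result: str = "fail"
    severity: str = "medium"


@dataclass
class FakeReport:
    # Hypothetical stand-in exposing the rules dict the helpers rewrite.
    rules: dict = field(default_factory=dict)


report = FakeReport(rules={
    "xccdf_rule_1": FakeRule(result="pass", severity="low"),
    "xccdf_rule_2": FakeRule(result="fail", severity="high"),
})
# Dropping all passed rules leaves only the failed rule behind.
assert list(remove_all_rules_by_result(report, ("pass",)).rules) == ["xccdf_rule_2"]

Assuming the repository layout implied by the imports, the suite itself would typically be run from the project root with python -m pytest tests/unit_tests/.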