import logging

import pytest

from openscap_report.scap_results_parser.data_structures.data_structures import \
    Remediation
from openscap_report.scap_results_parser.exceptions import \
    MissingProcessableRules
from tests.unit_tests.test_scap_result_parser import get_parser

from ..constants import PATH_TO_ARF


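# Parse the sample ARF file referenced by PATH_TO_ARF into a fresh report object.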
def get_report():
    parser = get_parser(PATH_TO_ARF)
    return parser.parse_report()


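# Remove every rule whose (lowercased) result is listed in `result`; the report
# is mutated in place and returned for convenience.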
def remove_all_rules_by_result(report, result=()):
    new_rules = {}
    for rule_id, rule in report.rules.items():
        if rule.result.lower() not in result:
            new_rules[rule_id] = rule
    report.rules = new_rules
    return report


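# Same filtering as above, keyed on rule severity instead of rule result.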
def remove_all_rules_by_severity(report, severity=()):
    new_rules = {}
    for rule_id, rule in report.rules.items():
        if rule.severity.lower() not in severity:
            new_rules[rule_id] = rule
    report.rules = new_rules
    return report


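# Expected statistics below are derived from the sample ARF report: 702 rules in
# total, of which 442 fail, 191 pass and 69 fall into other result categories.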
@pytest.mark.parametrize("to_remove, result", [
    (
        (),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
    (
        ("fail", "pass"),
        {"fail": 0, "pass": 0, "unknown_error": 0, "other": 69, "sum_of_rules": 69}
    ),
    (
        ("fail"),
        {"fail": 0, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 260}
    ),
    (
        ("fail", "notchecked"),
        {"fail": 0, "pass": 191, "unknown_error": 0, "other": 0, "sum_of_rules": 191}
    ),
    (
        ("pass"),
        {"fail": 442, "pass": 0, "unknown_error": 0, "other": 69, "sum_of_rules": 511}
    ),
    (
        ("pass", "notchecked"),
        {"fail": 442, "pass": 0, "unknown_error": 0, "other": 0, "sum_of_rules": 442}
    ),
    (
        ("notchecked"),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 0, "sum_of_rules": 633}
    ),
    (
        ("error", "unknown"),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
    (
        ("notselected", "notapplicable"),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
])
def test_report_rule_results_stats(to_remove, result):
    report = remove_all_rules_by_result(get_report(), to_remove)
    rule_results_stats = report.get_rule_results_stats()
    for key in result:
        assert result[key] == rule_results_stats[key]


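# Once all processable results are removed, get_rule_results_stats() is expected
# to log a warning about the absence of applicable or selected rules.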
@pytest.mark.parametrize("to_remove", [
    ("fail", "pass", "notchecked", "error", "unknown", "error"),
    ("fail", "pass", "notchecked"),
])
def test_report_rule_results_stats_without_processable_rules(to_remove, caplog):
    report = remove_all_rules_by_result(get_report(), to_remove)
    caplog.set_level(logging.WARNING)
    report.get_rule_results_stats()
    assert 'There are no applicable or selected rules.' in caplog.text


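# Severity statistics count only failed rules; the unfiltered sample ARF report
# contains 442 of them (33 low, 351 medium, 25 high, 33 unknown).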
@pytest.mark.parametrize("to_remove, result", [
    (
        (),
        {"low": 33, "medium": 351, "high": 25, "unknown": 33, "sum_of_failed_rules": 442}
    ),
    (
        ("low"),
        {"low": 0, "medium": 351, "high": 25, "unknown": 33, "sum_of_failed_rules": 409}
    ),
    (
        ("medium"),
        {"low": 33, "medium": 0, "high": 25, "unknown": 33, "sum_of_failed_rules": 91}
    ),
    (
        ("high"),
        {"low": 33, "medium": 351, "high": 0, "unknown": 33, "sum_of_failed_rules": 417}
    ),
    (
        ("unknown"),
        {"low": 33, "medium": 351, "high": 25, "unknown": 0, "sum_of_failed_rules": 409}
    ),
    (
        ("low", "medium"),
        {"low": 0, "medium": 0, "high": 25, "unknown": 33, "sum_of_failed_rules": 58}
    ),
    (
        ("high", "unknown"),
        {"low": 33, "medium": 351, "high": 0, "unknown": 0, "sum_of_failed_rules": 384}
    ),
    (
        ("medium", "high"),
        {"low": 33, "medium": 0, "high": 0, "unknown": 33, "sum_of_failed_rules": 66}
    ),
    (
        ("low", "unknown"),
        {"low": 0, "medium": 351, "high": 25, "unknown": 0, "sum_of_failed_rules": 376}
    ),
    (
        ("low", "medium", "high"),
        {"low": 0, "medium": 0, "high": 0, "unknown": 33, "sum_of_failed_rules": 33}
    ),
    (
        ("low", "medium", "unknown"),
        {"low": 0, "medium": 0, "high": 25, "unknown": 0, "sum_of_failed_rules": 25}
    ),
    (
        ("low", "high", "unknown"),
        {"low": 0, "medium": 351, "high": 0, "unknown": 0, "sum_of_failed_rules": 351}
    ),
    (
        ("medium", "high", "unknown"),
        {"low": 33, "medium": 0, "high": 0, "unknown": 0, "sum_of_failed_rules": 33}
    ),
])
def test_report_severity_of_failed_rules_stats(to_remove, result):
    report = remove_all_rules_by_severity(get_report(), to_remove)
    severity_of_failed_rules_stats = report.get_severity_of_failed_rules_stats()
    for key in result:
        assert result[key] == severity_of_failed_rules_stats[key]


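# With no failed rules left (or no rules at all), the severity statistics are
# expected to raise MissingProcessableRules.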
def test_report_severity_of_failed_rules_without_any_rules():
    report = remove_all_rules_by_severity(get_report(), ("low", "medium", "high", "unknown"))
    with pytest.raises(MissingProcessableRules):
        assert report.get_severity_of_failed_rules_stats()


def test_report_severity_of_failed_rules_stats_without_failed_rules():
    report = remove_all_rules_by_result(get_report(), ("fail"))
    with pytest.raises(MissingProcessableRules):
        assert report.get_severity_of_failed_rules_stats()


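# Remediation types are derived from the fix system URN; unrecognised systems
# fall back to the generic "script" label.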
@pytest.mark.parametrize("system, type_of_remediation", [
    ("Unknown_system", "script"),
    ("urn:xccdf:fix:script:sh", "Shell script"),
    ("urn:xccdf:fix:script:ansible", "Ansible snippet"),
    ("urn:xccdf:fix:script:puppet", "Puppet snippet"),
    ("urn:redhat:anaconda:pre", "Anaconda snippet"),
    ("urn:xccdf:fix:script:kubernetes", "Kubernetes snippet"),
    ("urn:redhat:osbuild:blueprint", "OSBuild Blueprint snippet"),
    ("urn:xccdf:fix:script:pejskoskript", "script"),
])
def test_remediation_type(system, type_of_remediation):
    remediation = Remediation(system=system)
    assert remediation.get_type() == type_of_remediation