# Copyright 2022, Red Hat, Inc.
# SPDX-License-Identifier: LGPL-2.1-or-later

import logging

import pytest

from openscap_report.scap_results_parser import MissingProcessableRules
from openscap_report.scap_results_parser.data_structures import Remediation

from ..constants import (PATH_TO_ARF, PATH_TO_ARF_SCANNED_ON_CONTAINER,
                         PATH_TO_SIMPLE_RULE_FAIL_ARF,
                         PATH_TO_SIMPLE_RULE_FAIL_XCCDF,
                         PATH_TO_SIMPLE_RULE_PASS_ARF,
                         PATH_TO_SIMPLE_RULE_PASS_XCCDF, PATH_TO_XCCDF)
from ..test_utils import get_parser, get_report


def remove_all_rules_by_result(report, result=()):
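    """Remove every rule whose lower-cased result appears in the given result values."""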
20
|
|
|
new_rules = {} |
21
|
|
|
for rule_id, rule in report.rules.items(): |
22
|
|
|
if rule.result.lower() not in result: |
23
|
|
|
new_rules[rule_id] = rule |
24
|
|
|
report.rules = new_rules |
25
|
|
|
return report |
26
|
|
|
|
27
|
|
|
|
28
|
|
|
def remove_all_rules_by_severity(report, severity=()): |
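    """Remove every rule whose lower-cased severity appears in the given severity values."""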
    new_rules = {}
    for rule_id, rule in report.rules.items():
        if rule.severity.lower() not in severity:
            new_rules[rule_id] = rule
    report.rules = new_rules
    return report


@pytest.mark.unit_test
@pytest.mark.parametrize("to_remove, result", [
    (
        (),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
    (
        ("fail", "pass"),
        {"fail": 0, "pass": 0, "unknown_error": 0, "other": 69, "sum_of_rules": 69}
    ),
    (
        ("fail",),
        {"fail": 0, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 260}
    ),
    (
        ("fail", "notchecked"),
        {"fail": 0, "pass": 191, "unknown_error": 0, "other": 0, "sum_of_rules": 191}
    ),
    (
        ("pass",),
        {"fail": 442, "pass": 0, "unknown_error": 0, "other": 69, "sum_of_rules": 511}
    ),
    (
        ("pass", "notchecked"),
        {"fail": 442, "pass": 0, "unknown_error": 0, "other": 0, "sum_of_rules": 442}
    ),
    (
        ("notchecked",),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 0, "sum_of_rules": 633}
    ),
    (
        ("error", "unknown"),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
    (
        ("notselected", "notapplicable"),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
])
def test_report_rule_results_stats(to_remove, result):
    report = remove_all_rules_by_result(get_report(), to_remove)
    rule_results_stats = report.get_rule_results_stats()
    for key in result:
        assert result[key] == rule_results_stats[key]


@pytest.mark.unit_test
@pytest.mark.parametrize("to_remove", [
    ("fail", "pass", "notchecked", "error", "unknown"),
    ("fail", "pass", "notchecked"),
])
def test_report_rule_results_stats_without_processable_rules(to_remove, caplog):
    report = remove_all_rules_by_result(get_report(), to_remove)
    caplog.set_level(logging.WARNING)
    report.get_rule_results_stats()
    assert 'There are no applicable or selected rules.' in caplog.text


@pytest.mark.unit_test
@pytest.mark.parametrize("to_remove, result", [
    (
        (),
        {"low": 33, "medium": 351, "high": 25, "unknown": 33, "sum_of_failed_rules": 442}
    ),
    (
        ("low",),
        {"low": 0, "medium": 351, "high": 25, "unknown": 33, "sum_of_failed_rules": 409}
    ),
    (
        ("medium",),
        {"low": 33, "medium": 0, "high": 25, "unknown": 33, "sum_of_failed_rules": 91}
    ),
    (
        ("high",),
        {"low": 33, "medium": 351, "high": 0, "unknown": 33, "sum_of_failed_rules": 417}
    ),
    (
        ("unknown",),
        {"low": 33, "medium": 351, "high": 25, "unknown": 0, "sum_of_failed_rules": 409}
    ),
    (
        ("low", "medium"),
        {"low": 0, "medium": 0, "high": 25, "unknown": 33, "sum_of_failed_rules": 58}
    ),
    (
        ("high", "unknown"),
        {"low": 33, "medium": 351, "high": 0, "unknown": 0, "sum_of_failed_rules": 384}
    ),
    (
        ("medium", "high"),
        {"low": 33, "medium": 0, "high": 0, "unknown": 33, "sum_of_failed_rules": 66}
    ),
    (
        ("low", "unknown"),
        {"low": 0, "medium": 351, "high": 25, "unknown": 0, "sum_of_failed_rules": 376}
    ),
    (
        ("low", "medium", "high"),
        {"low": 0, "medium": 0, "high": 0, "unknown": 33, "sum_of_failed_rules": 33}
    ),
    (
        ("low", "medium", "unknown"),
        {"low": 0, "medium": 0, "high": 25, "unknown": 0, "sum_of_failed_rules": 25}
    ),
    (
        ("low", "high", "unknown"),
        {"low": 0, "medium": 351, "high": 0, "unknown": 0, "sum_of_failed_rules": 351}
    ),
    (
        ("medium", "high", "unknown"),
        {"low": 33, "medium": 0, "high": 0, "unknown": 0, "sum_of_failed_rules": 33}
    ),
])
def test_report_severity_of_failed_rules_stats(to_remove, result):
    report = remove_all_rules_by_severity(get_report(), to_remove)
    severity_of_failed_rules_stats = report.get_severity_of_failed_rules_stats()
    for key in result:
        assert result[key] == severity_of_failed_rules_stats[key]


@pytest.mark.unit_test
def test_report_severity_of_failed_rules_without_any_rules():
    report = remove_all_rules_by_severity(get_report(), ("low", "medium", "high", "unknown"))
    with pytest.raises(MissingProcessableRules):
        assert report.get_severity_of_failed_rules_stats()


@pytest.mark.unit_test
def test_report_severity_of_failed_rules_stats_without_failed_rules():
    report = remove_all_rules_by_result(get_report(), ("fail",))
    with pytest.raises(MissingProcessableRules):
        assert report.get_severity_of_failed_rules_stats()


@pytest.mark.unit_test
@pytest.mark.parametrize("system, type_of_remediation", [
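    # Unrecognized fix systems are expected to fall back to the generic "script" type.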
    ("Unknown_system", "script"),
    ("urn:xccdf:fix:script:sh", "Shell script"),
    ("urn:xccdf:fix:script:ansible", "Ansible snippet"),
    ("urn:xccdf:fix:script:puppet", "Puppet snippet"),
    ("urn:redhat:anaconda:pre", "Anaconda snippet"),
    ("urn:xccdf:fix:script:kubernetes", "Kubernetes snippet"),
    ("urn:redhat:osbuild:blueprint", "OSBuild Blueprint snippet"),
    ("urn:xccdf:fix:script:pejskoskript", "script"),
])
def test_remediation_type(system, type_of_remediation):
    remediation = Remediation(remediation_id="ID-1234", system=system)
    assert remediation.get_type() == type_of_remediation


@pytest.mark.unit_test
@pytest.mark.parametrize("file_path, count_of_selected_rules", [
    (PATH_TO_ARF, 714),
    (PATH_TO_XCCDF, 712),
    (PATH_TO_ARF_SCANNED_ON_CONTAINER, 121),
    (PATH_TO_SIMPLE_RULE_FAIL_ARF, 1),
    (PATH_TO_SIMPLE_RULE_FAIL_XCCDF, 1),
    (PATH_TO_SIMPLE_RULE_PASS_ARF, 1),
    (PATH_TO_SIMPLE_RULE_PASS_XCCDF, 1)
])
def test_report_get_selected_rules(file_path, count_of_selected_rules):
    parser = get_parser(file_path)
    report = parser.parse_report()
    assert len(report.get_selected_rules()) == count_of_selected_rules