Passed
Pull Request — master (#30)
by Jan
05:08
created

tests.unit_tests.test_data_structure   A

Complexity

Total Complexity 18

Size/Duplication

Total Lines 174
Duplicated Lines 0 %

Importance

Changes 0
Metric Value
eloc 129
dl 0
loc 174
rs 10
c 0
b 0
f 0
wmc 18

9 Functions

Rating   Name   Duplication   Size   Complexity  
A remove_all_rules_by_result() 0 7 3
A remove_all_rules_by_severity() 0 7 3
A get_report() 0 3 1
A test_remediation_type() 0 13 1
A test_report_rule_results_stats_without_processable_rules() 0 8 2
A test_report_severity_of_failed_rules_without_any_rules() 0 4 2
A test_report_rule_results_stats() 0 43 2
B test_report_severity_of_failed_rules_stats() 0 59 2
A test_report_severity_of_failed_rules_stats_without_failed_rules() 0 4 2
1
import pytest
2
3
from oscap_report.scap_results_parser.data_structures import Remediation
4
from oscap_report.scap_results_parser.exceptions import MissingProcessableRules
5
from tests.unit_tests.test_scap_result_parser import get_parser
6
7
from ..constants import PATH_TO_ARF
8
9
10
def get_report():
    """Parse the reference ARF file and return the resulting report object."""
    return get_parser(PATH_TO_ARF).parse_report()
13
14
15
def remove_all_rules_by_result(report, result=()):
    """Strip from *report* every rule whose result matches one of *result*.

    Comparison is case-insensitive on the rule's ``result`` attribute.

    Args:
        report: Report object exposing a ``rules`` dict of ``rule_id -> rule``.
        result: Tuple of lowercase result names to remove (e.g. ``("fail",)``).
            A bare string is also accepted and treated as a single result name.

    Returns:
        The same report object, with ``report.rules`` replaced in place.
    """
    # Guard against a bare string such as ("fail") — without this,
    # `rule.result.lower() not in result` would do substring matching.
    if isinstance(result, str):
        result = (result,)
    report.rules = {
        rule_id: rule
        for rule_id, rule in report.rules.items()
        if rule.result.lower() not in result
    }
    return report
22
23
24
def remove_all_rules_by_severity(report, severity=()):
    """Strip from *report* every rule whose severity matches one of *severity*.

    Comparison is case-insensitive on the rule's ``severity`` attribute.

    Args:
        report: Report object exposing a ``rules`` dict of ``rule_id -> rule``.
        severity: Tuple of lowercase severity names to remove
            (e.g. ``("low", "medium")``). A bare string is also accepted and
            treated as a single severity name.

    Returns:
        The same report object, with ``report.rules`` replaced in place.
    """
    # Guard against a bare string such as ("low") — without this,
    # `rule.severity.lower() not in severity` would do substring matching.
    if isinstance(severity, str):
        severity = (severity,)
    report.rules = {
        rule_id: rule
        for rule_id, rule in report.rules.items()
        if rule.severity.lower() not in severity
    }
    return report
31
32
33
@pytest.mark.parametrize("to_remove, result", [
    (
        (),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
    (
        ("fail", "pass"),
        {"fail": 0, "pass": 0, "unknown_error": 0, "other": 69, "sum_of_rules": 69}
    ),
    (
        # Trailing commas below make real 1-tuples; ("fail") is just a string,
        # and `x not in "fail"` would silently do substring matching.
        ("fail",),
        {"fail": 0, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 260}
    ),
    (
        ("fail", "notchecked"),
        {"fail": 0, "pass": 191, "unknown_error": 0, "other": 0, "sum_of_rules": 191}
    ),
    (
        ("pass",),
        {"fail": 442, "pass": 0, "unknown_error": 0, "other": 69, "sum_of_rules": 511}
    ),
    (
        ("pass", "notchecked"),
        {"fail": 442, "pass": 0, "unknown_error": 0, "other": 0, "sum_of_rules": 442}
    ),
    (
        ("notchecked",),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 0, "sum_of_rules": 633}
    ),
    (
        ("error", "unknown"),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
    (
        ("notselected", "notapplicable"),
        {"fail": 442, "pass": 191, "unknown_error": 0, "other": 69, "sum_of_rules": 702}
    ),
])
def test_report_rule_results_stats(to_remove, result):
    """Rule-result statistics must match the rules remaining in the report."""
    report = remove_all_rules_by_result(get_report(), to_remove)
    rule_results_stats = report.get_rule_results_stats()
    for key in result:
        assert result[key] == rule_results_stats[key]
76
77
78
@pytest.mark.parametrize("to_remove", [
    # "error" was accidentally listed twice in the original first case.
    ("fail", "pass", "notchecked", "error", "unknown"),
    ("fail", "pass", "notchecked"),
])
def test_report_rule_results_stats_without_processable_rules(to_remove):
    """Removing every processable result leaves nothing to compute stats from."""
    report = remove_all_rules_by_result(get_report(), to_remove)
    with pytest.raises(MissingProcessableRules):
        assert report.get_rule_results_stats()
86
87
88
@pytest.mark.parametrize("to_remove, result", [
    (
        (),
        {"low": 33, "medium": 351, "high": 25, "unknown": 33, "sum_of_failed_rules": 442}
    ),
    (
        # Trailing commas below make real 1-tuples; ("low") is just a string,
        # and `x not in "low"` would silently do substring matching.
        ("low",),
        {"low": 0, "medium": 351, "high": 25, "unknown": 33, "sum_of_failed_rules": 409}
    ),
    (
        ("medium",),
        {"low": 33, "medium": 0, "high": 25, "unknown": 33, "sum_of_failed_rules": 91}
    ),
    (
        ("high",),
        {"low": 33, "medium": 351, "high": 0, "unknown": 33, "sum_of_failed_rules": 417}
    ),
    (
        ("unknown",),
        {"low": 33, "medium": 351, "high": 25, "unknown": 0, "sum_of_failed_rules": 409}
    ),
    (
        ("low", "medium"),
        {"low": 0, "medium": 0, "high": 25, "unknown": 33, "sum_of_failed_rules": 58}
    ),
    (
        ("high", "unknown"),
        {"low": 33, "medium": 351, "high": 0, "unknown": 0, "sum_of_failed_rules": 384}
    ),
    (
        ("medium", "high"),
        {"low": 33, "medium": 0, "high": 0, "unknown": 33, "sum_of_failed_rules": 66}
    ),
    (
        ("low", "unknown"),
        {"low": 0, "medium": 351, "high": 25, "unknown": 0, "sum_of_failed_rules": 376}
    ),
    (
        ("low", "medium", "high"),
        {"low": 0, "medium": 0, "high": 0, "unknown": 33, "sum_of_failed_rules": 33}
    ),
    (
        ("low", "medium", "unknown"),
        {"low": 0, "medium": 0, "high": 25, "unknown": 0, "sum_of_failed_rules": 25}
    ),
    (
        ("low", "high", "unknown"),
        {"low": 0, "medium": 351, "high": 0, "unknown": 0, "sum_of_failed_rules": 351}
    ),
    (
        ("medium", "high", "unknown"),
        {"low": 33, "medium": 0, "high": 0, "unknown": 0, "sum_of_failed_rules": 33}
    ),
])
def test_report_severity_of_failed_rules_stats(to_remove, result):
    """Failed-rule severity statistics must match the remaining rules."""
    report = remove_all_rules_by_severity(get_report(), to_remove)
    severity_of_failed_rules_stats = report.get_severity_of_failed_rules_stats()
    for key in result:
        assert result[key] == severity_of_failed_rules_stats[key]
147
148
149
def test_report_severity_of_failed_rules_without_any_rules():
    """Severity stats must raise once every severity class has been removed."""
    all_severities = ("low", "medium", "high", "unknown")
    report = remove_all_rules_by_severity(get_report(), all_severities)
    with pytest.raises(MissingProcessableRules):
        assert report.get_severity_of_failed_rules_stats()
153
154
155
def test_report_severity_of_failed_rules_stats_without_failed_rules():
    """Severity stats must raise when no failed rule is left in the report."""
    # ("fail",) with the trailing comma is a real 1-tuple; the original
    # ("fail") was just a string and relied on substring matching.
    report = remove_all_rules_by_result(get_report(), ("fail",))
    with pytest.raises(MissingProcessableRules):
        assert report.get_severity_of_failed_rules_stats()
159
160
161
@pytest.mark.parametrize("system, type_of_remediation", [
    ("Unknown_system", "script"),
    ("urn:xccdf:fix:script:sh", "Shell script"),
    ("urn:xccdf:fix:script:ansible", "Ansible snippet"),
    ("urn:xccdf:fix:script:puppet", "Puppet snippet"),
    ("urn:redhat:anaconda:pre", "Anaconda snippet"),
    ("urn:xccdf:fix:script:kubernetes", "Kubernetes snippet"),
    ("urn:redhat:osbuild:blueprint", "OSBuild Blueprint snippet"),
    ("urn:xccdf:fix:script:pejskoskript", "script"),
])
def test_remediation_type(system, type_of_remediation):
    """Each remediation system URN maps to its expected human-readable type."""
    assert Remediation(system=system).get_type() == type_of_remediation
174