Passed
Pull Request — master (#47)
by Jan
06:18
created

openscap_report.scap_results_parser.data_structures   A

Complexity

Total Complexity 27

Size/Duplication

Total Lines 245
Duplicated Lines 0 %

Test Coverage

Coverage 90.4%

Importance

Changes 0
Metric Value
wmc 27
eloc 208
dl 0
loc 245
ccs 113
cts 125
cp 0.904
rs 10
c 0
b 0
f 0

12 Methods

Rating   Name   Duplication   Size   Complexity  
A Rule.as_dict() 0 18 1
A OvalNode.as_json() 0 2 1
A Remediation.get_type() 0 10 1
A Report.as_dict() 0 18 1
A OvalTest.as_dict() 0 6 1
A Report.get_failed_rules() 0 2 2
A OvalNode.as_dict() 0 21 2
A OvalObject.as_dict() 0 6 1
B Report.get_severity_of_failed_rules_stats() 0 18 6
B Report.get_rule_results_stats() 0 24 6
A Remediation.as_dict() 0 8 1
A OvalNode.log_oval_tree() 0 10 4
1 1
import json
2 1
import logging
3 1
from dataclasses import dataclass
4
5 1
from .exceptions import MissingProcessableRules
6
7
8 1
@dataclass
class Report:  # pylint: disable=R0902
    """Data about one SCAP evaluation (scan) and the results of its rules.

    ``rules`` maps rule IDs to rule objects exposing at least ``result``
    and ``severity`` string attributes; it is ``None`` until the parser
    fills it in — TODO confirm against the parser that populates this.
    """

    title: str = ""
    identity: str = ""
    profile_name: str = ""
    target: str = ""
    cpe_platforms: str = ""
    scanner: str = ""
    scanner_version: str = ""
    benchmark_url: str = ""
    benchmark_id: str = ""
    benchmark_version: str = ""
    start_time: str = ""
    end_time: str = ""
    test_system: str = ""
    score: float = 0.0
    score_max: float = 0.0
    rules: dict = None

    def as_dict(self):
        """Serialize the report into a plain dict (``rules`` passed through as-is)."""
        return {
            "title": self.title,
            "profile_name": self.profile_name,
            "target": self.target,
            # BUG FIX: the key was misspelled "identit"; the field is "identity".
            "identity": self.identity,
            "cpe_platforms": self.cpe_platforms,
            "scanner": self.scanner,
            "scanner_version": self.scanner_version,
            "benchmark_url": self.benchmark_url,
            "benchmark_id": self.benchmark_id,
            "benchmark_version": self.benchmark_version,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "test_system": self.test_system,
            "score": self.score,
            "score_max": self.score_max,
            "rules": self.rules,
        }

    def get_rule_results_stats(self):
        """Return counts and percentages of rule results.

        Percentages are relative to rules that are neither "notselected"
        nor "notapplicable".

        Raises:
            MissingProcessableRules: when no rule is applicable/selected.
        """
        # Lower-case once; every check below compares case-insensitively.
        results = [rule.result.lower() for rule in self.rules.values()]
        results_stats = {
            "fail": results.count("fail"),
            "pass": results.count("pass"),
            "unknown_error": sum(result in ("error", "unknown") for result in results),
        }
        not_ignored_rules = sum(
            result not in ("notselected", "notapplicable") for result in results)
        if not_ignored_rules == 0:
            raise MissingProcessableRules("There are no applicable or selected rules.")
        percent_per_rule = 100 / not_ignored_rules
        results_stats["other"] = (
            not_ignored_rules - results_stats["fail"] - results_stats["pass"])
        results_stats["fail_percent"] = results_stats["fail"] * percent_per_rule
        results_stats["pass_percent"] = results_stats["pass"] * percent_per_rule
        results_stats["other_percent"] = results_stats["other"] * percent_per_rule
        results_stats["sum_of_rules"] = not_ignored_rules
        return results_stats

    def get_severity_of_failed_rules_stats(self):
        """Return per-severity counts and percentages over the failed rules.

        Raises:
            MissingProcessableRules: when there are no failed rules.
        """
        failed_rules = self.get_failed_rules()
        count_of_failed_rules = len(failed_rules)
        if count_of_failed_rules == 0:
            raise MissingProcessableRules("There are no failed rules!")
        percent_per_rule = 100 / count_of_failed_rules
        severities = [rule.severity.lower() for rule in failed_rules]
        severity_levels = ("low", "medium", "high", "unknown")
        severity_stats = {
            level: severities.count(level) for level in severity_levels
        }
        for level in severity_levels:
            severity_stats[f"{level}_percent"] = severity_stats[level] * percent_per_rule
        # Reuse the count computed above instead of calling len() again.
        severity_stats["sum_of_failed_rules"] = count_of_failed_rules
        return severity_stats

    def get_failed_rules(self):
        """Return the rules whose result is "fail" (case-insensitive)."""
        return [rule for rule in self.rules.values() if rule.result.lower() == "fail"]
93
94
95 1
@dataclass
class OvalObject():
    """A single OVAL object referenced by a test, plus its collected data."""

    object_id: str = ""
    flag: str = ""
    object_type: str = ""
    object_data: dict = None

    def as_dict(self):
        """Serialize this OVAL object into a plain dict."""
        return dict(
            object_id=self.object_id,
            flag=self.flag,
            object_type=self.object_type,
            object_data=self.object_data,
        )
109
110
111 1
@dataclass
class OvalTest():
    """One OVAL test and the object it evaluates.

    NOTE: ``as_dict`` dereferences ``oval_object`` unconditionally, so a
    fully populated instance is expected by the time it is serialized.
    """

    test_id: str = ""
    test_type: str = ""
    comment: str = ""
    oval_object: OvalObject = None

    def as_dict(self):
        """Serialize this test, recursing into the attached OVAL object."""
        out = dict(
            test_id=self.test_id,
            test_type=self.test_type,
            comment=self.comment,
        )
        out["oval_object"] = self.oval_object.as_dict()
        return out
125
126
127 1
@dataclass
class OvalNode:  # pylint: disable=R0902
    """Node of an OVAL evaluation tree.

    A node is either an inner operator node (``children`` is a non-empty
    list of ``OvalNode``) or a leaf (``children`` falsy, ``test_info`` set).
    """

    node_id: str
    node_type: str
    value: str
    negation: bool = False
    comment: str = ""
    tag: str = ""
    children: list = None
    test_info: OvalTest = None

    def as_dict(self):
        """Serialize the subtree rooted at this node into nested dicts.

        Leaves serialize ``test_info`` and carry ``children: None``;
        inner nodes serialize ``children`` and carry ``test_info: None``.
        """
        # Build the six shared keys once instead of duplicating the literal
        # in both branches.
        node = {
            'node_id': self.node_id,
            'node_type': self.node_type,
            'value': self.value,
            'negation': self.negation,
            'comment': self.comment,
            'tag': self.tag,
            'test_info': None,
            'children': None,
        }
        if self.children:
            node['children'] = [child.as_dict() for child in self.children]
        else:
            # Leaf node: test_info is expected to be set here, as in the
            # original two-branch implementation.
            node['test_info'] = self.test_info.as_dict()
        return node

    def as_json(self):
        """Serialize the subtree as a JSON string."""
        return json.dumps(self.as_dict())

    def log_oval_tree(self, level=0):
        """Log the subtree via logging.info, indented two spaces per level."""
        indent = "  " * level
        # Operator nodes are labeled by type; leaf "value" nodes by their ID.
        label = self.node_id if self.node_type == "value" else self.node_type
        # Lazy %-style args: the message is only rendered if INFO is enabled.
        logging.info("%s%s = %s", indent, label, self.value)
        if self.children is not None:
            for child in self.children:
                child.log_oval_tree(level + 1)
174
175
176 1
@dataclass
class Rule:  # pylint: disable=R0902
    """A single XCCDF rule result with its metadata and evaluation artifacts."""

    rule_id: str = ""
    title: str = ""
    result: str = ""
    multi_check: bool = False
    time: str = ""
    severity: str = ""
    identifiers: list = None
    references: list = None
    description: str = ""
    rationale: str = ""
    warnings: list = None
    platform: str = ""
    oval_definition_id: str = ""
    message: str = ""
    remediations: list = None
    oval_tree: OvalNode = None

    def as_dict(self):
        """Serialize the rule as a shallow dict; every key names its field."""
        keys = (
            "rule_id", "title", "result", "multi_check", "time", "severity",
            "identifiers", "references", "description", "rationale",
            "warnings", "platform", "oval_definition_id", "message",
            "remediations", "oval_tree",
        )
        return {key: getattr(self, key) for key in keys}
214
215
216 1
@dataclass
class Remediation:
    """One remediation (fix) attached to a rule."""

    remediation_id: str = ""
    system: str = ""
    complexity: str = ""
    disruption: str = ""
    strategy: str = ""
    fix: str = ""

    # Deliberately unannotated so the dataclass machinery does not treat it
    # as a field; built once at class creation instead of on every
    # get_type() call.
    _SCRIPT_TYPES = {
        "urn:xccdf:fix:script:sh": "Shell script",
        "urn:xccdf:fix:script:ansible": "Ansible snippet",
        "urn:xccdf:fix:script:puppet": "Puppet snippet",
        "urn:redhat:anaconda:pre": "Anaconda snippet",
        "urn:xccdf:fix:script:kubernetes": "Kubernetes snippet",
        "urn:redhat:osbuild:blueprint": "OSBuild Blueprint snippet",
    }

    def as_dict(self):
        """Serialize this remediation into a plain dict."""
        return {
            "remediation_id": self.remediation_id,
            "system": self.system,
            "complexity": self.complexity,
            "disruption": self.disruption,
            "strategy": self.strategy,
            "fix": self.fix,
        }

    def get_type(self):
        """Return a human-readable label for ``system``; "script" if unknown."""
        return self._SCRIPT_TYPES.get(self.system, "script")
245