# Copyright 2022, Red Hat, Inc.
# SPDX-License-Identifier: LGPL-2.1-or-later

import logging
from typing import Dict

from openscap_report.dataclasses import asdict, dataclass, field

from ..exceptions import MissingProcessableRules
from .group import GROUP_JSON_KEYS, Group
from .identifier import IDENTIFIER_JSON_KEYS
from .json_transformation import (rearrange_identifiers, rearrange_references,
                                  remove_empty_values,
                                  remove_not_selected_rules)
from .oval_definition import OVAL_DEFINITION_JSON_KEYS
from .profile_info import PROFILE_JSON_KEYS, ProfileInfo
from .reference import REFERENCE_JSON_KEYS
from .remediation import REMEDIATION_JSON_KEYS
from .result_of_scan import SCAN_JSON_KEYS, ResultOfScan
from .rule import RULE_JSON_KEYS, Rule
from .warning import WARNING_JSON_KEYS

JSON_REPORT_CONTENT = [
    "profile_info",
    "scan_result",
    "rules",
    *GROUP_JSON_KEYS,
    *IDENTIFIER_JSON_KEYS,
    *OVAL_DEFINITION_JSON_KEYS,
    *PROFILE_JSON_KEYS,
    *REFERENCE_JSON_KEYS,
    *REMEDIATION_JSON_KEYS,
    *RULE_JSON_KEYS,
    *SCAN_JSON_KEYS,
    *WARNING_JSON_KEYS,
]


@dataclass
class Report:
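    """Parsed content of a scan report: profile information, scan result, rules and groups."""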
    profile_info: ProfileInfo = field(default_factory=ProfileInfo)
    scan_result: ResultOfScan = field(default_factory=ResultOfScan)
    rules: Dict[str, Rule] = field(default_factory=dict)
    groups: Dict[str, Group] = field(default_factory=dict)

    @staticmethod
    def default_json_filter(dictionary):
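        """dict_factory for asdict() that keeps only the keys listed in JSON_REPORT_CONTENT."""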
        return {key: value for (key, value) in dictionary if key in JSON_REPORT_CONTENT}

    def as_dict_for_default_json(self):
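        """Return the report as a dictionary intended for the default JSON output.

        Rules not selected by the profile and empty values are removed, and
        references and identifiers are rearranged.
        """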
        json_dict = asdict(self, dict_factory=self.default_json_filter)
        remove_not_selected_rules(json_dict, self.profile_info.selected_rules_ids)
        rearrange_references(json_dict)
        rearrange_identifiers(json_dict)
        json_dict = remove_empty_values(json_dict)
        return json_dict

    def as_dict(self):
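        """Return the complete report as a plain dictionary."""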
        return asdict(self)

    def get_selected_rules(self):
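        """Return (rule_id, rule) pairs of the rules selected by the profile.

        If the profile lists no selected rules, every rule whose result is not
        "notselected" is returned instead. Selected rules without a definition
        in the report are skipped and logged as a warning.
        """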
        if not self.profile_info.selected_rules_ids:
            return [
                (rule_id, rule)
                for rule_id, rule in self.rules.items()
                if rule.result != "notselected"
            ]
        out = []
        for rule_id in self.profile_info.selected_rules_ids:
            if rule_id in self.rules:
                out.append((rule_id, self.rules[rule_id]))
            else:
                logging.warning("Missing definition of selected rule: '%s'", rule_id)
        return out

    def get_rule_results_stats(self):
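        """Return counts and percentages of rule results (fail, pass, other).

        Rules with "notselected" or "notapplicable" results are excluded from
        the total; if no rule remains, the total falls back to 1 to avoid
        division by zero when computing percentages.
        """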
        results_stats = {
            "fail": len(list(
                filter(lambda rule: rule.result.lower() == "fail", self.rules.values()))),
            "pass": len(list(
                filter(
                    lambda rule: rule.result.lower() in ("pass", "fixed"), self.rules.values()))),
            "unknown_error": len(list(
                filter(
                    lambda rule: rule.result.lower() in (
                        "error", "unknown", "fix unsuccessful", "fix failed"
                    ), self.rules.values()))),
        }
        not_ignored_rules = len(list(
            filter(
                lambda rule: rule.result.lower() not in ("notselected", "notapplicable"),
                self.rules.values()
            )))
        if not_ignored_rules == 0:
            not_ignored_rules = 1
            logging.warning("There are no applicable or selected rules.")
        percent_per_rule = 100 / not_ignored_rules
        results_stats["other"] = not_ignored_rules - results_stats["fail"] - results_stats["pass"]
        results_stats["fail_percent"] = results_stats["fail"] * percent_per_rule
        results_stats["pass_percent"] = results_stats["pass"] * percent_per_rule
        results_stats["other_percent"] = results_stats["other"] * percent_per_rule
        results_stats["sum_of_rules"] = not_ignored_rules
        return results_stats

    def get_severity_of_failed_rules_stats(self):
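        """Return counts and percentages of failed rules grouped by severity.

        Raises MissingProcessableRules if there is no failed rule.
        """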
        failed_rules = self.get_failed_rules()
        count_of_failed_rules = len(failed_rules)
        if count_of_failed_rules == 0:
            raise MissingProcessableRules("There are no failed rules!")
        percent_per_rule = 100 / count_of_failed_rules
        severity_stats = {
            "low": sum(map(lambda rule: rule.severity.lower() == "low", failed_rules)),
            "medium": sum(map(lambda rule: rule.severity.lower() == "medium", failed_rules)),
            "high": sum(map(lambda rule: rule.severity.lower() == "high", failed_rules)),
            "unknown": sum(map(lambda rule: rule.severity.lower() == "unknown", failed_rules)),
        }
        severity_stats["low_percent"] = severity_stats["low"] * percent_per_rule
        severity_stats["medium_percent"] = severity_stats["medium"] * percent_per_rule
        severity_stats["high_percent"] = severity_stats["high"] * percent_per_rule
        severity_stats["unknown_percent"] = severity_stats["unknown"] * percent_per_rule
        severity_stats["sum_of_failed_rules"] = len(failed_rules)
        return severity_stats

    def get_failed_rules(self):
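        """Return a list of all rules whose result is "fail"."""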
        return list(filter(lambda rule: rule.result.lower() == "fail", self.rules.values()))