from .base_report_backend import BaseReportBackend
from ..storage import Storage
from ..utils import safe_dumps


|
class FileReportBackend(BaseReportBackend):
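    """Report backend that saves and loads benchmark results as JSON files
    through :class:`Storage`."""
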
|
    def __init__(self, config):
        super(FileReportBackend, self).__init__(config)
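        # Storage wraps the --benchmark-storage directory; it is used below
        # to query, load and create the saved benchmark files.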
|
        self.storage = Storage(config.getoption("benchmark_storage"),
                               default_machine_id=self.machine_id,
                               logger=self.logger)

|
    @property
    def _next_num(self):
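        """Return the next free 4-digit counter for saved files, e.g. "0042"."""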
|
        files = self.storage.query("[0-9][0-9][0-9][0-9]_*")
        files.sort(reverse=True)
        if not files:
            return "0001"
        # Saved files are named "NNNN_<name>.json"; continue from the highest
        # counter found. The glob guarantees the prefix is numeric, so the
        # int() conversion cannot raise here.
        return "%04i" % (int(str(files[0].name).split('_')[0]) + 1)

|
    def handle_saving(self, benchmarks, machine_info):
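        """Dump results to the --benchmark-json file and/or save a numbered
        copy into storage (--benchmark-save / --benchmark-autosave)."""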
|
        save = (benchmarks and self.save) or self.autosave
        if save or self.json:
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

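        # --benchmark-json: always dump the full result, including raw timing data.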
|
        if self.json:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=benchmarks,
                include_data=True,
                machine_info=machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=benchmarks,
                output_json=output_json,
            )
            with self.json as fh:
                fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Wrote benchmark data in: %s" % self.json, purple=True)

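        # --benchmark-save / --benchmark-autosave: raw timing data is only
        # included when --benchmark-save-data is given.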
|
        if save:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=benchmarks,
                include_data=self.save_data,
                machine_info=machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=benchmarks,
                output_json=output_json,
            )
            output_file = self.storage.get("%s_%s.json" % (self._next_num, save))
            self.logger.info("Saving benchmarks for %r in: %s" % (save, output_file))
            # _next_num hands out a fresh counter, so the name must be unused.
            assert not output_file.exists()

|
            with output_file.open('wb') as fh:
                fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Saved benchmark data in: %s" % output_file)

|
    def handle_loading(self, machine_info):
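        """Load the runs selected by --benchmark-compare, keyed first by file
        path and then by test fullname."""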
|
        compared_mapping = {}
        if self.compare:
            # A bare --benchmark-compare means "compare against the latest run".
            if self.compare is True:
                compared_benchmarks = list(self.storage.load('[0-9][0-9][0-9][0-9]_'))[-1:]
            else:
                compared_benchmarks = list(self.storage.load(self.compare))

|
            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                    code = "BENCHMARK-C2"
                else:
                    msg += " match %r." % self.compare
                    code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

|
            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=machine_info,
                    compared_benchmark=compared_benchmark,
                )
                compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmarks from: %s" % path)
        return compared_mapping
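
# A minimal sketch of how this backend is driven (the surrounding session/
# plugin wiring is an assumption, not part of this module):
#
#     backend = FileReportBackend(config)
#     backend.handle_saving(benchmarks, machine_info)    # save/autosave/json
#     compared = backend.handle_loading(machine_info)    # --benchmark-compare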