from __future__ import division
from __future__ import print_function

import json

import pytest

from .fixture import statistics
from .fixture import statistics_error
from .logger import Logger
from .storage import Storage
from .table import ResultsTable
from .utils import SecondsDecimal
from .utils import annotate_source
from .utils import first_or_value
from .utils import get_machine_id
from .utils import load_timer
from .utils import short_filename


class PerformanceRegression(pytest.UsageError):
    pass


class BenchmarkSession(object):
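    """
    Per-session state for the benchmark plugin: collects the command line
    options, the benchmark fixtures' results and the comparison data, and
    drives saving, loading and reporting at the end of the test run.
    """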
    compared_mapping = None
    groups = None

    def __init__(self, config):
        self.verbose = config.getoption("benchmark_verbose")
        self.logger = Logger(self.verbose, config)
        self.config = config
        self.performance_regressions = []
        self.benchmarks = []
        self.machine_id = get_machine_id()
        self.machine_info = config.hook.pytest_benchmark_generate_machine_info(config=self.config)
        self.config.hook.pytest_benchmark_update_machine_info(
            config=self.config,
            machine_info=self.machine_info
        )

        self.options = dict(
            min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
            min_rounds=config.getoption("benchmark_min_rounds"),
            max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
            timer=load_timer(config.getoption("benchmark_timer")),
            calibration_precision=config.getoption("benchmark_calibration_precision"),
            disable_gc=config.getoption("benchmark_disable_gc"),
            warmup=config.getoption("benchmark_warmup"),
            warmup_iterations=config.getoption("benchmark_warmup_iterations"),
        )
        self.skip = config.getoption("benchmark_skip")
        self.disable = config.getoption("benchmark_disable")

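        # Benchmarks are force-disabled when they cannot run reliably or at all:
        # under pytest-xdist (timings from parallel workers are not comparable)
        # or when the `statistics` dependency failed to import.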
        if config.getoption("dist", "no") != "no" and not self.skip:
            self.logger.warn(
                "BENCHMARK-U2",
                "Benchmarks are automatically disabled because xdist plugin is active. "
                "Benchmarks cannot be performed reliably in a parallelized environment.",
                fslocation="::"
            )
            self.disable = True
        if hasattr(config, "slaveinput"):
            self.disable = True
        if not statistics:
            self.logger.warn(
                "BENCHMARK-U3",
                "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
                statistics_error,
                fslocation="::"
            )
            self.disable = True

        self.only = config.getoption("benchmark_only")
        self.sort = config.getoption("benchmark_sort")
        self.columns = config.getoption("benchmark_columns")
        if self.skip and self.only:
            raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
        if self.disable and self.only:
            raise pytest.UsageError(
                "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
                "automatically activated if xdist is on or you're missing the statistics dependency.")
        self.group_by = config.getoption("benchmark_group_by")
        self.save = config.getoption("benchmark_save")
        self.autosave = config.getoption("benchmark_autosave")
        self.save_data = config.getoption("benchmark_save_data")
        self.json = config.getoption("benchmark_json")
        self.compare = config.getoption("benchmark_compare")
        self.compare_fail = config.getoption("benchmark_compare_fail")

        self.storage = Storage(config.getoption("benchmark_storage"),
                               default_machine_id=self.machine_id, logger=self.logger)
        self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)

    def prepare_benchmarks(self):
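        """
        Flatten every collected benchmark into a plain dict for reporting.

        For each benchmark that also appears in a loaded comparison file, an
        extra row is yielded carrying the saved stats and an annotated source;
        the --benchmark-compare-fail checks are evaluated against those rows
        and any failures are recorded as performance regressions.
        """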
        for bench in self.benchmarks:
            if bench:
                compared = False
                for path, compared_mapping in self.compared_mapping.items():
                    if bench.fullname in compared_mapping:
                        compared = compared_mapping[bench.fullname]
                        source = short_filename(path, self.machine_id)
                        flat_bench = bench.as_dict(include_data=False, stats=False)
                        flat_bench.update(compared["stats"])
                        flat_bench["path"] = str(path)
                        annotate_source(flat_bench, source)
                        if self.compare_fail:
                            for check in self.compare_fail:
                                fail = check.fails(bench, flat_bench)
                                if fail:
                                    self.performance_regressions.append((flat_bench["fullname"], fail))
                        yield flat_bench
                flat_bench = bench.as_dict(include_data=False, flat=True)
                flat_bench["path"] = None
                annotate_source(flat_bench, compared and "NOW")
                yield flat_bench

    @property
    def next_num(self):
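        """
        Next zero-padded sequence number for a saved benchmark file, based on
        the highest numbered ``NNNN_*`` file already in storage.
        """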
        files = self.storage.query("[0-9][0-9][0-9][0-9]_*")
        files.sort(reverse=True)
        if not files:
            return "0001"
        for f in files:
            try:
                return "%04i" % (int(str(f.name).split('_')[0]) + 1)
            except ValueError:
                raise

    def handle_saving(self):
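        """
        Generate the JSON report through the plugin hooks and write it to the
        --benchmark-json target and/or to storage (for --benchmark-save and
        --benchmark-autosave).
        """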
        save = self.save or self.autosave
        if save or self.json:
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

        if self.json:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=True,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            with self.json as fh:
                fh.write(json.dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Wrote benchmark data in %s" % self.json, purple=True)

        if save:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=self.save_data,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            output_file = self.storage.get("%s_%s.json" % (self.next_num, save))
            assert not output_file.exists()

            with output_file.open('wb') as fh:
                fh.write(json.dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Saved benchmark data in %s" % output_file)

    def handle_loading(self):
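        """
        Load the benchmark file(s) selected by --benchmark-compare (the most
        recent saved run when the option is used without a value) and index
        their benchmarks by fullname for comparison.
        """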
        self.compared_mapping = {}
        if self.compare:
            if self.compare is True:
                compared_benchmarks = list(self.storage.load('[0-9][0-9][0-9][0-9]_'))[-1:]
            else:
                compared_benchmarks = list(self.storage.load(self.compare))

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                    code = "BENCHMARK-C2"
                else:
                    msg += " match %r." % self.compare
                    code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=self.machine_info,
                    compared_benchmark=compared_benchmark,
                )
                self.compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmark %s" % path)

    def finish(self):
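        """
        Run once at the end of the session: save and load benchmark data, then
        group the flattened results via the pytest_benchmark_group_stats hook.
        """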
        self.handle_saving()
        self.handle_loading()
        self.groups = self.config.hook.pytest_benchmark_group_stats(
            config=self.config,
            benchmarks=self.prepare_benchmarks(),
            group_by=self.group_by
        )

    def display(self, tr):
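        """
        Render the grouped results as a terminal table, then run the
        regression checks.
        """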
        if not self.groups:
            return

        tr.ensure_newline()
        results_table = ResultsTable(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
            logger=self.logger
        )
        results_table.display(tr, self.groups)
        self.check_regressions()

    def check_regressions(self):
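        """
        Fail the run if --benchmark-compare-fail was given without usable
        comparison data, or if any compared benchmark regressed.
        """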
        if self.compare_fail and not self.compared_mapping:
            raise pytest.UsageError("--benchmark-compare-fail requires valid --benchmark-compare.")

        if self.performance_regressions:
            self.logger.error("Performance has regressed:\n%s" % "\n".join(
                "\t%s - %s" % line for line in self.performance_regressions
            ))
            raise PerformanceRegression("Performance has regressed.")