from __future__ import division
from __future__ import print_function

import json

import pytest

from .fixture import statistics
from .fixture import statistics_error
from .logger import Logger
from .storage import Storage
from .table import ResultsTable
from .utils import SecondsDecimal
from .utils import annotate_source
from .utils import cached_property
from .utils import first_or_value
from .utils import get_machine_id
from .utils import load_timer
from .utils import short_filename


class PerformanceRegression(pytest.UsageError):
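    """Raised by check_regressions() when any --benchmark-compare-fail check fails against the compared run."""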
    pass


class BenchmarkSession(object):
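    """Collects benchmark results for a test run and handles saving, loading, comparison and display."""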
    compared_mapping = None
    groups = None

    def __init__(self, config):
        self.verbose = config.getoption("benchmark_verbose")
        self.logger = Logger(self.verbose, config)
        self.config = config
        self.performance_regressions = []
        self.benchmarks = []
        self.machine_id = get_machine_id()

        self.options = dict(
            min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
            min_rounds=config.getoption("benchmark_min_rounds"),
            max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
            timer=load_timer(config.getoption("benchmark_timer")),
            calibration_precision=config.getoption("benchmark_calibration_precision"),
            disable_gc=config.getoption("benchmark_disable_gc"),
            warmup=config.getoption("benchmark_warmup"),
            warmup_iterations=config.getoption("benchmark_warmup_iterations"),
        )
        self.skip = config.getoption("benchmark_skip")
        self.disable = config.getoption("benchmark_disable")

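        # Benchmarks are force-disabled when running under pytest-xdist (timings would not be
        # reliable across workers) or when the `statistics` dependency cannot be imported.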
        if config.getoption("dist", "no") != "no" and not self.skip:
            self.logger.warn(
                "BENCHMARK-U2",
                "Benchmarks are automatically disabled because xdist plugin is active. "
                "Benchmarks cannot be performed reliably in a parallelized environment.",
                fslocation="::"
            )
            self.disable = True
        if hasattr(config, "slaveinput"):
            self.disable = True
        if not statistics:
            self.logger.warn(
                "BENCHMARK-U3",
                "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
                statistics_error,
                fslocation="::"
            )
            self.disable = True

        self.only = config.getoption("benchmark_only")
        self.sort = config.getoption("benchmark_sort")
        self.columns = config.getoption("benchmark_columns")
        if self.skip and self.only:
            raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
        if self.disable and self.only:
            raise pytest.UsageError(
                "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
                "automatically activated if xdist is on or you're missing the statistics dependency.")
        self.group_by = config.getoption("benchmark_group_by")
        self.save = config.getoption("benchmark_save")
        self.autosave = config.getoption("benchmark_autosave")
        self.save_data = config.getoption("benchmark_save_data")
        self.json = config.getoption("benchmark_json")
        self.compare = config.getoption("benchmark_compare")
        self.compare_fail = config.getoption("benchmark_compare_fail")

        self.storage = Storage(config.getoption("benchmark_storage"),
                               default_machine_id=self.machine_id, logger=self.logger)
        self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)

    @cached_property
    def machine_info(self):
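        """Machine metadata, generated once via the pytest_benchmark_generate/update_machine_info hooks."""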
        obj = self.config.hook.pytest_benchmark_generate_machine_info(config=self.config)
        self.config.hook.pytest_benchmark_update_machine_info(
            config=self.config,
            machine_info=obj
        )
        return obj

    def prepare_benchmarks(self):
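        """Yield flattened benchmark dicts: one per matching entry in the compared runs (recording any
        --benchmark-compare-fail regressions along the way) and one for the current run."""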
        for bench in self.benchmarks:
            if bench:
                compared = False
                for path, compared_mapping in self.compared_mapping.items():
                    if bench.fullname in compared_mapping:
                        compared = compared_mapping[bench.fullname]
                        source = short_filename(path, self.machine_id)
                        flat_bench = bench.as_dict(include_data=False, stats=False)
                        flat_bench.update(compared["stats"])
                        flat_bench["path"] = str(path)
                        annotate_source(flat_bench, source)
                        if self.compare_fail:
                            for check in self.compare_fail:
                                fail = check.fails(bench, flat_bench)
                                if fail:
                                    self.performance_regressions.append((flat_bench["fullname"], fail))
                        yield flat_bench
                flat_bench = bench.as_dict(include_data=False, flat=True)
                flat_bench["path"] = None
                annotate_source(flat_bench, compared and "NOW")
                yield flat_bench

    @property
    def next_num(self):
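        """Next zero-padded counter ("0001", "0002", ...) derived from the newest saved file in storage."""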
        files = self.storage.query("[0-9][0-9][0-9][0-9]_*")
        files.sort(reverse=True)
        if not files:
            return "0001"
        for f in files:
            try:
                return "%04i" % (int(str(f.name).split('_')[0]) + 1)
            except ValueError:
                raise

    def handle_saving(self):
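        """Write the full JSON report when --benchmark-json is given, and save a numbered run into
        storage when --benchmark-save or --benchmark-autosave is used."""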
        save = self.save or self.autosave
        if save or self.json:
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

        if self.json:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=True,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            with self.json as fh:
                fh.write(json.dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Wrote benchmark data in %s" % self.json, purple=True)

        if save:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=self.save_data,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            output_file = self.storage.get("%s_%s.json" % (self.next_num, save))
            assert not output_file.exists()

            with output_file.open('wb') as fh:
                fh.write(json.dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Saved benchmark data in %s" % output_file)

    def handle_loading(self):
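        """Load previously saved runs for --benchmark-compare and build compared_mapping, keyed by
        file path and then by benchmark fullname."""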
        self.compared_mapping = {}
        if self.compare:
            if self.compare is True:
                compared_benchmarks = list(self.storage.load('[0-9][0-9][0-9][0-9]_'))[-1:]
            else:
                compared_benchmarks = list(self.storage.load(self.compare))

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                    code = "BENCHMARK-C2"
                else:
                    msg += " match %r." % self.compare
                    code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=self.machine_info,
                    compared_benchmark=compared_benchmark,
                )
                self.compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmark %s" % path)

    def finish(self):
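        """Save and load benchmark data, then group the flattened results via pytest_benchmark_group_stats."""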
        self.handle_saving()
        self.handle_loading()
        self.groups = self.config.hook.pytest_benchmark_group_stats(
            config=self.config,
            benchmarks=self.prepare_benchmarks(),
            group_by=self.group_by
        )

    def display(self, tr):
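        """Render the grouped results as a table on the terminal reporter ``tr`` and check for regressions."""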
        if not self.groups:
            return

        tr.ensure_newline()
        results_table = ResultsTable(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
            logger=self.logger
        )
        results_table.display(tr, self.groups)
        self.check_regressions()

    def check_regressions(self):
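        """Fail the run with PerformanceRegression if any --benchmark-compare-fail check was violated."""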
        if self.compare_fail and not self.compared_mapping:
            raise pytest.UsageError("--benchmark-compare-fail requires valid --benchmark-compare.")

        if self.performance_regressions:
            self.logger.error("Performance has regressed:\n%s" % "\n".join(
                "\t%s - %s" % line for line in self.performance_regressions
            ))
            raise PerformanceRegression("Performance has regressed.")