from __future__ import division
from __future__ import print_function

import pytest

from .fixture import statistics
from .fixture import statistics_error
from .logger import Logger
from .table import TableResults
from .utils import NAME_FORMATTERS
from .utils import SecondsDecimal
from .utils import cached_property
from .utils import first_or_value
from .utils import get_machine_id
from .utils import load_storage
from .utils import load_timer
from .utils import safe_dumps
from .utils import short_filename


class PerformanceRegression(pytest.UsageError):
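    """Raised by ``check_regressions()`` when a ``--benchmark-compare-fail``
    check fails. Subclasses ``pytest.UsageError`` so pytest reports it as a
    usage error rather than an ordinary test failure."""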
    pass


class BenchmarkSession(object):
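    """Per-session plugin state: collects benchmark results as tests run,
    then saves, compares and displays them at the end of the session."""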
    compared_mapping = None
    groups = None

    def __init__(self, config):
        self.verbose = config.getoption("benchmark_verbose")
        self.logger = Logger(self.verbose, config)
        self.config = config
        self.performance_regressions = []
        self.benchmarks = []
        self.machine_id = get_machine_id()
        self.storage = load_storage(
            config.getoption("benchmark_storage"),
            logger=self.logger,
            default_machine_id=self.machine_id,
            netrc=config.getoption("benchmark_netrc")
        )
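        # Runtime options consumed by the benchmark fixture; all come from CLI flags.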
        self.options = dict(
            min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
            min_rounds=config.getoption("benchmark_min_rounds"),
            max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
            timer=load_timer(config.getoption("benchmark_timer")),
            calibration_precision=config.getoption("benchmark_calibration_precision"),
            disable_gc=config.getoption("benchmark_disable_gc"),
            warmup=config.getoption("benchmark_warmup"),
            warmup_iterations=config.getoption("benchmark_warmup_iterations"),
            use_cprofile=bool(config.getoption("benchmark_cprofile")),
        )
        self.skip = config.getoption("benchmark_skip")
        self.disabled = config.getoption("benchmark_disable") and not config.getoption("benchmark_enable")
        self.cprofile_sort_by = config.getoption("benchmark_cprofile")

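        # Timings are unreliable when tests run in parallel, so benchmarks are
        # force-disabled under pytest-xdist (on the master and on workers alike).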
        if config.getoption("dist", "no") != "no" and not self.skip:
            self.logger.warn(
                "BENCHMARK-U2",
                "Benchmarks are automatically disabled because xdist plugin is active. "
                "Benchmarks cannot be performed reliably in a parallelized environment.",
                fslocation="::"
            )
            self.disabled = True
        if hasattr(config, "slaveinput"):
            self.disabled = True
        if not statistics:
            self.logger.warn(
                "BENCHMARK-U3",
                "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
                statistics_error,
                fslocation="::"
            )
            self.disabled = True

        self.only = config.getoption("benchmark_only")
        self.sort = config.getoption("benchmark_sort")
        self.columns = config.getoption("benchmark_columns")
        if self.skip and self.only:
            raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
        if self.disabled and self.only:
            raise pytest.UsageError(
                "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
                "automatically activated if xdist is on or you're missing the statistics dependency.")
        self.group_by = config.getoption("benchmark_group_by")
        self.save = config.getoption("benchmark_save")
        self.autosave = config.getoption("benchmark_autosave")
        self.save_data = config.getoption("benchmark_save_data")
        self.json = config.getoption("benchmark_json")
        self.compare = config.getoption("benchmark_compare")
        self.compare_fail = config.getoption("benchmark_compare_fail")
        self.name_format = NAME_FORMATTERS[config.getoption("benchmark_name")]

        self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)

    @cached_property
    def machine_info(self):
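        """Machine info built by the ``pytest_benchmark_generate_machine_info``
        hook and post-processed by ``pytest_benchmark_update_machine_info``;
        computed once per session and cached."""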
        obj = self.config.hook.pytest_benchmark_generate_machine_info(config=self.config)
        self.config.hook.pytest_benchmark_update_machine_info(
            config=self.config,
            machine_info=obj
        )
        return obj

    def prepare_benchmarks(self):
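        """Yield a flat dict for every benchmark that ran, plus one dict per
        matching benchmark loaded via ``--benchmark-compare``. Any
        ``--benchmark-compare-fail`` check that trips is recorded in
        ``self.performance_regressions`` for ``check_regressions()``."""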
        for bench in self.benchmarks:
            if bench:
                compared = False
                for path, compared_mapping in self.compared_mapping.items():
                    if bench.fullname in compared_mapping:
                        compared = compared_mapping[bench.fullname]
                        source = short_filename(path, self.machine_id)
                        flat_bench = bench.as_dict(include_data=False, stats=False, cprofile=self.cprofile_sort_by)
                        flat_bench.update(compared["stats"])
                        flat_bench["path"] = str(path)
                        flat_bench["source"] = source
                        if self.compare_fail:
                            for check in self.compare_fail:
                                fail = check.fails(bench, flat_bench)
                                if fail:
                                    self.performance_regressions.append((self.name_format(flat_bench), fail))
                        yield flat_bench
                flat_bench = bench.as_dict(
                    include_data=False,
                    flat=True,
                    cprofile=self.cprofile_sort_by,
                    columns=self.columns
                )
                flat_bench["path"] = None
                flat_bench["source"] = compared and "NOW"
                yield flat_bench

    def save_json(self, output_json):
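        """Write ``output_json`` to the file given via ``--benchmark-json``."""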
        with self.json as fh:
            fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
        self.logger.info("Wrote benchmark data in: %s" % self.json, purple=True)

    def handle_saving(self):
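        """Save results if ``--benchmark-save``/``--benchmark-autosave`` or
        ``--benchmark-json`` were given. The JSON payload is generated
        separately for each target because ``--benchmark-json`` always embeds
        the raw timing data, while saved runs embed it only when
        ``--benchmark-save-data`` is set."""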
        save = self.save or self.autosave
        if save or self.json:
            if not self.benchmarks:
                self.logger.warn("BENCHMARK-U2", "Not saving anything, no benchmarks have been run!")
                return
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

        if self.json:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=True,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            self.save_json(output_json)

        if save:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=self.save_data,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            self.storage.save(output_json, save)

    def handle_loading(self):
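        """Load the runs selected by ``--benchmark-compare`` (just the most
        recent run when no argument is given) and build
        ``self.compared_mapping`` as ``{path: {fullname: benchmark_dict}}``.
        Machine info differences are reported through the
        ``pytest_benchmark_compare_machine_info`` hook."""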
        compared_mapping = {}
        if self.compare:
            if self.compare is True:
                compared_benchmarks = list(self.storage.load())[-1:]
            else:
                compared_benchmarks = list(self.storage.load(self.compare))

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                    code = "BENCHMARK-C2"
                else:
                    msg += " match %r." % self.compare
                    code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=self.machine_info,
                    compared_benchmark=compared_benchmark,
                )
                compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmarks from: %s" % path, newline=False)
        self.compared_mapping = compared_mapping

    def finish(self):
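        """Run at session end: save results, then group the prepared
        benchmarks through the ``pytest_benchmark_group_stats`` hook so that
        ``display()`` has something to render."""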
        self.handle_saving()
        prepared_benchmarks = list(self.prepare_benchmarks())
        if prepared_benchmarks:
            self.groups = self.config.hook.pytest_benchmark_group_stats(
                config=self.config,
                benchmarks=prepared_benchmarks,
                group_by=self.group_by
            )

    def display(self, tr):
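        """Render the results table (and optional cProfile output) to the
        given terminal reporter, then enforce the regression checks."""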
        if not self.groups:
            return

        tr.ensure_newline()
        results_table = TableResults(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
            name_format=self.name_format,
            logger=self.logger
        )
        results_table.display(tr, self.groups)
        self.check_regressions()
        self.display_cprofile(tr)

    def check_regressions(self):
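        """Fail the session with ``PerformanceRegression`` if any
        ``--benchmark-compare-fail`` check recorded a regression."""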
        if self.compare_fail and not self.compared_mapping:
            raise pytest.UsageError("--benchmark-compare-fail requires valid --benchmark-compare.")

        if self.performance_regressions:
            self.logger.error("Performance has regressed:\n%s" % "\n".join(
                "\t%s - %s" % line for line in self.performance_regressions
            ))
            raise PerformanceRegression("Performance has regressed.")

    def display_cprofile(self, tr):
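        """When ``--benchmark-cprofile`` is enabled, print a pstats-style
        table (ncalls/tottime/percall/cumtime/percall per function) for every
        benchmark in every group."""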
        if self.options["use_cprofile"]:
            tr.section("cProfile information")
            tr.write_line("Time in s")
            for group in self.groups:
                group_name, benchmarks = group
                for benchmark in benchmarks:
                    tr.write(benchmark["fullname"], yellow=True)
                    if benchmark["source"]:
                        tr.write_line(" ({})".format(benchmark["source"]))
                    else:
                        tr.write("\n")
                    tr.write_line("ncalls\ttottime\tpercall\tcumtime\tpercall\tfilename:lineno(function)")
                    for function_info in benchmark["cprofile"]:
                        line = ("{ncalls_recursion}\t{tottime:.{prec}f}\t{tottime_per:.{prec}f}\t{cumtime:.{prec}f}"
                                "\t{cumtime_per:.{prec}f}\t{function_name}").format(
                            prec=4, **function_info)
                        tr.write_line(line)
                    tr.write("\n")