import json
import logging
import os
import sys
from io import BytesIO
from io import StringIO

import py
import pytest
from freezegun import freeze_time
from pathlib import Path

from pytest_benchmark import plugin
from pytest_benchmark.plugin import BenchmarkSession
from pytest_benchmark.plugin import pytest_benchmark_compare_machine_info
from pytest_benchmark.plugin import pytest_benchmark_generate_json
from pytest_benchmark.plugin import pytest_benchmark_group_stats
from pytest_benchmark.session import PerformanceRegression
from pytest_benchmark.storage import Storage
from pytest_benchmark.utils import DifferenceRegressionCheck
from pytest_benchmark.utils import PercentageRegressionCheck
from pytest_benchmark.utils import get_machine_id

pytest_plugins = "pytester"


THIS = py.path.local(__file__)
STORAGE = THIS.dirpath(THIS.purebasename)

SAVE_DATA = json.load(STORAGE.listdir('0030_*.json')[0].open())
JSON_DATA = json.load(STORAGE.listdir('0030_*.json')[0].open())
SAVE_DATA["machine_info"] = JSON_DATA["machine_info"] = {'foo': 'bar'}
SAVE_DATA["commit_info"] = JSON_DATA["commit_info"] = {'foo': 'bar'}


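# A minimal attribute bag: whatever keyword arguments it is built with become
# attributes (and items), which is enough to stand in for pytest's config,
# hook and terminal-writer objects in these tests.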
class Namespace(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __getitem__(self, item):
        return self.__dict__[item]


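# A BytesIO whose contents remain readable after close(), so the tests can
# still inspect what the session wrote once it has closed the stream.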
class LooseFileLike(BytesIO):
    def close(self):
        value = self.getvalue()
        super(LooseFileLike, self).close()
        self.getvalue = lambda: value


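# A BenchmarkSession wired up by hand instead of through pytest's plugin
# machinery; it pre-loads the most recent saved run from the on-disk STORAGE
# directory as its "current" benchmarks.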
class MockSession(BenchmarkSession):
    def __init__(self):
        self.histogram = True
        self.storage = Storage(str(STORAGE), default_machine_id=get_machine_id(), logger=None)
        self.benchmarks = []
        self.performance_regressions = []
        self.sort = u"min"
        self.compare = '0001'
        self.logger = logging.getLogger(__name__)
        self.machine_id = "FoobarOS"
        self.machine_info = {'foo': 'bar'}
        self.save = self.autosave = self.json = False
        self.options = {
            'min_rounds': 123,
            'min_time': 234,
            'max_time': 345,
        }
        self.compare_fail = []
        self.config = Namespace(hook=Namespace(
            pytest_benchmark_group_stats=pytest_benchmark_group_stats,
            pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_machine_info=lambda **kwargs: None,
            pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
            pytest_benchmark_generate_json=pytest_benchmark_generate_json,
            pytest_benchmark_update_json=lambda **kwargs: None,
            pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_commit_info=lambda **kwargs: None,
        ))
        self.group_by = 'group'
        self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr',
                        'outliers', 'rounds', 'iterations']
        for bench_file in reversed(self.storage.query("[0-9][0-9][0-9][0-9]_*")):
            with bench_file.open('rU') as fh:
                data = json.load(fh)
            self.benchmarks.extend(
                Namespace(
                    as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench:
                        dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                    name=bench['name'],
                    fullname=bench['fullname'],
                    group=bench['group'],
                    options=bench['options'],
                    has_error=False,
                    params=None,
                    **bench['stats']
                )
                for bench in data['benchmarks']
            )
            break


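# Python 2/3 compatibility: `unicode` only exists on Python 2, so fall back
# to `str` on Python 3.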
try:
    text_type = unicode
except NameError:
    text_type = str


def force_text(text):
    if isinstance(text, text_type):
        return text
    else:
        return text.decode('utf-8')


def force_bytes(text):
    if isinstance(text, text_type):
        return text.encode('utf-8')
    else:
        return text


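# Each test gets its own freshly wired-up session.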
@pytest.fixture
def sess(request):
    return MockSession()


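# Replace the session's logger with one that captures warn/info/error
# messages in a StringIO, so tests can assert on the logged output.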
def make_logger(sess):
    output = StringIO()
    sess.logger = Namespace(
        warn=lambda code, text, **opts: output.write(u"%s: %s %s\n" % (code, force_text(text), opts)),
        info=lambda text, **opts: output.write(force_text(text) + u'\n'),
        error=lambda text: output.write(force_text(text) + u'\n'),
    )
    return output


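# Smoke test: comparing against every saved run and rendering the results
# table plus histogram should complete without raising.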
def test_rendering(sess):
    output = make_logger(sess)
    sess.histogram = os.path.join('docs', 'sample')
    sess.compare = '*/*'
    sess.sort = 'name'
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))


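# Failed checks are recorded on the session and then raised as
# PerformanceRegression, both from display() and from check_regressions().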
def test_regression_checks(sess):
    output = make_logger(sess)
    sess.handle_loading()
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    assert sess.performance_regressions == [
        ('tests/test_normal.py::test_xfast_parametrized[0]',
         "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
        ('tests/test_normal.py::test_xfast_parametrized[0]',
         "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
    ]
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
"""


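# Same checks against run 0002, whose saved stddev is zero: the percentage
# change is therefore reported as inf.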
@pytest.mark.skipif(sys.version_info[:2] < (2, 7),
                    reason="Something weird going on, see: https://bugs.python.org/issue4482")
def test_regression_checks_inf(sess):
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    assert sess.performance_regressions == [
        ('tests/test_normal.py::test_xfast_parametrized[0]',
         "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
        ('tests/test_normal.py::test_xfast_parametrized[0]',
         "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
    ]
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
"""


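# Comparison tests: comparing against a saved run should warn about the
# mismatched machine_info and render saved vs. current rows side by side.
# (The commented-out fixture below would use pytest's real terminal reporter.)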
# @pytest.fixture
# def terminalreporter(request):
#     return request.config.pluginmanager.getplugin("terminalreporter")


# def test_compare_1(sess, terminalreporter, LineMatcher):
def test_compare_1(sess, LineMatcher):
    output = make_logger(sess)
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmark 0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted'
        '-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns) Min *Max Mean StdDev Median IQR Outliers(*) Rounds Iterations',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        'test_xfast_parametrized[[]0[]] (0001) 217.3145 (1.0) 11*447.3891 (1.0) 262.2408 (1.00) 214.0442 (1.0) 220.1664 (1.00) 38.2154 (2.03) 90;1878 9987 418',
        'test_xfast_parametrized[[]0[]] (NOW) 217.9511 (1.00) 13*290.0380 (1.16) 261.2051 (1.0) 263.9842 (1.23) 220.1638 (1.0) 18.8080 (1.0) 160;1726 9710 431',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])


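# The same comparison against run 0002; its zero stddev shows up as an (inf)
# ratio in the current row.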
def test_compare_2(sess, LineMatcher):
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        section=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmark 0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns) Min *Max Mean StdDev Median IQR Outliers(*) Rounds Iterations',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        'test_xfast_parametrized[[]0[]] (0002) 216.9028 (1.0) 7*739.2997 (1.0) 254.0585 (1.0) 0.0000 (1.0) 219.8103 (1.0) 27.3309 (1.45) 235;1688 11009 410',
        'test_xfast_parametrized[[]0[]] (NOW) 217.9511 (1.00) 13*290.0380 (1.72) 261.2051 (1.03) 263.9842 (inf) 220.1638 (1.00) 18.8080 (1.0) 160;1726 9710 431',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])


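# The save tests freeze time so the report's 'datetime' field (and the
# generated file name) is deterministic, and pin the plugin version so the
# 'version' field matches the expected payload.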
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_json(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = False
    sess.autosave = False
    sess.json = LooseFileLike()
    sess.save_data = False
    sess.handle_saving()
    assert tmpdir.listdir() == []
    assert json.loads(sess.json.getvalue().decode()) == JSON_DATA


@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_name(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = 'foobar'
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == SAVE_DATA


@freeze_time("2015-08-15T00:04:18.687119")
def test_save_no_name(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == SAVE_DATA


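# Benchmarks flagged as errored are excluded from the report: a file is
# still written, but with an empty 'benchmarks' list.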
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_error(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    for bench in sess.benchmarks:
        bench.has_error = True
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == {
        'benchmarks': [],
        'commit_info': {'foo': 'bar'},
        'datetime': '2015-08-15T00:04:18.687119',
        'machine_info': {'foo': 'bar'},
        'version': '2.5.0'
    }


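# autosave=True with save=False (no explicit name) must still write exactly
# one JSON file containing the full payload.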
@freeze_time("2015-08-15T00:04:18.687119")
def test_autosave(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = False
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == SAVE_DATA