import json
import platform

import pytest

# Enables the `pytester` plugin, which provides the `testdir` fixture used by every test below.
pytest_plugins = 'pytester',

# Bare reference, presumably to keep linters from flagging the `platform` import as unused;
# the module is only needed by the string-based `skipif` condition further down.
platform


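# Runs `pytest --help` in the isolated test dir and checks (via fnmatch patterns) that every
# option the plugin registers shows up in the "benchmark:" option group with its help text.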
def test_help(testdir):
    result = testdir.runpytest('--help')
    result.stdout.fnmatch_lines([
        "*", "*",
        "benchmark:",
        " --benchmark-min-time=SECONDS",
        " Minimum time per round in seconds. Default: '0.000005'",
        " --benchmark-max-time=SECONDS",
        " Maximum run time per test - it will be repeated until",
        " this total time is reached. It may be exceeded if test",
        " function is very slow or --benchmark-min-rounds is",
        " large (it takes precedence). Default: '1.0'",
        " --benchmark-min-rounds=NUM",
        " Minimum rounds, even if total time would exceed",
        " `--max-time`. Default: 5",
        " --benchmark-timer=FUNC",
        " Timer to use when measuring time. Default:*",
        " --benchmark-calibration-precision=NUM",
        " Precision to use when calibrating number of",
        " iterations. Precision of 10 will make the timer look",
        " 10 times more accurate, at a cost of less precise",
        " measure of deviations. Default: 10",
        " --benchmark-warmup=[KIND]",
        " Activates warmup. Will run the test function up to",
        " number of times in the calibration phase. See",
        " `--benchmark-warmup-iterations`. Note: Even the warmup",
        " phase obeys --benchmark-max-time. Available KIND:",
        " 'auto', 'off', 'on'. Default: 'auto' (automatically",
        " activate on PyPy).",
        " --benchmark-warmup-iterations=NUM",
        " Max number of iterations to run in the warmup phase.",
        " Default: 100000",
        " --benchmark-disable-gc",
        " Disable GC during benchmarks.",
        " --benchmark-skip Skip running any tests that contain benchmarks.",
        " --benchmark-only Only run benchmarks.",
        " --benchmark-save=NAME",
        " Save the current run into 'STORAGE-",
        " PATH/counter_NAME.json'.",
        " --benchmark-autosave Autosave the current run into 'STORAGE-",
        " PATH/counter*.json",
        " --benchmark-save-data",
        " Use this to make --benchmark-save and --benchmark-",
        " autosave include all the timing data, not just the",
        " stats.",
        " --benchmark-json=PATH",
        " Dump a JSON report into PATH. Note that this will",
        " include the complete data (all the timings, not just",
        " the stats).",
        " --benchmark-compare=[NUM|_ID]",
        " Compare the current run against run NUM (or prefix of",
        " _id in elasticsearch) or the latest saved run if",
        " unspecified.",
        " --benchmark-compare-fail=EXPR?[[]EXPR?...[]]",
        " Fail test if performance regresses according to given",
        " EXPR (eg: min:5% or mean:0.001 for number of seconds).",
        " Can be used multiple times.",
        " --benchmark-cprofile=COLUMN",
        " If specified measure one run with cProfile and stores",
        " 10 top functions. Argument is a column to sort by.",
        " Available columns: 'ncallls_recursion', 'ncalls',",
        " 'tottime', 'tottime_per', 'cumtime', 'cumtime_per',",
        " 'function_name'.",
        " --benchmark-storage=URI",
        " Specify a path to store the runs as uri in form",
        " file://path or elasticsearch+http[s]://host1,host2/[in",
        " dex/doctype?project_name=Project] (when --benchmark-",
        " save or --benchmark-autosave are used). For backwards",
        " compatibility unexpected values are converted to",
        " file://<value>. Default: 'file://./.benchmarks'.",
        " --benchmark-verbose Dump diagnostic and progress information.",
        " --benchmark-sort=COL Column to sort on. Can be one of: 'min', 'max',",
        " 'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
        " --benchmark-group-by=LABEL",
        " How to group tests. Can be one of: 'group', 'name',",
        " 'fullname', 'func', 'fullfunc', 'param' or",
        " 'param:NAME', where NAME is the name passed to",
        " @pytest.parametrize. Default: 'group'",
        " --benchmark-columns=LABELS",
        " Comma-separated list of columns to show in the result",
        " table. Default: 'min, max, mean, stddev, median, iqr,",
        " outliers, rounds, iterations'",
        " --benchmark-histogram=[FILENAME-PREFIX]",
        " Plot graphs of min/max/avg/stddev over time in",
        " FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        " contains slashes ('/') then directories will be",
        " created. Default: '*'",
        "*",
    ])


def test_groups(testdir):
    test = testdir.makepyfile('''"""
>>> print('Yay, doctests!')
Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    benchmark(lambda: time.sleep(0.000001))
    assert 1 == 1

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.001))
    assert 1 == 1

@pytest.mark.benchmark(group="A")
def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))
    assert 1 == 1

@pytest.mark.benchmark(group="A", warmup=True)
def test_xfast(benchmark):
    benchmark(lambda: None)
    assert 1 == 1
''')
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_groups.py::*test_groups PASSED*",
        "test_groups.py::test_fast PASSED*",
        "test_groups.py::test_slow PASSED*",
        "test_groups.py::test_slower PASSED*",
        "test_groups.py::test_xfast PASSED*",
        "*",
        "* benchmark: 2 tests *",
        "*",
        "* benchmark 'A': 2 tests *",
        "*",
        "*====== 5 passed* seconds ======*",
    ])


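# A minimal benchmark module (plus a module-level doctest) that most of the tests below
# feed to `testdir.makepyfile(...)`.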
SIMPLE_TEST = '''
"""
>>> print('Yay, doctests!')
Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.1))
    assert 1 == 1
'''

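# Two parametrized benchmarks in groups "A" and "B"; the --benchmark-group-by tests write this
# module out twice (as test_x.py and test_y.py) so grouping across files can be checked.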
GROUPING_TEST = '''
import pytest

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo):
    benchmark(str)

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo):
    benchmark(int)
'''

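# Same idea, but with two named parameters (foo and bar) so 'param:NAME' grouping can be exercised.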
GROUPING_PARAMS_TEST = '''
import pytest

@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo, bar):
    benchmark(str)


@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo, bar):
    benchmark(int)
'''


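# The following tests differ only in the --benchmark-group-by value; each asserts on the group
# headers ("benchmark '<label>': N tests") that end up in the terminal report.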
def test_group_by_name(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'name', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_a[[]0[]]': 2 tests *",
        'Name (time in ?s) *',
        '----------------------*',
        'test_a[[]0[]] *',
        'test_a[[]0[]] *',
        '----------------------*',
        '*',
        "* benchmark 'test_a[[]1[]]': 2 tests *",
        'Name (time in ?s) *',
        '----------------------*',
        'test_a[[]1[]] *',
        'test_a[[]1[]] *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]0[]]': 2 tests *",
        'Name (time in ?s) *',
        '----------------------*',
        'test_b[[]0[]] *',
        'test_b[[]0[]] *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]1[]]': 2 tests *",
        'Name (time in ?s) *',
        '----------------------*',
        'test_b[[]1[]] *',
        'test_b[[]1[]] *',
        '----------------------*',
    ])


def test_group_by_func(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'func', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*',
        "* benchmark 'test_a': 4 tests *",
        'Name (time in ?s) *',
        '----------------------*',
        'test_a[[]*[]] *',
        'test_a[[]*[]] *',
        'test_a[[]*[]] *',
        'test_a[[]*[]] *',
        '----------------------*',
        '*',
        "* benchmark 'test_b': 4 tests *",
        'Name (time in ?s) *',
        '----------------------*',
        'test_b[[]*[]] *',
        'test_b[[]*[]] *',
        'test_b[[]*[]] *',
        'test_b[[]*[]] *',
        '----------------------*',
        '*', '*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullfunc(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullfunc', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_x.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]] *',
        'test_a[[]*[]] *',
        '------------------*',
        '',
        "* benchmark 'test_x.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]] *',
        'test_b[[]*[]] *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]] *',
        'test_a[[]*[]] *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]] *',
        'test_b[[]*[]] *',
        '------------------*',
        '',
        'Legend:',
        ' Outliers: 1 Standard Deviation from M*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_all(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark '0': 4 tests *",
        'Name (time in ?s) *',
        '-------------------*',
        'test_*[[]0[]] *',
        'test_*[[]0[]] *',
        'test_*[[]0[]] *',
        'test_*[[]0[]] *',
        '-------------------*',
        '',
        "* benchmark '1': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_*[[]1[]] *',
        'test_*[[]1[]] *',
        'test_*[[]1[]] *',
        'test_*[[]1[]] *',
        '------------------*',
        '',
        'Legend:',
        ' Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_select(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1': 4 tests *",
        'Name (time in ?s) *',
        '-------------------*',
        'test_a[[]foo1-bar1[]] *',
        'test_a[[]foo1-bar2[]] *',
        'test_b[[]foo1-bar1[]] *',
        'test_b[[]foo1-bar2[]] *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]] *',
        'test_a[[]foo2-bar2[]] *',
        'test_b[[]foo2-bar1[]] *',
        'test_b[[]foo2-bar2[]] *',
        '------------------*',
        '',
        'Legend:',
        ' Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_select_multiple(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo,param:bar',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1 bar=bar1': 2 tests *",
        'Name (time in ?s) *',
        '-------------------*',
        'test_a[[]foo1-bar1[]] *',
        'test_b[[]foo1-bar1[]] *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo1 bar=bar2': 2 tests *",
        'Name (time in ?s) *',
        '-------------------*',
        'test_a[[]foo1-bar2[]] *',
        'test_b[[]foo1-bar2[]] *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar1': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]] *',
        'test_b[[]foo2-bar1[]] *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar2': 2 tests *",
        'Name (time in ?s) *',
        '-------------------*',
        'test_a[[]foo2-bar2[]] *',
        'test_b[[]foo2-bar2[]] *',
        '------------------*',
        '',
        'Legend:',
        ' Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullname(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullname', test_x, test_y)
    result.stdout.fnmatch_lines_random([
        "* benchmark 'test_x.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]1[]]': 1 tests *",
        '============* 8 passed* seconds ============*',
    ])


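# The fixture may only be used once per test: mixing benchmark(...) and benchmark.pedantic(...)
# in either order should raise FixtureAlreadyUsed.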
def test_double_use(testdir):
    test = testdir.makepyfile('''
def test_a(benchmark):
    benchmark(lambda: None)
    benchmark.pedantic(lambda: None)

def test_b(benchmark):
    benchmark.pedantic(lambda: None)
    benchmark(lambda: None)
''')
    result = testdir.runpytest(test, '--tb=line')
    result.stdout.fnmatch_lines([
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark(...) mode.',
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark.pedantic(...) mode.',
    ])


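# Option validation: conflicting flags and malformed option values are rejected up front,
# with the error reported on stderr instead of running any tests.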
def test_conflict_between_only_and_skip(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-skip', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-skip options."
    ])


def test_conflict_between_only_and_disable(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-disable', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
        "automatically activated if xdist is on or you're missing the statistics dependency."
    ])


def test_max_time_min_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time_min_rounds.py ...*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast * 1 *",
        "test_slow * 1 *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time.py ...*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast * 5 *",
        "test_slow * 5 *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_bogus_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-max-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


@pytest.mark.skipif("platform.python_implementation() == 'PyPy'")
def test_pep418_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               '--benchmark-timer=pep418.perf_counter', test)
    result.stdout.fnmatch_lines([
        "* (defaults: timer=*.perf_counter*",
    ])


def test_bad_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=asd:f?', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-save: Must not contain any of these characters: /:*?<>|\\ (it has ':?')",
    ])


def test_bad_save_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-save: Can't be empty.",
    ])


def test_bad_compare_fail(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-compare-fail=?', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-compare-fail: Could not parse value: '?'.",
    ])


def test_bad_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=asd', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-min-rounds: invalid literal for int() with base 10: 'asd'",
    ])


def test_bad_rounds_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=0', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-min-rounds: Value for --benchmark-rounds must be at least 1.",
    ])


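# Comparison features: a first run is saved via --benchmark-autosave (becoming run 0001) and a
# second run is compared against it, with --benchmark-compare-fail thresholds given both as an
# absolute value (min:0.1) and as a percentage (min:1%).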
def test_compare(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


def test_compare_last(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


def test_compare_non_existing(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002', '-rw',
                               test)
    result.stdout.fnmatch_lines([
        "* Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_non_existing_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002',
                               test, '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_no_files(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare')
    result.stdout.fnmatch_lines([
        "* Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_match(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare=1')
    result.stdout.fnmatch_lines([
        "* Can't compare. No benchmark files in '*' match '1'."
    ])


def test_compare_no_files_match_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare=1', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*' match '1'."
    ])


def test_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-verbose',
                               '-vv', test)
    result.stderr.fnmatch_lines([
        " Timer precision: *s",
        " Calibrating to target round *s; will estimate when reaching *s.",
        " Measured * iterations: *s.",
        " Running * rounds x * iterations ...",
        " Ran for *s.",
    ])


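# Saving: --benchmark-save/--benchmark-autosave write numbered JSON files under .benchmarks/;
# these tests only check that the saved file parses as JSON (and, below, that extra_info is kept).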
def test_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())


def test_save_extra_info(testdir):
    test = testdir.makepyfile("""
def test_extra(benchmark):
    benchmark.extra_info['foo'] = 'bar'
    benchmark(lambda: None)
""")
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    info = json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())
    bench_info = info['benchmarks'][0]
    assert bench_info['name'] == 'test_extra'
    assert bench_info['extra_info'] == {'foo': 'bar'}



def test_update_machine_info_hook_detection(testdir):
    """Tests detection and execution of update_machine_info hooks.

    Verifies that machine info hooks are detected and executed in nested
    `conftest.py`s.

    """

    record_path_conftest = '''
import os

def pytest_benchmark_update_machine_info(config, machine_info):
    machine_info["conftest_path"] = (
        machine_info.get("conftest_path", []) + [os.path.relpath(__file__)]
    )
'''

    simple_test = '''
def test_simple(benchmark):
    @benchmark
    def result():
        1 + 1
'''

    testdir.makepyfile(**{
        "conftest": record_path_conftest,
        "test_module/conftest": record_path_conftest,
        "test_module/tests/conftest": record_path_conftest,
        "test_module/tests/simple_test.py": simple_test,
    })

    def run_verify_pytest(*args):
        testdir.runpytest(
            '--benchmark-json=benchmark.json',
            '--benchmark-max-time=0.0000001',
            *args
        )

        benchmark_json = json.loads(testdir.tmpdir.join('benchmark.json').read())
        machine_info = benchmark_json["machine_info"]

        assert sorted(machine_info["conftest_path"]) == sorted([
            "conftest.py",
            "test_module/conftest.py",
            "test_module/tests/conftest.py",
        ])

    run_verify_pytest("test_module/tests")
    run_verify_pytest("test_module")
    run_verify_pytest(".")


def test_histogram(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-histogram=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Generated histogram: *foobar.svg",
    ])
    assert [f.basename for f in testdir.tmpdir.listdir("*.svg", sort=True)] == [
        'foobar.svg',
    ]


def test_autosave(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-autosave',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].listdir('0001_*.json')[0].read())


def test_bogus_min_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


def test_disable_gc(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-disable-gc', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_disable_gc.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast *",
        "test_slow *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_custom_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=time.time', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_custom_timer.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast *",
        "test_slow *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-timer: Value for --benchmark-timer must be in dotted form. Eg: "
        "'module.attr'.",
    ])


def test_sort_by_mean(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=mean', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_sort_by_mean.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast *",
        "test_slow *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_sort(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one "
        "of: 'min', 'max', 'mean', 'stddev', 'name', 'fullname'."
    ])


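# Benchmarks are automatically disabled when pytest-xdist is active; importorskip keeps these
# tests from failing where xdist isn't installed.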
def test_xdist(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '-rw', test)
    result.stdout.fnmatch_lines([
        "* Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be "
        "performed reliably in a parallelized environment.",
    ])


def test_xdist_verbose(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        "------*",
        " WARNING: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed "
        "reliably in a parallelized environment.",
        "------*",
    ])


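# --benchmark-cprofile output check. The per-function rows are left commented out below,
# presumably because the exact timings and paths vary from run to run.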
def test_cprofile(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-cprofile=cumtime', test)
    result.stdout.fnmatch_lines([
        "============*=========== cProfile information ============*===========",
        "Time in s",
        "test_cprofile.py::test_fast",
        "ncalls tottime percall cumtime percall filename:lineno(function)",
        # "1 0.0000 0.0000 0.0001 0.0001 test_cprofile0/test_cprofile.py:9(result)",
        # "1 0.0001 0.0001 0.0001 0.0001 ~:0(<built-in method time.sleep>)",
        # "1 0.0000 0.0000 0.0000 0.0000 ~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
        "",
        "test_cprofile.py::test_slow",
        "ncalls tottime percall cumtime percall filename:lineno(function)",
        # "1 0.0000 0.0000 0.1002 0.1002 test_cprofile0/test_cprofile.py:15(<lambda>)",
        # "1 0.1002 0.1002 0.1002 0.1002 ~:0(<built-in method time.sleep>)",
        # "1 0.0000 0.0000 0.0000 0.0000 ~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
    ])


def test_abort_broken(testdir):
    """
    Test that we don't benchmark code that raises exceptions.
    """
    test = testdir.makepyfile('''
"""
>>> print('Yay, doctests!')
Yay, doctests!
"""
import time
import pytest

def test_bad(benchmark):
    @benchmark
    def result():
        raise Exception()
    assert 1 == 1

def test_bad2(benchmark):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0

@pytest.fixture(params=['a', 'b', 'c'])
def bad_fixture(request):
    raise ImportError()

def test_ok(benchmark, bad_fixture):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0
''')
    result = testdir.runpytest('-vv', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_abort_broken.py::test_bad FAILED*",
        "test_abort_broken.py::test_bad2 FAILED*",
        "test_abort_broken.py::test_ok*a* ERROR*",
        "test_abort_broken.py::test_ok*b* ERROR*",
        "test_abort_broken.py::test_ok*c* ERROR*",

        "*====== ERRORS ======*",
        "*______ ERROR at setup of test_ok[[]a[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[a]'>>",

        " @pytest.fixture(params=['a', 'b', 'c'])",
        " def bad_fixture(request):",
        "> raise ImportError()",
        "E ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]b[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[b]'>>",

        " @pytest.fixture(params=['a', 'b', 'c'])",
        " def bad_fixture(request):",
        "> raise ImportError()",
        "E ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]c[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[c]'>>",

        " @pytest.fixture(params=['a', 'b', 'c'])",
        " def bad_fixture(request):",
        "> raise ImportError()",
        "E ImportError",

        "test_abort_broken.py:22: ImportError",
        "*====== FAILURES ======*",
        "*______ test_bad ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        " def test_bad(benchmark):",
        "> @benchmark",
        " def result():",

        "test_abort_broken.py:*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",
        "*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",

        " @benchmark",
        " def result():",
        "> raise Exception()",
        "E Exception",

        "test_abort_broken.py:11: Exception",
        "*______ test_bad2 ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        " def test_bad2(benchmark):",
        " @benchmark",
        " def result():",
        " time.sleep(0.1)",
        "> assert 1 == 0",
        "E assert 1 == 0",

        "test_abort_broken.py:18: AssertionError",
    ])

    result.stdout.fnmatch_lines([
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_bad2 *",
        "------*",

        "*====== 2 failed*, 3 error* seconds ======*",
    ])


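# A larger module used by the run-mode tests below. Note that `test_fast` is defined twice, so
# the second definition shadows the first and only 5 items (4 tests + the doctest) are collected.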
BASIC_TEST = '''
"""
Just to make sure the plugin doesn't choke on doctests::
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
from functools import partial

import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    assert benchmark(partial(time.sleep, 0.001)) is None

def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))

@pytest.mark.benchmark(min_rounds=2)
def test_xfast(benchmark):
    benchmark(str)

def test_fast(benchmark):
    benchmark(int)
'''


def test_basic(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_basic.py::*test_basic PASSED*",
        "test_basic.py::test_slow PASSED*",
        "test_basic.py::test_slower PASSED*",
        "test_basic.py::test_xfast PASSED*",
        "test_basic.py::test_fast PASSED*",
        "",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_* *",
        "test_* *",
        "test_* *",
        "test_* *",
        "------*",
        "",
        "*====== 5 passed* seconds ======*",
    ])


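# Run modes: --benchmark-skip skips benchmark tests, --benchmark-disable runs them without
# measuring, `-m benchmark` selects only marked tests, and --benchmark-only skips everything else.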
def test_skip(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_skip.py::*test_skip PASSED*",
        "test_skip.py::test_slow SKIPPED*",
        "test_skip.py::test_slower SKIPPED*",
        "test_skip.py::test_xfast SKIPPED*",
        "test_skip.py::test_fast SKIPPED*",
        "*====== 1 passed, 4 skipped* seconds ======*",
    ])


def test_disable(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_disable.py::*test_disable PASSED*",
        "test_disable.py::test_slow PASSED*",
        "test_disable.py::test_slower PASSED*",
        "test_disable.py::test_xfast PASSED*",
        "test_disable.py::test_fast PASSED*",
        "*====== 5 passed * seconds ======*",
    ])


def test_mark_selection(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items*",
        "test_mark_selection.py::test_xfast PASSED*",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_xfast *",
        "------*",
        "*====== 1 passed, 4 deselected* seconds ======*",
    ])


def test_only_benchmarks(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-only', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_only_benchmarks.py::*test_only_benchmarks SKIPPED*",
        "test_only_benchmarks.py::test_slow PASSED*",
        "test_only_benchmarks.py::test_slower PASSED*",
        "test_only_benchmarks.py::test_xfast PASSED*",
        "test_only_benchmarks.py::test_fast PASSED*",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_* *",
        "test_* *",
        "test_* *",
        "test_* *",
        "------*",
        "*====== 4 passed, 1 skipped* seconds ======*",
    ])


def test_columns(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns.py ...*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * Iterations * Min *",
        "------*",
    ])