Completed — push to master (095af2...faa823) by Ionel Cristian, 53s

tests.test_group_by_param_select_multiple() — grade B

Metric              Value
Complexity (cc)     1 condition
Size (loc)          39 total lines
Duplication (dl)    0 lines (0 %)
rs                  8.8571
import json
import platform

import pytest

pytest_plugins = 'pytester',
platform
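# `pytest_plugins = 'pytester',` is a one-element tuple (note the trailing comma);
# the pytester plugin provides the `testdir` fixture used throughout this module.
# The bare `platform` expression has no runtime effect; presumably it only marks the
# import as used, since `platform` is otherwise referenced just inside the skipif()
# string expressions further down.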


def test_help(testdir):
    result = testdir.runpytest('--help')
    result.stdout.fnmatch_lines([
        "*", "*",
        "benchmark:",
        "  --benchmark-min-time=SECONDS",
        "                        Minimum time per round in seconds. Default: '0.000005'",
        "  --benchmark-max-time=SECONDS",
        "                        Maximum run time per test - it will be repeated until",
        "                        this total time is reached. It may be exceeded if test",
        "                        function is very slow or --benchmark-min-rounds is",
        "                        large (it takes precedence). Default: '1.0'",
        "  --benchmark-min-rounds=NUM",
        "                        Minimum rounds, even if total time would exceed",
        "                        `--max-time`. Default: 5",
        "  --benchmark-timer=FUNC",
        "                        Timer to use when measuring time. Default:*",
        "  --benchmark-calibration-precision=NUM",
        "                        Precision to use when calibrating number of",
        "                        iterations. Precision of 10 will make the timer look",
        "                        10 times more accurate, at a cost of less precise",
        "                        measure of deviations. Default: 10",
        "  --benchmark-warmup=[KIND]",
        "                        Activates warmup. Will run the test function up to",
        "                        number of times in the calibration phase. See",
        "                        `--benchmark-warmup-iterations`. Note: Even the warmup",
        "                        phase obeys --benchmark-max-time. Available KIND:",
        "                        'auto', 'off', 'on'. Default: 'auto' (automatically",
        "                        activate on PyPy).",
        "  --benchmark-warmup-iterations=NUM",
        "                        Max number of iterations to run in the warmup phase.",
        "                        Default: 100000",
        "  --benchmark-disable-gc",
        "                        Disable GC during benchmarks.",
        "  --benchmark-skip      Skip running any tests that contain benchmarks.",
        "  --benchmark-only      Only run benchmarks.",
        "  --benchmark-save=NAME",
        "                        Save the current run into 'STORAGE-",
        "                        PATH/counter_NAME.json'.",
        "  --benchmark-autosave  Autosave the current run into 'STORAGE-",
        "                        PATH/counter_*.json",
        "  --benchmark-save-data",
        "                        Use this to make --benchmark-save and --benchmark-",
        "                        autosave include all the timing data, not just the",
        "                        stats.",
        "  --benchmark-json=PATH",
        "                        Dump a JSON report into PATH. Note that this will",
        "                        include the complete data (all the timings, not just",
        "                        the stats).",
        "  --benchmark-compare=[NUM]",
        "                        Compare the current run against run NUM or the latest",
        "                        saved run if unspecified.",
        "  --benchmark-compare-fail=EXPR=[EXPR=...]",
        "                        Fail test if performance regresses according to given",
        "                        EXPR (eg: min:5% or mean:0.001 for number of seconds).",
        "                        Can be used multiple times.",
        "  --benchmark-storage=STORAGE-PATH",
        "                        Specify a different path to store the runs (when",
        "                        --benchmark-save or --benchmark-autosave are used).",
        "                        Default: './.benchmarks'",
        "  --benchmark-verbose   Dump diagnostic and progress information.",
        "  --benchmark-sort=COL  Column to sort on. Can be one of: 'min', 'max',",
        "                        'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
        "  --benchmark-group-by=LABEL",
        "                        How to group tests. Can be one of: 'group', 'name',",
        "                        'fullname', 'func', 'fullfunc', 'param' or",
        "                        'param:NAME', where NAME is the name passed to",
        "                        @pytest.parametrize. Default: 'group'",
        "  --benchmark-columns=LABELS",
        "                        Comma-separated list of columns to show in the result",
        "                        table. Default: 'min, max, mean, stddev, median, iqr,",
        "                        outliers, rounds, iterations'",
        "  --benchmark-histogram=[FILENAME-PREFIX]",
        "                        Plot graphs of min/max/avg/stddev over time in",
        "                        FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        "                        contains slashes ('/') then directories will be",
        "                        created. Default: '*'",
        "*",
    ])
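# The options pinned above map directly onto command-line usage; an illustrative
# (hypothetical) invocation combining a few of them:
#   pytest --benchmark-min-rounds=10 --benchmark-sort=mean --benchmark-columns=min,max,mean,stddev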


def test_groups(testdir):
    test = testdir.makepyfile('''"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    benchmark(lambda: time.sleep(0.000001))
    assert 1 == 1

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.001))
    assert 1 == 1

@pytest.mark.benchmark(group="A")
def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))
    assert 1 == 1

@pytest.mark.benchmark(group="A", warmup=True)
def test_xfast(benchmark):
    benchmark(lambda: None)
    assert 1 == 1
''')
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_groups.py::*test_groups PASSED",
        "test_groups.py::test_fast PASSED",
        "test_groups.py::test_slow PASSED",
        "test_groups.py::test_slower PASSED",
        "test_groups.py::test_xfast PASSED",
        "*",
        "* benchmark: 2 tests *",
        "*",
        "* benchmark 'A': 2 tests *",
        "*",
        "*====== 5 passed* seconds ======*",
    ])


SIMPLE_TEST = '''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.1))
    assert 1 == 1
'''
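# SIMPLE_TEST is the shared module source reused by most CLI/option tests below:
# a module-level doctest plus two benchmarked tests, so a run with --doctest-modules
# collects exactly three items.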

GROUPING_TEST = '''
import pytest

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo):
    benchmark(str)

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo):
    benchmark(int)
'''

GROUPING_PARAMS_TEST = '''
import pytest

@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo, bar):
    benchmark(str)


@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo, bar):
    benchmark(int)
'''
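# GROUPING_TEST parametrizes test_a/test_b over a single parameter (foo) in two
# benchmark groups; GROUPING_PARAMS_TEST adds a second parameter (bar). Together they
# drive the --benchmark-group-by variants exercised below ('name', 'func', 'fullfunc',
# 'fullname', 'param' and 'param:NAME').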


def test_group_by_name(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'name', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_a[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]0[]]             *',
        'test_a[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_a[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]1[]]             *',
        'test_a[[]1[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]0[]]             *',
        'test_b[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]1[]]             *',
        'test_b[[]1[]]             *',
        '----------------------*',
    ])


def test_group_by_func(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'func', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*',
        "* benchmark 'test_a': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        '----------------------*',
        '*', '*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullfunc(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullfunc', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_x.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_x.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from M*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_all(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark '0': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        '-------------------*',
        '',
        "* benchmark '1': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])

def test_group_by_param_select(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_select_multiple(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo,param:bar',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1 bar=bar1': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo1 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar1': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])
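# Roughly equivalent command line for the multi-parameter grouping exercised above
# (illustrative only; the file name is whatever makepyfile generated):
#   pytest test_x.py --benchmark-group-by=param:foo,param:bar --benchmark-sort=fullname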

def test_group_by_fullname(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullname', test_x, test_y)
    result.stdout.fnmatch_lines_random([
        "* benchmark 'test_x.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]1[]]': 1 tests *",
        '============* 8 passed* seconds ============*',
    ])


def test_double_use(testdir):
    test = testdir.makepyfile('''
def test_a(benchmark):
    benchmark(lambda: None)
    benchmark.pedantic(lambda: None)

def test_b(benchmark):
    benchmark.pedantic(lambda: None)
    benchmark(lambda: None)
''')
    result = testdir.runpytest(test, '--tb=line')
    result.stdout.fnmatch_lines([
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark(...) mode.',
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark.pedantic(...) mode.',
    ])


def test_conflict_between_only_and_skip(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-skip', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-skip options."
    ])


def test_conflict_between_only_and_disable(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-disable', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
        "automatically activated if xdist is on or you're missing the statistics dependency."
    ])


def test_max_time_min_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time_min_rounds.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 1  *",
        "test_slow          * 1  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 5  *",
        "test_slow          * 5  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_bogus_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-max-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


@pytest.mark.skipif("platform.python_implementation() == 'PyPy'")
def test_pep418_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               '--benchmark-timer=pep418.perf_counter', test)
    result.stdout.fnmatch_lines([
        "* (defaults: timer=*.perf_counter*",
    ])


def test_bad_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=asd:f?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Must not contain any of these characters: /:*?<>|\\ (it has ':?')",
    ])


def test_bad_save_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Can't be empty.",
    ])


def test_bad_compare_fail(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-compare-fail=?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-compare-fail: Could not parse value: '?'.",
    ])


def test_bad_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=asd', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: invalid literal for int() with base 10: 'asd'",
    ])


def test_bad_rounds_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=0', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: Value for --benchmark-rounds must be at least 1.",
    ])


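# The tests below cover the save/compare workflow: save a run first
# (--benchmark-autosave or --benchmark-save=NAME), then rerun with
# --benchmark-compare[=NUM] and optionally --benchmark-compare-fail=EXPR
# (e.g. min:5% or mean:0.001) to fail on a performance regression.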
def test_compare(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark *0001_unversioned_*.json",
    ])


def test_compare_last(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark *0001_unversioned_*.json",
    ])


def test_compare_non_existing(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002', '-rw',
                               test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C1 * Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_non_existing_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002',
                               test, '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_no_files(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare')
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C2 * Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_match(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare=1')
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C1 * Can't compare. No benchmark files in '*' match '1'."
    ])


def test_compare_no_files_match_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare=1', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*' match '1'."
    ])


def test_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-verbose',
                               '-vv', test)
    result.stderr.fnmatch_lines([
        "  Timer precision: *s",
        "  Calibrating to target round *s; will estimate when reaching *s.",
        "    Measured * iterations: *s.",
        "  Running * rounds x * iterations ...",
        "  Ran for *s.",
    ])


def test_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())
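# The saved file ends up as '.benchmarks/<subdirectory>/0001_foobar.json';
# listdir()[0] grabs the single (presumably machine/interpreter-specific)
# subdirectory that the storage layer creates under '.benchmarks'.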


def test_histogram(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-histogram=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Generated histogram *foobar.svg",
    ])
    assert [f.basename for f in testdir.tmpdir.listdir("*.svg", sort=True)] == [
        'foobar.svg',
    ]


def test_autosave(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-autosave',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].listdir('0001_*.json')[0].read())


def test_bogus_min_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


def test_disable_gc(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-disable-gc', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_disable_gc.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_custom_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=time.time', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_custom_timer.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-timer: Value for --benchmark-timer must be in dotted form. Eg: "
        "'module.attr'.",
    ])


def test_sort_by_mean(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=mean', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_sort_by_mean.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_sort(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one of: 'min', 'max', 'mean', 'stddev', 'name', 'fullname'."
    ])


def test_xdist(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '-rw', test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-U2 * Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be "
        "performed reliably in a parallelized environment.",
    ])


def test_xdist_verbose(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        "------*",
        " WARNING: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed "
        "reliably in a parallelized environment.",
        "------*",
    ])


def test_abort_broken(testdir):
    """
    Test that we don't benchmark code that raises exceptions.
    """
    test = testdir.makepyfile('''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_bad(benchmark):
    @benchmark
    def result():
        raise Exception()
    assert 1 == 1

def test_bad2(benchmark):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0

@pytest.fixture(params=['a', 'b', 'c'])
def bad_fixture(request):
    raise ImportError()

def test_ok(benchmark, bad_fixture):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0
''')
    result = testdir.runpytest('-vv', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",

        "test_abort_broken.py::test_bad FAILED",
        "test_abort_broken.py::test_bad2 FAILED",
        "test_abort_broken.py::test_ok[a] ERROR",
        "test_abort_broken.py::test_ok[b] ERROR",
        "test_abort_broken.py::test_ok[c] ERROR",

        "*====== ERRORS ======*",
        "*______ ERROR at setup of test_ok[[]a[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[a]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]b[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[b]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]c[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[c]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*====== FAILURES ======*",
        "*______ test_bad ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad(benchmark):",
        ">       @benchmark",
        "        def result():",

        "test_abort_broken.py:*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",
        "*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",

        "    @benchmark",
        "    def result():",
        ">       raise Exception()",
        "E       Exception",

        "test_abort_broken.py:11: Exception",
        "*______ test_bad2 ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad2(benchmark):",
        "        @benchmark",
        "        def result():",
        "            time.sleep(0.1)",
        ">       assert 1 == 0",
        "E       assert 1 == 0",

        "test_abort_broken.py:18: AssertionError",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_bad2           *",
        "------*",

        "*====== 2 failed*, 3 error* seconds ======*",
    ])


BASIC_TEST = '''
"""
Just to make sure the plugin doesn't choke on doctests::
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
from functools import partial

import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    assert benchmark(partial(time.sleep, 0.001)) is None

def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))

@pytest.mark.benchmark(min_rounds=2)
def test_xfast(benchmark):
    benchmark(str)

def test_fast(benchmark):
    benchmark(int)
'''
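# Note: BASIC_TEST defines test_fast twice; the second definition shadows the first,
# so only five items are collected (the module doctest plus four test functions),
# which matches the expectations in test_basic and the other BASIC_TEST-based tests
# below.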


def test_basic(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_basic.py::*test_basic PASSED",
        "test_basic.py::test_slow PASSED",
        "test_basic.py::test_slower PASSED",
        "test_basic.py::test_xfast PASSED",
        "test_basic.py::test_fast PASSED",
        "",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "",
        "*====== 5 passed* seconds ======*",
    ])


def test_skip(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_skip.py::*test_skip PASSED",
        "test_skip.py::test_slow SKIPPED",
        "test_skip.py::test_slower SKIPPED",
        "test_skip.py::test_xfast SKIPPED",
        "test_skip.py::test_fast SKIPPED",
        "*====== 1 passed, 4 skipped* seconds ======*",
    ])


def test_disable(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_disable.py::*test_disable PASSED",
        "test_disable.py::test_slow PASSED",
        "test_disable.py::test_slower PASSED",
        "test_disable.py::test_xfast PASSED",
        "test_disable.py::test_fast PASSED",
        "*====== 5 passed * seconds ======*",
    ])


def test_mark_selection(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_mark_selection.py::test_xfast PASSED",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_xfast       *",
        "------*",
        "*====== 4 tests deselected by \"-m 'benchmark'\" ======*",
        "*====== 1 passed, 4 deselected* seconds ======*",
    ])


def test_only_benchmarks(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-only', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_only_benchmarks.py::*test_only_benchmarks SKIPPED",
        "test_only_benchmarks.py::test_slow PASSED",
        "test_only_benchmarks.py::test_slower PASSED",
        "test_only_benchmarks.py::test_xfast PASSED",
        "test_only_benchmarks.py::test_fast PASSED",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "*====== 4 passed, 1 skipped* seconds ======*",
    ])

def test_columns(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * Iterations * Min *",
        "------*",
    ])