import json
import platform

import pytest

pytest_plugins = 'pytester',
platform  # reference the import so linters don't flag it as unused


def test_help(testdir):
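    # Sanity check: every plugin option should appear in --help output with its default.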
    result = testdir.runpytest('--help')
    result.stdout.fnmatch_lines([
        "*", "*",
        "benchmark:",
        "  --benchmark-min-time=SECONDS",
        "                        Minimum time per round in seconds. Default: '0.000005'",
        "  --benchmark-max-time=SECONDS",
        "                        Maximum run time per test - it will be repeated until",
        "                        this total time is reached. It may be exceeded if test",
        "                        function is very slow or --benchmark-min-rounds is",
        "                        large (it takes precedence). Default: '1.0'",
        "  --benchmark-min-rounds=NUM",
        "                        Minimum rounds, even if total time would exceed",
        "                        `--max-time`. Default: 5",
        "  --benchmark-sort=COL  Column to sort on. Can be one of: 'min', 'max',",
        "                        'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
        "  --benchmark-group-by=LABEL",
        "                        How to group tests. Can be one of: 'group', 'name',",
        "                        'fullname', 'func', 'fullfunc', 'param' or",
        "                        'param:NAME', where NAME is the name passed to",
        "                        @pytest.parametrize. Default: 'group'",
        "  --benchmark-timer=FUNC",
        "                        Timer to use when measuring time. Default:*",
        "  --benchmark-calibration-precision=NUM",
        "                        Precision to use when calibrating number of",
        "                        iterations. Precision of 10 will make the timer look",
        "                        10 times more accurate, at a cost of less precise",
        "                        measure of deviations. Default: 10",
        "  --benchmark-warmup=[KIND]",
        "                        Activates warmup. Will run the test function up to",
        "                        number of times in the calibration phase. See",
        "                        `--benchmark-warmup-iterations`. Note: Even the warmup",
        "                        phase obeys --benchmark-max-time. Available KIND:",
        "                        'auto', 'off', 'on'. Default: 'auto' (automatically",
        "                        activate on PyPy).",
        "  --benchmark-warmup-iterations=NUM",
        "                        Max number of iterations to run in the warmup phase.",
        "                        Default: 100000",
        "  --benchmark-verbose   Dump diagnostic and progress information.",
        "  --benchmark-disable-gc",
        "                        Disable GC during benchmarks.",
        "  --benchmark-skip      Skip running any tests that contain benchmarks.",
        "  --benchmark-only      Only run benchmarks.",
        "  --benchmark-save=NAME",
        "                        Save the current run into 'STORAGE-",
        "                        PATH/counter_NAME.json'.",
        "  --benchmark-autosave  Autosave the current run into 'STORAGE-",
        "                        PATH/counter_*.json",
        "  --benchmark-save-data",
        "                        Use this to make --benchmark-save and --benchmark-",
        "                        autosave include all the timing data, not just the",
        "                        stats.",
        "  --benchmark-compare=[NUM]",
        "                        Compare the current run against run NUM or the latest",
        "                        saved run if unspecified.",
        "  --benchmark-compare-fail=EXPR=[EXPR=...]",
        "                        Fail test if performance regresses according to given",
        "                        EXPR (eg: min:5% or mean:0.001 for number of seconds).",
        "                        Can be used multiple times.",
        "  --benchmark-storage=STORAGE-PATH",
        "                        Specify a different path to store the runs (when",
        "                        --benchmark-save or --benchmark-autosave are used).",
        "                        Default: './.benchmarks/*'",
        "  --benchmark-histogram=[FILENAME-PREFIX]",
        "                        Plot graphs of min/max/avg/stddev over time in",
        "                        FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        "                        contains slashes ('/') then directories will be",
        "                        created. Default: '*'",
        "  --benchmark-json=PATH",
        "                        Dump a JSON report into PATH. Note that this will",
        "                        include the complete data (all the timings, not just",
        "                        the stats).",
        "*",
    ])


def test_groups(testdir):
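    # Ungrouped benchmarks land in the default table; the group="A" benchmarks get their own table.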
    test = testdir.makepyfile('''"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    benchmark(lambda: time.sleep(0.000001))
    assert 1 == 1

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.001))
    assert 1 == 1

@pytest.mark.benchmark(group="A")
def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))
    assert 1 == 1

@pytest.mark.benchmark(group="A", warmup=True)
def test_xfast(benchmark):
    benchmark(lambda: None)
    assert 1 == 1
''')
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_groups.py::*test_groups PASSED",
        "test_groups.py::test_fast PASSED",
        "test_groups.py::test_slow PASSED",
        "test_groups.py::test_slower PASSED",
        "test_groups.py::test_xfast PASSED",
        "*",
        "* benchmark: 2 tests *",
        "*",
        "* benchmark 'A': 2 tests *",
        "*",
        "*====== 5 passed* seconds ======*",
    ])


SIMPLE_TEST = '''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.1))
    assert 1 == 1
'''

GROUPING_TEST = '''
import pytest

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo):
    benchmark(str)

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo):
    benchmark(int)
'''

GROUPING_PARAMS_TEST = '''
import pytest

@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo, bar):
    benchmark(str)


@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo, bar):
    benchmark(int)
'''


def test_group_by_name(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'name', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_a[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]0[]]             *',
        'test_a[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_a[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]1[]]             *',
        'test_a[[]1[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]0[]]             *',
        'test_b[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]1[]]             *',
        'test_b[[]1[]]             *',
        '----------------------*',
    ])


def test_group_by_func(testdir):
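    # group-by=func merges the parametrized variants of each function (across files) into one table.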
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'func', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*',
        "* benchmark 'test_a': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        '----------------------*',
        '*', '*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullfunc(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullfunc', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_x.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_x.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from M*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_all(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark '0': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        '-------------------*',
        '',
        "* benchmark '1': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])

def test_group_by_param_select(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo1': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo2': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullname(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullname', test_x, test_y)
    result.stdout.fnmatch_lines_random([
        "* benchmark 'test_x.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]1[]]': 1 tests *",
        '============* 8 passed* seconds ============*',
    ])


def test_double_use(testdir):
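    # Mixing benchmark(...) and benchmark.pedantic(...) on the same fixture must raise FixtureAlreadyUsed.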
    test = testdir.makepyfile('''
def test_a(benchmark):
    benchmark(lambda: None)
    benchmark.pedantic(lambda: None)

def test_b(benchmark):
    benchmark.pedantic(lambda: None)
    benchmark(lambda: None)
''')
    result = testdir.runpytest(test, '--tb=line')
    result.stdout.fnmatch_lines([
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark(...) mode.',
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark.pedantic(...) mode.',
    ])


def test_conflict_between_only_and_skip(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-skip', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-skip options."
    ])


def test_conflict_between_only_and_disable(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-disable', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
        "automatically activated if xdist is on or you're missing the statistics dependency."
    ])


def test_max_time_min_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time_min_rounds.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 1  *",
        "test_slow          * 1  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 5  *",
        "test_slow          * 5  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_bogus_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-max-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


@pytest.mark.skipif("platform.python_implementation() == 'PyPy'")
def test_pep418_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               '--benchmark-timer=pep418.perf_counter', test)
    result.stdout.fnmatch_lines([
        "* (defaults: timer=*.perf_counter*",
    ])


def test_bad_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=asd:f?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Must not contain any of these characters: /:*?<>|\\ (it has ':?')",
    ])


def test_bad_save_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Can't be empty.",
    ])


def test_bad_compare_fail(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-compare-fail=?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-compare-fail: Could not parse value: '?'.",
    ])


def test_bad_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=asd', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: invalid literal for int() with base 10: 'asd'",
    ])


def test_bad_rounds_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=0', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: Value for --benchmark-rounds must be at least 1.",
    ])


def test_compare(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark 0001_unversioned_*.json:",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark 0001_unversioned_*.json:",
    ])


def test_compare_last(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark 0001_unversioned_*.json:",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark 0001_unversioned_*.json:",
    ])


def test_compare_non_existing(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002', '-rw',
                               test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C1 * Can't compare. No benchmark files matched '0002'",
    ])


def test_compare_non_existing_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002',
                               test, '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files matched '0002'",
    ])


def test_compare_no_files(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare')
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C3 * Can't compare. No benchmark files in '*'. Expected files matching *.json."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'. Expected files matching *.json."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_match(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare=1')
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C4 * Can't compare. No benchmark files in '*'. Expected files matching *.json."
        " Can't match anything to '1'."
    ])


def test_compare_no_files_match_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare=1', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'. Expected files matching *.json."
        " Can't match anything to '1'."
    ])


def test_compare_too_many(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0', '-rw',
                               test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C2 * Can't compare. Too many benchmark files matched '0':",
        ' - *0001_unversioned_*.json',
        ' - *0002_unversioned_*.json',
    ])


def test_compare_too_many_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0',
                               '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. Too many benchmark files matched '0':",
        ' - *0001_unversioned_*.json',
        ' - *0002_unversioned_*.json',
    ])


def test_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-verbose',
                               '-vv', test)
    result.stderr.fnmatch_lines([
        "  Timer precision: *s",
        "  Calibrating to target round *s; will estimate when reaching *s.",
        "    Measured * iterations: *s.",
        "  Running * rounds x * iterations ...",
        "  Ran for *s.",
    ])


def test_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())


def test_histogram(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-histogram=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Generated histogram *foobar-test_histogram.py_test_fast.svg",
        "Generated histogram *foobar-test_histogram.py_test_slow.svg",
    ])
    assert [f.basename for f in testdir.tmpdir.listdir("*.svg", sort=True)] == [
        'foobar-test_histogram.py_test_fast.svg',
        'foobar-test_histogram.py_test_slow.svg',
    ]


def test_autosave(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-autosave',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].listdir('0001_*.json')[0].read())


def test_bogus_min_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


def test_disable_gc(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-disable-gc', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_disable_gc.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_custom_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=time.time', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_custom_timer.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-timer: Value for --benchmark-timer must be in dotted form. Eg: "
        "'module.attr'.",
    ])


def test_sort_by_mean(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=mean', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_sort_by_mean.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_sort(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one of: 'min', 'max', 'mean', 'stddev', 'name', 'fullname'."
    ])


def test_xdist(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '-rw', test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-U2 * Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be "
        "performed reliably in a parallelized environment.",
    ])


def test_xdist_verbose(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        "------*",
        " WARNING: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed "
        "reliably in a parallelized environment.",
        "------*",
    ])


def test_abort_broken(testdir):
    """
    Test that we don't benchmark code that raises exceptions.
    """
    test = testdir.makepyfile('''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_bad(benchmark):
    @benchmark
    def result():
        raise Exception()
    assert 1 == 1

def test_bad2(benchmark):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0

@pytest.fixture(params=['a', 'b', 'c'])
def bad_fixture(request):
    raise ImportError()

def test_ok(benchmark, bad_fixture):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0
''')
    result = testdir.runpytest('-vv', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",

        "test_abort_broken.py::test_bad FAILED",
        "test_abort_broken.py::test_bad2 FAILED",
        "test_abort_broken.py::test_ok[a] ERROR",
        "test_abort_broken.py::test_ok[b] ERROR",
        "test_abort_broken.py::test_ok[c] ERROR",

        "*====== ERRORS ======*",
        "*______ ERROR at setup of test_ok[[]a[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[a]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]b[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[b]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]c[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[c]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*====== FAILURES ======*",
        "*______ test_bad ______*",

        "benchmark = <pytest_benchmark.plugin.BenchmarkFixture object at *>",

        "    def test_bad(benchmark):",
        ">       @benchmark",
        "        def result():",

        "test_abort_broken.py:*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",
        "*pytest_benchmark/plugin.py:*: in __call__",
        "    duration, iterations, loops_range = self._calibrate_timer(runner)",
        "*pytest_benchmark/plugin.py:*: in _calibrate_timer",
        "    duration = runner(loops_range)",
        "*pytest_benchmark/plugin.py:*: in runner",
        "    *",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",

        "    @benchmark",
        "    def result():",
        ">       raise Exception()",
        "E       Exception",

        "test_abort_broken.py:11: Exception",
        "*______ test_bad2 ______*",

        "benchmark = <pytest_benchmark.plugin.BenchmarkFixture object at *>",

        "    def test_bad2(benchmark):",
        "        @benchmark",
        "        def result():",
        "            time.sleep(0.1)",
        ">       assert 1 == 0",
        "E       assert 1 == 0",

        "test_abort_broken.py:18: AssertionError",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_bad2           *",
        "------*",

        "*====== 2 failed*, 3 error* seconds ======*",
    ])


BASIC_TEST = '''
"""
Just to make sure the plugin doesn't choke on doctests::
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
from functools import partial

import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    assert benchmark(partial(time.sleep, 0.001)) is None

def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))

@pytest.mark.benchmark(min_rounds=2)
def test_xfast(benchmark):
    benchmark(str)

def test_fast(benchmark):
    benchmark(int)
'''


def test_basic(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_basic.py::*test_basic PASSED",
        "test_basic.py::test_slow PASSED",
        "test_basic.py::test_slower PASSED",
        "test_basic.py::test_xfast PASSED",
        "test_basic.py::test_fast PASSED",
        "",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "",
        "*====== 5 passed* seconds ======*",
    ])


def test_skip(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_skip.py::*test_skip PASSED",
        "test_skip.py::test_slow SKIPPED",
        "test_skip.py::test_slower SKIPPED",
        "test_skip.py::test_xfast SKIPPED",
        "test_skip.py::test_fast SKIPPED",
        "*====== 1 passed, 4 skipped* seconds ======*",
    ])


def test_disable(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_disable.py::*test_disable PASSED",
        "test_disable.py::test_slow PASSED",
        "test_disable.py::test_slower PASSED",
        "test_disable.py::test_xfast PASSED",
        "test_disable.py::test_fast PASSED",
        "*====== 5 passed * seconds ======*",
    ])


def test_mark_selection(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_mark_selection.py::test_xfast PASSED",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_xfast       *",
        "------*",
        "*====== 4 tests deselected by \"-m 'benchmark'\" ======*",
        "*====== 1 passed, 4 deselected* seconds ======*",
    ])


def test_only_benchmarks(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-only', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_only_benchmarks.py::*test_only_benchmarks SKIPPED",
        "test_only_benchmarks.py::test_slow PASSED",
        "test_only_benchmarks.py::test_slower PASSED",
        "test_only_benchmarks.py::test_xfast PASSED",
        "test_only_benchmarks.py::test_fast PASSED",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "*====== 4 passed, 1 skipped* seconds ======*",
    ])

def test_columns(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * Iterations * Min *",
        "------*",
    ])