Completed — Pull Request, master (#34), created by unknown at 01:26

tests.test_columns()   A

Complexity:   Conditions 1
Size:         Total Lines 13
Duplication:  Lines 0, Ratio 0 %

Metric   Value
cc       1
dl       0
loc      13
rs       9.4286
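
A note on the metrics above: judging by the surrounding labels, cc is the cyclomatic complexity (Conditions 1), loc the line count (Total Lines 13) and dl the duplicated-line count (Duplication 0); rs is not expanded anywhere in the report. The report also does not name its analyzer, so the following is only a sketch of how comparable numbers could be reproduced with radon; the file path is a placeholder, not something the report provides.

# Sketch only: reproduce cc/loc-style numbers with radon (assumed toolchain, assumed path).
from radon.complexity import cc_visit   # per-function cyclomatic complexity
from radon.raw import analyze           # raw counts: loc, lloc, sloc, comments, blank

path_to_reviewed_file = 'tests.py'      # placeholder; the report does not give the file path
source = open(path_to_reviewed_file).read()

print('module loc:', analyze(source).loc)
for block in cc_visit(source):
    if block.name == 'test_columns':
        # a straight-line test body has a single branchless path, hence cc == 1
        print(block.name, 'cc =', block.complexity)

The full source of the reviewed test module follows.
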
import json
import platform

import pytest

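# 'pytester' provides the `testdir` fixture used throughout this module to run pytest in a
# temporary directory. The bare `platform` expression below presumably just marks the import
# as used; the string-form skipif further down ("platform.python_implementation() == 'PyPy'")
# is evaluated against this module's namespace, so the import itself is needed.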
pytest_plugins = 'pytester',
platform


def test_help(testdir):
    result = testdir.runpytest('--help')
    result.stdout.fnmatch_lines([
        "*", "*",
        "benchmark:",
        "  --benchmark-min-time=SECONDS",
        "                        Minimum time per round in seconds. Default: '0.000005'",
        "  --benchmark-max-time=SECONDS",
        "                        Maximum run time per test - it will be repeated until",
        "                        this total time is reached. It may be exceeded if test",
        "                        function is very slow or --benchmark-min-rounds is",
        "                        large (it takes precedence). Default: '1.0'",
        "  --benchmark-min-rounds=NUM",
        "                        Minimum rounds, even if total time would exceed",
        "                        `--max-time`. Default: 5",
        "  --benchmark-sort=COL  Column to sort on. Can be one of: 'min', 'max', 'mean'",
        "                        or 'stddev'. Default: 'min'",
        "  --benchmark-group-by=LABEL",
        "                        How to group tests. Can be one of: 'group', 'name',",
        "                        'fullname', 'func', 'fullfunc' or 'param'. Default:",
        "                        'group'",
        "  --benchmark-timer=FUNC",
        "                        Timer to use when measuring time. Default:*",
        "  --benchmark-calibration-precision=NUM",
        "                        Precision to use when calibrating number of",
        "                        iterations. Precision of 10 will make the timer look",
        "                        10 times more accurate, at a cost of less precise",
        "                        measure of deviations. Default: 10",
        "  --benchmark-warmup=[KIND]",
        "                        Activates warmup. Will run the test function up to",
        "                        number of times in the calibration phase. See",
        "                        `--benchmark-warmup-iterations`. Note: Even the warmup",
        "                        phase obeys --benchmark-max-time. Available KIND:",
        "                        'auto', 'off', 'on'. Default: 'auto' (automatically",
        "                        activate on PyPy).",
        "  --benchmark-warmup-iterations=NUM",
        "                        Max number of iterations to run in the warmup phase.",
        "                        Default: 100000",
        "  --benchmark-verbose   Dump diagnostic and progress information.",
        "  --benchmark-disable-gc",
        "                        Disable GC during benchmarks.",
        "  --benchmark-skip      Skip running any tests that contain benchmarks.",
        "  --benchmark-only      Only run benchmarks.",
        "  --benchmark-save=NAME",
        "                        Save the current run into 'STORAGE-",
        "                        PATH/counter_NAME.json'.",
        "  --benchmark-autosave  Autosave the current run into 'STORAGE-",
        "                        PATH/counter_*.json",
        "  --benchmark-save-data",
        "                        Use this to make --benchmark-save and --benchmark-",
        "                        autosave include all the timing data, not just the",
        "                        stats.",
        "  --benchmark-compare=[NUM]",
        "                        Compare the current run against run NUM or the latest",
        "                        saved run if unspecified.",
        "  --benchmark-compare-fail=EXPR=[EXPR=...]",
        "                        Fail test if performance regresses according to given",
        "                        EXPR (eg: min:5% or mean:0.001 for number of seconds).",
        "                        Can be used multiple times.",
        "  --benchmark-storage=STORAGE-PATH",
        "                        Specify a different path to store the runs (when",
        "                        --benchmark-save or --benchmark-autosave are used).",
        "                        Default: './.benchmarks/*'",
        "  --benchmark-histogram=[FILENAME-PREFIX]",
        "                        Plot graphs of min/max/avg/stddev over time in",
        "                        FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        "                        contains slashes ('/') then directories will be",
        "                        created. Default: '*'",
        "  --benchmark-json=PATH",
        "                        Dump a JSON report into PATH. Note that this will",
        "                        include the complete data (all the timings, not just",
        "                        the stats).",
        "*",
    ])


def test_groups(testdir):
    test = testdir.makepyfile('''"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    benchmark(lambda: time.sleep(0.000001))
    assert 1 == 1

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.001))
    assert 1 == 1

@pytest.mark.benchmark(group="A")
def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))
    assert 1 == 1

@pytest.mark.benchmark(group="A", warmup=True)
def test_xfast(benchmark):
    benchmark(lambda: None)
    assert 1 == 1
''')
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_groups.py::*test_groups PASSED",
        "test_groups.py::test_fast PASSED",
        "test_groups.py::test_slow PASSED",
        "test_groups.py::test_slower PASSED",
        "test_groups.py::test_xfast PASSED",
        "*",
        "* benchmark: 2 tests *",
        "*",
        "* benchmark 'A': 2 tests *",
        "*",
        "*====== 5 passed* seconds ======*",
    ])


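# SIMPLE_TEST is a minimal test module (a module-level doctest plus two benchmarked tests)
# that most of the option-handling tests below feed to `testdir.makepyfile`.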
SIMPLE_TEST = '''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.1))
    assert 1 == 1
'''

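# GROUPING_TEST defines two parametrized benchmarks in groups "A" and "B"; the
# test_group_by_* tests below run it from two files to exercise --benchmark-group-by.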
GROUPING_TEST = '''
import pytest

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo):
    benchmark(str)

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo):
    benchmark(int)
'''


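# The test_group_by_* tests below run GROUPING_TEST from two files (test_x.py and test_y.py)
# and check the resulting report tables for each --benchmark-group-by mode: 'name' groups by
# test name (including the parameter), 'func' by function across files and parameters,
# 'fullfunc' by file plus function, 'param' by parameter value, and 'fullname' by the full
# test id, one benchmark per group.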
def test_group_by_name(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'name', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_a[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]0[]]             *',
        'test_a[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_a[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]1[]]             *',
        'test_a[[]1[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]0[]]             *',
        'test_b[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]1[]]             *',
        'test_b[[]1[]]             *',
        '----------------------*',
    ])


def test_group_by_func(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'func', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*',
        "* benchmark 'test_a': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        '----------------------*',
        '*', '*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullfunc(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullfunc', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_x.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_x.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from M*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark '0': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        '-------------------*',
        '',
        "* benchmark '1': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullname(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullname', test_x, test_y)
    result.stdout.fnmatch_lines_random([
        "* benchmark 'test_x.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]1[]]': 1 tests *",
        '============* 8 passed* seconds ============*',
    ])


def test_double_use(testdir):
    test = testdir.makepyfile('''
def test_a(benchmark):
    benchmark(lambda: None)
    benchmark.pedantic(lambda: None)

def test_b(benchmark):
    benchmark.pedantic(lambda: None)
    benchmark(lambda: None)
''')
    result = testdir.runpytest(test, '--tb=line')
    result.stdout.fnmatch_lines([
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark(...) mode.',
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark.pedantic(...) mode.',
    ])


def test_conflict_between_only_and_skip(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-skip', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-skip options."
    ])


def test_conflict_between_only_and_disable(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-disable', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
        "automatically activated if xdist is on or you're missing the statistics dependency."
    ])


def test_max_time_min_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time_min_rounds.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 1  *",
        "test_slow          * 1  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 5  *",
        "test_slow          * 5  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_bogus_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-max-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


@pytest.mark.skipif("platform.python_implementation() == 'PyPy'")
def test_pep418_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               '--benchmark-timer=pep418.perf_counter', test)
    result.stdout.fnmatch_lines([
        "* (defaults: timer=*.perf_counter*",
    ])


def test_bad_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=asd:f?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Must not contain any of these characters: /:*?<>|\\ (it has ':?')",
    ])


def test_bad_save_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Can't be empty.",
    ])


def test_bad_compare_fail(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-compare-fail=?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-compare-fail: Could not parse value: '?'.",
    ])


def test_bad_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=asd', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: invalid literal for int() with base 10: 'asd'",
    ])


def test_bad_rounds_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=0', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: Value for --benchmark-rounds must be at least 1.",
    ])


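# The test_compare* tests first produce a saved run with --benchmark-autosave, then check
# that --benchmark-compare / --benchmark-compare-fail report against it, and that missing,
# ambiguous or absent saved runs produce the WBENCHMARK-C* warnings asserted below.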
def test_compare(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark 0001_unversioned_*.json:",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark 0001_unversioned_*.json:",
    ])


def test_compare_last(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark 0001_unversioned_*.json:",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmark 0001_unversioned_*.json:",
    ])


def test_compare_non_existing(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002', '-rw',
                               test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C1 * Can't compare. No benchmark files matched '0002'",
    ])


def test_compare_non_existing_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002',
                               test, '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files matched '0002'",
    ])


def test_compare_no_files(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare')
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C3 * Can't compare. No benchmark files in '*'. Expected files matching *.json."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'. Expected files matching *.json."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_match(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare=1')
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C4 * Can't compare. No benchmark files in '*'. Expected files matching *.json."
        " Can't match anything to '1'."
    ])


def test_compare_no_files_match_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare=1', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'. Expected files matching *.json."
        " Can't match anything to '1'."
    ])


def test_compare_too_many(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0', '-rw',
                               test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C2 * Can't compare. Too many benchmark files matched '0':",
        ' - *0001_unversioned_*.json',
        ' - *0002_unversioned_*.json',
    ])


def test_compare_too_many_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0',
                               '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. Too many benchmark files matched '0':",
        ' - *0001_unversioned_*.json',
        ' - *0002_unversioned_*.json',
    ])


def test_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-verbose',
                               '-vv', test)
    result.stderr.fnmatch_lines([
        "  Timer precision: *s",
        "  Calibrating to target round *s; will estimate when reaching *s.",
        "    Measured * iterations: *s.",
        "  Running * rounds x * iterations ...",
        "  Ran for *s.",
    ])


def test_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())


def test_histogram(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-histogram=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Generated histogram *foobar-test_histogram.py_test_fast.svg",
        "Generated histogram *foobar-test_histogram.py_test_slow.svg",
    ])
    assert [f.basename for f in testdir.tmpdir.listdir("*.svg", sort=True)] == [
        'foobar-test_histogram.py_test_fast.svg',
        'foobar-test_histogram.py_test_slow.svg',
    ]


def test_autosave(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-autosave',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].listdir('0001_*.json')[0].read())


def test_bogus_min_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


def test_disable_gc(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-disable-gc', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_disable_gc.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_custom_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=time.time', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_custom_timer.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-timer: Value for --benchmark-timer must be in dotted form. Eg: "
        "'module.attr'.",
    ])


def test_sort_by_mean(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=mean', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_sort_by_mean.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_sort(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one of: 'min', 'max', 'mean' or 'stddev'."
    ])


def test_xdist(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '-rw', test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-U2 * Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be "
        "performed reliably in a parallelized environment.",
    ])


def test_xdist_verbose(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        "------*",
        " WARNING: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed "
        "reliably in a parallelized environment.",
        "------*",
    ])


def test_abort_broken(testdir):
    """
    Test that we don't benchmark code that raises exceptions.
    """
    test = testdir.makepyfile('''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_bad(benchmark):
    @benchmark
    def result():
        raise Exception()
    assert 1 == 1

def test_bad2(benchmark):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0

@pytest.fixture(params=['a', 'b', 'c'])
def bad_fixture(request):
    raise ImportError()

def test_ok(benchmark, bad_fixture):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0
''')
    result = testdir.runpytest('-vv', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",

        "test_abort_broken.py::test_bad FAILED",
        "test_abort_broken.py::test_bad2 FAILED",
        "test_abort_broken.py::test_ok[a] ERROR",
        "test_abort_broken.py::test_ok[b] ERROR",
        "test_abort_broken.py::test_ok[c] ERROR",

        "*====== ERRORS ======*",
        "*______ ERROR at setup of test_ok[[]a[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[a]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]b[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[b]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]c[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[c]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*====== FAILURES ======*",
        "*______ test_bad ______*",

        "benchmark = <pytest_benchmark.plugin.BenchmarkFixture object at *>",

        "    def test_bad(benchmark):",
        ">       @benchmark",
        "        def result():",

        "test_abort_broken.py:*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",
        "*pytest_benchmark/plugin.py:*: in __call__",
        "    duration, iterations, loops_range = self._calibrate_timer(runner)",
        "*pytest_benchmark/plugin.py:*: in _calibrate_timer",
        "    duration = runner(loops_range)",
        "*pytest_benchmark/plugin.py:*: in runner",
        "    *",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",

        "    @benchmark",
        "    def result():",
        ">       raise Exception()",
        "E       Exception",

        "test_abort_broken.py:11: Exception",
        "*______ test_bad2 ______*",

        "benchmark = <pytest_benchmark.plugin.BenchmarkFixture object at *>",

        "    def test_bad2(benchmark):",
        "        @benchmark",
        "        def result():",
        "            time.sleep(0.1)",
        ">       assert 1 == 0",
        "E       assert 1 == 0",

        "test_abort_broken.py:18: AssertionError",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_bad2           *",
        "------*",

        "*====== 2 failed*, 3 error* seconds ======*",
    ])


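# BASIC_TEST is used by the skip/disable/mark-selection/only tests below. Note that the
# second `def test_fast` shadows the first, so the module collects five items (the doctest
# plus four test functions), which is what the expected output asserts.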
BASIC_TEST = '''
"""
Just to make sure the plugin doesn't choke on doctests::
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
from functools import partial

import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    assert benchmark(partial(time.sleep, 0.001)) is None

def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))

@pytest.mark.benchmark(min_rounds=2)
def test_xfast(benchmark):
    benchmark(str)

def test_fast(benchmark):
    benchmark(int)
'''


def test_basic(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_basic.py::*test_basic PASSED",
        "test_basic.py::test_slow PASSED",
        "test_basic.py::test_slower PASSED",
        "test_basic.py::test_xfast PASSED",
        "test_basic.py::test_fast PASSED",
        "",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "",
        "*====== 5 passed* seconds ======*",
    ])


def test_skip(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_skip.py::*test_skip PASSED",
        "test_skip.py::test_slow SKIPPED",
        "test_skip.py::test_slower SKIPPED",
        "test_skip.py::test_xfast SKIPPED",
        "test_skip.py::test_fast SKIPPED",
        "*====== 1 passed, 4 skipped* seconds ======*",
    ])


def test_disable(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_disable.py::*test_disable PASSED",
        "test_disable.py::test_slow PASSED",
        "test_disable.py::test_slower PASSED",
        "test_disable.py::test_xfast PASSED",
        "test_disable.py::test_fast PASSED",
        "*====== 5 passed * seconds ======*",
    ])


def test_mark_selection(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_mark_selection.py::test_xfast PASSED",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_xfast       *",
        "------*",
        "*====== 4 tests deselected by \"-m 'benchmark'\" ======*",
        "*====== 1 passed, 4 deselected* seconds ======*",
    ])


def test_only_benchmarks(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-only', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_only_benchmarks.py::*test_only_benchmarks SKIPPED",
        "test_only_benchmarks.py::test_slow PASSED",
        "test_only_benchmarks.py::test_slower PASSED",
        "test_only_benchmarks.py::test_xfast PASSED",
        "test_only_benchmarks.py::test_fast PASSED",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "*====== 4 passed, 1 skipped* seconds ======*",
    ])

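# test_columns is the function graded 'A' in the report header (cc 1, loc 13): it checks
# that --benchmark-columns restricts the results table to the requested columns, in order.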
def test_columns(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * Iterations * Min *",
        "------*",
        "test_fast          * 1  *",
        "test_slow          * 1  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])