Completed: Pull Request — master (#93), created by unknown, 29s

test_save_percentiles()   A

Complexity:   Conditions 2
Size:         Total Lines 10
Duplication:  Lines 0, Ratio 0 %
Importance:   Changes 1, Bugs 0, Features 0

Metric  Value
cc      2
dl      0
loc     10
rs      9.4285
c       1
b       0
f       0
import json
import platform

import pytest

# Registers the `pytester` plugin, which provides the `testdir` fixture used
# by every test in this module.
pytest_plugins = 'pytester',
# Bare reference so the `platform` import is not flagged as unused; it is
# needed by the string-based `skipif` condition further below.
platform

def test_help(testdir):
    result = testdir.runpytest('--help')
    result.stdout.fnmatch_lines([
        "*", "*",
        "benchmark:",
        "  --benchmark-min-time=SECONDS",
        "                        Minimum time per round in seconds. Default: '0.000005'",
        "  --benchmark-max-time=SECONDS",
        "                        Maximum run time per test - it will be repeated until",
        "                        this total time is reached. It may be exceeded if test",
        "                        function is very slow or --benchmark-min-rounds is",
        "                        large (it takes precedence). Default: '1.0'",
        "  --benchmark-min-rounds=NUM",
        "                        Minimum rounds, even if total time would exceed",
        "                        `--max-time`. Default: 5",
        "  --benchmark-timer=FUNC",
        "                        Timer to use when measuring time. Default:*",
        "  --benchmark-calibration-precision=NUM",
        "                        Precision to use when calibrating number of",
        "                        iterations. Precision of 10 will make the timer look",
        "                        10 times more accurate, at a cost of less precise",
        "                        measure of deviations. Default: 10",
        "  --benchmark-warmup=[KIND]",
        "                        Activates warmup. Will run the test function up to",
        "                        number of times in the calibration phase. See",
        "                        `--benchmark-warmup-iterations`. Note: Even the warmup",
        "                        phase obeys --benchmark-max-time. Available KIND:",
        "                        'auto', 'off', 'on'. Default: 'auto' (automatically",
        "                        activate on PyPy).",
        "  --benchmark-warmup-iterations=NUM",
        "                        Max number of iterations to run in the warmup phase.",
        "                        Default: 100000",
        "  --benchmark-disable-gc",
        "                        Disable GC during benchmarks.",
        "  --benchmark-skip      Skip running any tests that contain benchmarks.",
        "  --benchmark-only      Only run benchmarks.",
        "  --benchmark-save=NAME",
        "                        Save the current run into 'STORAGE-",
        "                        PATH/counter_NAME.json'.",
        "  --benchmark-autosave  Autosave the current run into 'STORAGE-",
        "                        PATH/counter*.json",
        "  --benchmark-save-data",
        "                        Use this to make --benchmark-save and --benchmark-",
        "                        autosave include all the timing data, not just the",
        "                        stats.",
        "  --benchmark-json=PATH",
        "                        Dump a JSON report into PATH. Note that this will",
        "                        include the complete data (all the timings, not just",
        "                        the stats).",
        "  --benchmark-compare=[NUM|_ID]",
        "                        Compare the current run against run NUM (or prefix of",
        "                        _id in elasticsearch) or the latest saved run if",
        "                        unspecified.",
        "  --benchmark-compare-fail=EXPR?[[]EXPR?...[]]",
        "                        Fail test if performance regresses according to given",
        "                        EXPR (eg: min:5% or mean:0.001 for number of seconds).",
        "                        Can be used multiple times.",
        "  --benchmark-cprofile=COLUMN",
        "                        If specified measure one run with cProfile and stores",
        "                        10 top functions. Argument is a column to sort by.",
        "                        Available columns: 'ncallls_recursion', 'ncalls',",
        "                        'tottime', 'tottime_per', 'cumtime', 'cumtime_per',",
        "                        'function_name'.",
        "  --benchmark-storage=URI",
        "                        Specify a path to store the runs as uri in form",
        "                        file://path or elasticsearch+http[s]://host1,host2/[in",
        "                        dex/doctype?project_name=Project] (when --benchmark-",
        "                        save or --benchmark-autosave are used). For backwards",
        "                        compatibility unexpected values are converted to",
        "                        file://<value>. Default: 'file://./.benchmarks'.",
        "  --benchmark-verbose   Dump diagnostic and progress information.",
        "  --benchmark-sort=COL  Column to sort on. Can be one of: 'min', 'max',",
        "                        'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
        "  --benchmark-group-by=LABEL",
        "                        How to group tests. Can be one of: 'group', 'name',",
        "                        'fullname', 'func', 'fullfunc', 'param' or",
        "                        'param:NAME', where NAME is the name passed to",
        "                        @pytest.parametrize. Default: 'group'",
        "  --benchmark-columns=LABELS",
        "                        Comma-separated list of columns to show in the result",
        "                        table. Use 'pXX.XX' (e.g. 'p99.9') to show",
        "                        percentiles. Default: 'min, max, mean, stddev, median,",
        "                        iqr, outliers, rounds, iterations'",
        "  --benchmark-histogram=[FILENAME-PREFIX]",
        "                        Plot graphs of min/max/avg/stddev over time in",
        "                        FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        "                        contains slashes ('/') then directories will be",
        "                        created. Default: '*'",
        "*",
    ])


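# The help text matched in test_help above doubles as a reference for invoking
# the plugin directly. An illustrative command line (not exercised by this
# suite), built only from options listed in that help text, would be:
#
#   pytest --benchmark-min-rounds=10 --benchmark-sort=mean \
#          --benchmark-columns=min,p99,max --benchmark-autosave
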
def test_groups(testdir):
    test = testdir.makepyfile('''"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    benchmark(lambda: time.sleep(0.000001))
    assert 1 == 1

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.001))
    assert 1 == 1

@pytest.mark.benchmark(group="A")
def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))
    assert 1 == 1

@pytest.mark.benchmark(group="A", warmup=True)
def test_xfast(benchmark):
    benchmark(lambda: None)
    assert 1 == 1
''')
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_groups.py::*test_groups PASSED",
        "test_groups.py::test_fast PASSED",
        "test_groups.py::test_slow PASSED",
        "test_groups.py::test_slower PASSED",
        "test_groups.py::test_xfast PASSED",
        "*",
        "* benchmark: 2 tests *",
        "*",
        "* benchmark 'A': 2 tests *",
        "*",
        "*====== 5 passed* seconds ======*",
    ])


SIMPLE_TEST = '''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.1))
    assert 1 == 1
'''

GROUPING_TEST = '''
import pytest

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo):
    benchmark(str)

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo):
    benchmark(int)
'''

GROUPING_PARAMS_TEST = '''
import pytest

@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo, bar):
    benchmark(str)


@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo, bar):
    benchmark(int)
'''


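# The three module-level strings above (SIMPLE_TEST, GROUPING_TEST and
# GROUPING_PARAMS_TEST) are source templates: the tests below write them to a
# temporary module with testdir.makepyfile() and run pytest on the result via
# testdir.runpytest(), matching the captured output with fnmatch patterns
# (the [[]...[]] sequences escape literal brackets for fnmatch).
# A minimal sketch of that pattern, with a hypothetical test name:
#
#   def test_example(testdir):
#       test = testdir.makepyfile(SIMPLE_TEST)
#       result = testdir.runpytest('--benchmark-max-time=0.0000001', test)
#       result.stdout.fnmatch_lines(["* benchmark: 2 tests *"])
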
def test_group_by_name(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'name', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_a[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]0[]]             *',
        'test_a[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_a[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]1[]]             *',
        'test_a[[]1[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]0[]]             *',
        'test_b[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]1[]]             *',
        'test_b[[]1[]]             *',
        '----------------------*',
    ])


def test_group_by_func(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'func', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*',
        "* benchmark 'test_a': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        '----------------------*',
        '*', '*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullfunc(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullfunc', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_x.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_x.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from M*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_all(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark '0': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        '-------------------*',
        '',
        "* benchmark '1': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_select(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_select_multiple(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo,param:bar',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1 bar=bar1': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo1 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar1': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullname(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullname', test_x, test_y)
    result.stdout.fnmatch_lines_random([
        "* benchmark 'test_x.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]1[]]': 1 tests *",
        '============* 8 passed* seconds ============*',
    ])


def test_double_use(testdir):
    test = testdir.makepyfile('''
def test_a(benchmark):
    benchmark(lambda: None)
    benchmark.pedantic(lambda: None)

def test_b(benchmark):
    benchmark.pedantic(lambda: None)
    benchmark(lambda: None)
''')
    result = testdir.runpytest(test, '--tb=line')
    result.stdout.fnmatch_lines([
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark(...) mode.',
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark.pedantic(...) mode.',
    ])


def test_conflict_between_only_and_skip(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-skip', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-skip options."
    ])


def test_conflict_between_only_and_disable(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-disable', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
        "automatically activated if xdist is on or you're missing the statistics dependency."
    ])


def test_max_time_min_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time_min_rounds.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 1  *",
        "test_slow          * 1  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 5  *",
        "test_slow          * 5  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_bogus_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-max-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


@pytest.mark.skipif("platform.python_implementation() == 'PyPy'")
def test_pep418_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               '--benchmark-timer=pep418.perf_counter', test)
    result.stdout.fnmatch_lines([
        "* (defaults: timer=*.perf_counter*",
    ])


def test_bad_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=asd:f?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Must not contain any of these characters: /:*?<>|\\ (it has ':?')",
    ])


def test_bad_save_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Can't be empty.",
    ])


def test_bad_compare_fail(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-compare-fail=?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-compare-fail: Could not parse value: '?'.",
    ])


def test_bad_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=asd', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: invalid literal for int() with base 10: 'asd'",
    ])


def test_bad_rounds_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=0', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: Value for --benchmark-rounds must be at least 1.",
    ])


def test_compare(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


def test_compare_last(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


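# Workflow exercised by the two tests above (summary only, no extra test
# code): a first run with --benchmark-autosave writes a numbered
# 0001_*.json file under the storage path, and a later run with
# --benchmark-compare (optionally =0001) loads it for comparison;
# --benchmark-compare-fail=min:0.1 applies an absolute threshold in seconds,
# while min:1% applies a relative one, matching the EXPR forms described in
# the help text checked by test_help.
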
def test_compare_non_existing(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002', '-rw',
                               test)
    result.stdout.fnmatch_lines([
        "* Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_non_existing_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002',
                               test, '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_no_files(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare')
    result.stdout.fnmatch_lines([
        "* Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_match(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare=1')
    result.stdout.fnmatch_lines([
        "* Can't compare. No benchmark files in '*' match '1'."
    ])


def test_compare_no_files_match_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare=1', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*' match '1'."
    ])


def test_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-verbose',
                               '-vv', test)
    result.stderr.fnmatch_lines([
        "  Timer precision: *s",
        "  Calibrating to target round *s; will estimate when reaching *s.",
        "    Measured * iterations: *s.",
        "  Running * rounds x * iterations ...",
        "  Ran for *s.",
    ])


def test_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())


def test_save_extra_info(testdir):
    test = testdir.makepyfile("""
    def test_extra(benchmark):
        benchmark.extra_info['foo'] = 'bar'
        benchmark(lambda: None)
    """)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    info = json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())
    bench_info = info['benchmarks'][0]
    assert bench_info['name'] == 'test_extra'
    assert bench_info['extra_info'] == {'foo': 'bar'}


def test_save_percentiles(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', '--benchmark-columns=min,p99,max', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    info = json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())
    bench_info = info['benchmarks'][0]
    assert 'p99' in bench_info['stats']


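# test_save_percentiles above exercises saving percentile stats when a 'pXX'
# column is requested via --benchmark-columns (the 'pXX.XX' syntax is listed
# in the help text checked by test_help). An illustrative sketch of reading
# the saved value back; the directory name is machine specific, hence the
# placeholder path:
#
#   with open('.benchmarks/<machine-dir>/0001_foobar.json') as fh:
#       data = json.load(fh)
#   assert 'p99' in data['benchmarks'][0]['stats']
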
def test_histogram(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-histogram=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Generated histogram: *foobar.svg",
    ])
    assert [f.basename for f in testdir.tmpdir.listdir("*.svg", sort=True)] == [
        'foobar.svg',
    ]


def test_autosave(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-autosave',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].listdir('0001_*.json')[0].read())


def test_bogus_min_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


def test_disable_gc(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-disable-gc', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_disable_gc.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_custom_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=time.time', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_custom_timer.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-timer: Value for --benchmark-timer must be in dotted form. Eg: "
        "'module.attr'.",
    ])


def test_sort_by_mean(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=mean', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_sort_by_mean.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_sort(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one "
        "of: 'min', 'max', 'mean', 'stddev', 'name', 'fullname'."
    ])


def test_xdist(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '-rw', test)
    result.stdout.fnmatch_lines([
        "* Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be "
        "performed reliably in a parallelized environment.",
    ])


def test_xdist_verbose(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        "------*",
        " WARNING: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed "
        "reliably in a parallelized environment.",
        "------*",
    ])


def test_cprofile(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-cprofile=cumtime', test)
    result.stdout.fnmatch_lines([
        "============*=========== cProfile information ============*===========",
        "Time in s",
        "test_cprofile.py::test_fast",
        "ncalls	tottime	percall	cumtime	percall	filename:lineno(function)",
        # "1	0.0000	0.0000	0.0001	0.0001	test_cprofile0/test_cprofile.py:9(result)",
        # "1	0.0001	0.0001	0.0001	0.0001	~:0(<built-in method time.sleep>)",
        # "1	0.0000	0.0000	0.0000	0.0000	~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
        "",
        "test_cprofile.py::test_slow",
        "ncalls	tottime	percall	cumtime	percall	filename:lineno(function)",
        # "1	0.0000	0.0000	0.1002	0.1002	test_cprofile0/test_cprofile.py:15(<lambda>)",
        # "1	0.1002	0.1002	0.1002	0.1002	~:0(<built-in method time.sleep>)",
        # "1	0.0000	0.0000	0.0000	0.0000	~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
    ])


def test_abort_broken(testdir):
    """
    Test that we don't benchmark code that raises exceptions.
    """
    test = testdir.makepyfile('''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_bad(benchmark):
    @benchmark
    def result():
        raise Exception()
    assert 1 == 1

def test_bad2(benchmark):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0

@pytest.fixture(params=['a', 'b', 'c'])
def bad_fixture(request):
    raise ImportError()

def test_ok(benchmark, bad_fixture):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0
''')
    result = testdir.runpytest('-vv', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",

        "test_abort_broken.py::test_bad FAILED",
        "test_abort_broken.py::test_bad2 FAILED",
        "test_abort_broken.py::test_ok[a] ERROR",
        "test_abort_broken.py::test_ok[b] ERROR",
        "test_abort_broken.py::test_ok[c] ERROR",

        "*====== ERRORS ======*",
        "*______ ERROR at setup of test_ok[[]a[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[a]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]b[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[b]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]c[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[c]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*====== FAILURES ======*",
        "*______ test_bad ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad(benchmark):",
        ">       @benchmark",
        "        def result():",

        "test_abort_broken.py:*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",
        "*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",

        "    @benchmark",
        "    def result():",
        ">       raise Exception()",
        "E       Exception",

        "test_abort_broken.py:11: Exception",
        "*______ test_bad2 ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad2(benchmark):",
        "        @benchmark",
        "        def result():",
        "            time.sleep(0.1)",
        ">       assert 1 == 0",
        "E       assert 1 == 0",

        "test_abort_broken.py:18: AssertionError",
    ])

    result.stdout.fnmatch_lines([
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_bad2           *",
        "------*",

        "*====== 2 failed*, 3 error* seconds ======*",
    ])


BASIC_TEST = '''
"""
Just to make sure the plugin doesn't choke on doctests::
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
from functools import partial

import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    assert benchmark(partial(time.sleep, 0.001)) is None

def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))

@pytest.mark.benchmark(min_rounds=2)
def test_xfast(benchmark):
    benchmark(str)

def test_fast(benchmark):  # shadows the earlier test_fast; only this one is collected
    benchmark(int)
'''


def test_basic(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_basic.py::*test_basic PASSED",
        "test_basic.py::test_slow PASSED",
        "test_basic.py::test_slower PASSED",
        "test_basic.py::test_xfast PASSED",
        "test_basic.py::test_fast PASSED",
        "",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "",
        "*====== 5 passed* seconds ======*",
    ])


def test_skip(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_skip.py::*test_skip PASSED",
        "test_skip.py::test_slow SKIPPED",
        "test_skip.py::test_slower SKIPPED",
        "test_skip.py::test_xfast SKIPPED",
        "test_skip.py::test_fast SKIPPED",
        "*====== 1 passed, 4 skipped* seconds ======*",
    ])


def test_disable(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_disable.py::*test_disable PASSED",
        "test_disable.py::test_slow PASSED",
        "test_disable.py::test_slower PASSED",
        "test_disable.py::test_xfast PASSED",
        "test_disable.py::test_fast PASSED",
        "*====== 5 passed * seconds ======*",
    ])


def test_mark_selection(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_mark_selection.py::test_xfast PASSED",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_xfast       *",
        "------*",
        "*====== 4 tests deselected* ======*",
        "*====== 1 passed, 4 deselected* seconds ======*",
    ])


def test_only_benchmarks(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-only', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_only_benchmarks.py::*test_only_benchmarks SKIPPED",
        "test_only_benchmarks.py::test_slow PASSED",
        "test_only_benchmarks.py::test_slower PASSED",
        "test_only_benchmarks.py::test_xfast PASSED",
        "test_only_benchmarks.py::test_fast PASSED",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "*====== 4 passed, 1 skipped* seconds ======*",
    ])


def test_columns(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * Iterations * Min *",
        "------*",
    ])


def test_columns_percentiles(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,p99,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns_percentiles.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * P99 * Iterations * Min *",
        "------*",
    ])
1099