Completed: Pull Request — master (#59), by unknown, created 01:30

test_cprofile()   A

Complexity:   Conditions 1
Size:         Total Lines 14
Duplication:  Lines 0, Ratio 0%
Importance:   Changes 0

Metric  Value
cc      1
c       0
b       0
f       0
dl      0
loc     14
rs      9.4285
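The per-function figures above (cyclomatic complexity "cc 1", "loc 14", and the "A" rank) can be cross-checked locally. A minimal sketch, assuming a radon-based analysis (the dashboard's own definitions of columns such as "rs", "c", "b", "f" and "dl" may differ) and assuming the reviewed file is saved as test_benchmark.py:

    # Sketch: recompute complexity and size for one function with radon (assumed tooling).
    from radon.complexity import cc_visit, cc_rank
    from radon.raw import analyze

    with open('test_benchmark.py') as fh:  # illustrative path
        source = fh.read()

    for block in cc_visit(source):  # one block per function/method/class
        if block.name == 'test_cprofile':
            # Cyclomatic complexity and its letter rank ('A' means low complexity).
            print(block.name, block.complexity, cc_rank(block.complexity))
            # Raw line counts restricted to this function's span.
            span = '\n'.join(source.splitlines()[block.lineno - 1:block.endline])
            print('loc:', analyze(span).loc)

The reviewed file follows.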
import json
import platform

import pytest

pytest_plugins = 'pytester',
platform  # keep the "unused" import referenced; the skipif() string condition below is evaluated in this module's namespace


def test_help(testdir):
    result = testdir.runpytest('--help')
    result.stdout.fnmatch_lines([
        "*", "*",
        "benchmark:",
        "  --benchmark-min-time=SECONDS",
        "                        Minimum time per round in seconds. Default: '0.000005'",
        "  --benchmark-max-time=SECONDS",
        "                        Maximum run time per test - it will be repeated until",
        "                        this total time is reached. It may be exceeded if test",
        "                        function is very slow or --benchmark-min-rounds is",
        "                        large (it takes precedence). Default: '1.0'",
        "  --benchmark-min-rounds=NUM",
        "                        Minimum rounds, even if total time would exceed",
        "                        `--max-time`. Default: 5",
        "  --benchmark-timer=FUNC",
        "                        Timer to use when measuring time. Default:*",
        "  --benchmark-calibration-precision=NUM",
        "                        Precision to use when calibrating number of",
        "                        iterations. Precision of 10 will make the timer look",
        "                        10 times more accurate, at a cost of less precise",
        "                        measure of deviations. Default: 10",
        "  --benchmark-warmup=[KIND]",
        "                        Activates warmup. Will run the test function up to",
        "                        number of times in the calibration phase. See",
        "                        `--benchmark-warmup-iterations`. Note: Even the warmup",
        "                        phase obeys --benchmark-max-time. Available KIND:",
        "                        'auto', 'off', 'on'. Default: 'auto' (automatically",
        "                        activate on PyPy).",
        "  --benchmark-warmup-iterations=NUM",
        "                        Max number of iterations to run in the warmup phase.",
        "                        Default: 100000",
        "  --benchmark-disable-gc",
        "                        Disable GC during benchmarks.",
        "  --benchmark-skip      Skip running any tests that contain benchmarks.",
        "  --benchmark-only      Only run benchmarks.",
        "  --benchmark-save=NAME",
        "                        Save the current run into 'STORAGE-",
        "                        PATH/counter_NAME.json'.",
        "  --benchmark-autosave  Autosave the current run into 'STORAGE-",
        "                        PATH/counter_*.json",
        "  --benchmark-save-data",
        "                        Use this to make --benchmark-save and --benchmark-",
        "                        autosave include all the timing data, not just the",
        "                        stats.",
        "  --benchmark-json=PATH",
        "                        Dump a JSON report into PATH. Note that this will",
        "                        include the complete data (all the timings, not just",
        "                        the stats).",
        "  --benchmark-compare=[NUM]",
        "                        Compare the current run against run NUM or the latest",
        "                        saved run if unspecified.",
        "  --benchmark-compare-fail=EXPR=[EXPR=...]",
        "                        Fail test if performance regresses according to given",
        "                        EXPR (eg: min:5% or mean:0.001 for number of seconds).",
        "                        Can be used multiple times.",
        "  --benchmark-cprofile=COLUMN",
        "                        If specified measure one run with cProfile and stores",
        "                        10 top functions. Argument is a column to sort by.",
        "                        Available columns: 'ncallls_recursion', 'ncalls',",
        "                        'tottime', 'tottime_per', 'cumtime', 'cumtime_per',",
        "                        'function_name'.",
        "  --benchmark-storage=STORAGE-PATH",
        "                        Specify a different path to store the runs (when",
        "                        --benchmark-save or --benchmark-autosave are used).",
        "                        Default: './.benchmarks'",
        "  --benchmark-verbose   Dump diagnostic and progress information.",
        "  --benchmark-sort=COL  Column to sort on. Can be one of: 'min', 'max',",
        "                        'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
        "  --benchmark-group-by=LABEL",
        "                        How to group tests. Can be one of: 'group', 'name',",
        "                        'fullname', 'func', 'fullfunc', 'param' or",
        "                        'param:NAME', where NAME is the name passed to",
        "                        @pytest.parametrize. Default: 'group'",
        "  --benchmark-columns=LABELS",
        "                        Comma-separated list of columns to show in the result",
        "                        table. Default: 'min, max, mean, stddev, median, iqr,",
        "                        outliers, rounds, iterations'",
        "  --benchmark-histogram=[FILENAME-PREFIX]",
        "                        Plot graphs of min/max/avg/stddev over time in",
        "                        FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        "                        contains slashes ('/') then directories will be",
        "                        created. Default: '*'",
        "*",
    ])


def test_groups(testdir):
    test = testdir.makepyfile('''"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    benchmark(lambda: time.sleep(0.000001))
    assert 1 == 1

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.001))
    assert 1 == 1

@pytest.mark.benchmark(group="A")
def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))
    assert 1 == 1

@pytest.mark.benchmark(group="A", warmup=True)
def test_xfast(benchmark):
    benchmark(lambda: None)
    assert 1 == 1
''')
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_groups.py::*test_groups PASSED",
        "test_groups.py::test_fast PASSED",
        "test_groups.py::test_slow PASSED",
        "test_groups.py::test_slower PASSED",
        "test_groups.py::test_xfast PASSED",
        "*",
        "* benchmark: 2 tests *",
        "*",
        "* benchmark 'A': 2 tests *",
        "*",
        "*====== 5 passed* seconds ======*",
    ])


SIMPLE_TEST = '''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.1))
    assert 1 == 1
'''

GROUPING_TEST = '''
import pytest

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo):
    benchmark(str)

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo):
    benchmark(int)
'''

GROUPING_PARAMS_TEST = '''
import pytest

@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo, bar):
    benchmark(str)


@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo, bar):
    benchmark(int)
'''


def test_group_by_name(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'name', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_a[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]0[]]             *',
        'test_a[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_a[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]1[]]             *',
        'test_a[[]1[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]0[]]             *',
        'test_b[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]1[]]             *',
        'test_b[[]1[]]             *',
        '----------------------*',
    ])


def test_group_by_func(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'func', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*',
        "* benchmark 'test_a': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        '----------------------*',
        '*', '*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullfunc(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullfunc', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_x.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_x.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from M*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_all(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark '0': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        '-------------------*',
        '',
        "* benchmark '1': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])

def test_group_by_param_select(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_select_multiple(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo,param:bar',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1 bar=bar1': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo1 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar1': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])

def test_group_by_fullname(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullname', test_x, test_y)
    result.stdout.fnmatch_lines_random([
        "* benchmark 'test_x.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]1[]]': 1 tests *",
        '============* 8 passed* seconds ============*',
    ])


def test_double_use(testdir):
    test = testdir.makepyfile('''
def test_a(benchmark):
    benchmark(lambda: None)
    benchmark.pedantic(lambda: None)

def test_b(benchmark):
    benchmark.pedantic(lambda: None)
    benchmark(lambda: None)
''')
    result = testdir.runpytest(test, '--tb=line')
    result.stdout.fnmatch_lines([
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark(...) mode.',
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark.pedantic(...) mode.',
    ])


def test_conflict_between_only_and_skip(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-skip', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-skip options."
    ])


def test_conflict_between_only_and_disable(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-disable', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
        "automatically activated if xdist is on or you're missing the statistics dependency."
    ])


def test_max_time_min_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time_min_rounds.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 1  *",
        "test_slow          * 1  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 5  *",
        "test_slow          * 5  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_bogus_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-max-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


@pytest.mark.skipif("platform.python_implementation() == 'PyPy'")
def test_pep418_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               '--benchmark-timer=pep418.perf_counter', test)
    result.stdout.fnmatch_lines([
        "* (defaults: timer=*.perf_counter*",
    ])


def test_bad_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=asd:f?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Must not contain any of these characters: /:*?<>|\\ (it has ':?')",
    ])


def test_bad_save_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Can't be empty.",
    ])


def test_bad_compare_fail(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-compare-fail=?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-compare-fail: Could not parse value: '?'.",
    ])


def test_bad_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=asd', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: invalid literal for int() with base 10: 'asd'",
    ])


def test_bad_rounds_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=0', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: Value for --benchmark-rounds must be at least 1.",
    ])


def test_compare(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


def test_compare_last(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


def test_compare_non_existing(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002', '-rw',
                               test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C1 * Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_non_existing_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002',
                               test, '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_no_files(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare')
    result.stdout.fnmatch_lines([
         "WBENCHMARK-C2 * Can't compare. No benchmark files in '*'."
         " Can't load the previous benchmark."
    ])


def test_compare_no_files_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_match(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare=1')
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C1 * Can't compare. No benchmark files in '*' match '1'."
    ])


def test_compare_no_files_match_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare=1', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*' match '1'."
    ])


def test_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-verbose',
                               '-vv', test)
    result.stderr.fnmatch_lines([
        "  Timer precision: *s",
        "  Calibrating to target round *s; will estimate when reaching *s.",
        "    Measured * iterations: *s.",
        "  Running * rounds x * iterations ...",
        "  Ran for *s.",
    ])


def test_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())


def test_histogram(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-histogram=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Generated histogram: *foobar.svg",
    ])
    assert [f.basename for f in testdir.tmpdir.listdir("*.svg", sort=True)] == [
        'foobar.svg',
    ]


def test_autosave(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-autosave',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].listdir('0001_*.json')[0].read())


def test_bogus_min_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


def test_disable_gc(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-disable-gc', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_disable_gc.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_custom_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=time.time', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_custom_timer.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-timer: Value for --benchmark-timer must be in dotted form. Eg: "
        "'module.attr'.",
    ])


def test_sort_by_mean(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=mean', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_sort_by_mean.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_sort(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one of: 'min', 'max', 'mean', 'stddev', 'name', 'fullname'."
    ])


def test_xdist(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '-rw', test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-U2 * Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be "
        "performed reliably in a parallelized environment.",
    ])


def test_xdist_verbose(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        "------*",
        " WARNING: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed "
        "reliably in a parallelized environment.",
        "------*",
    ])


def test_cprofile(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-cprofile=cumtime', test)
    result.stdout.fnmatch_lines([
        "============================= cProfile information =============================",
        "Time in s",
        "test_cprofile.py::test_fast",
        "ncalls	tottime	percall	cumtime	percall	filename:lineno(function)",
        # "1	0.0000	0.0000	0.0001	0.0001	test_cprofile0/test_cprofile.py:9(result)",
        # "1	0.0001	0.0001	0.0001	0.0001	~:0(<built-in method time.sleep>)",
        # "1	0.0000	0.0000	0.0000	0.0000	~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
        "",
        "test_cprofile.py::test_slow",
        "ncalls	tottime	percall	cumtime	percall	filename:lineno(function)",
        # "1	0.0000	0.0000	0.1002	0.1002	test_cprofile0/test_cprofile.py:15(<lambda>)",
        # "1	0.1002	0.1002	0.1002	0.1002	~:0(<built-in method time.sleep>)",
        # "1	0.0000	0.0000	0.0000	0.0000	~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
    ])


def test_abort_broken(testdir):
    """
    Test that we don't benchmark code that raises exceptions.
    """
    test = testdir.makepyfile('''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_bad(benchmark):
    @benchmark
    def result():
        raise Exception()
    assert 1 == 1

def test_bad2(benchmark):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0

@pytest.fixture(params=['a', 'b', 'c'])
def bad_fixture(request):
    raise ImportError()

def test_ok(benchmark, bad_fixture):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0
''')
    result = testdir.runpytest('-vv', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",

        "test_abort_broken.py::test_bad FAILED",
        "test_abort_broken.py::test_bad2 FAILED",
        "test_abort_broken.py::test_ok[a] ERROR",
        "test_abort_broken.py::test_ok[b] ERROR",
        "test_abort_broken.py::test_ok[c] ERROR",

        "*====== ERRORS ======*",
        "*______ ERROR at setup of test_ok[[]a[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[a]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]b[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[b]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]c[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[c]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*====== FAILURES ======*",
        "*______ test_bad ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad(benchmark):",
        ">       @benchmark",
        "        def result():",

        "test_abort_broken.py:*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",
        "*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",

        "    @benchmark",
        "    def result():",
        ">       raise Exception()",
        "E       Exception",

        "test_abort_broken.py:11: Exception",
        "*______ test_bad2 ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad2(benchmark):",
        "        @benchmark",
        "        def result():",
        "            time.sleep(0.1)",
        ">       assert 1 == 0",
        "E       assert 1 == 0",

        "test_abort_broken.py:18: AssertionError",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_bad2           *",
        "------*",

        "*====== 2 failed*, 3 error* seconds ======*",
    ])


BASIC_TEST = '''
"""
Just to make sure the plugin doesn't choke on doctests::
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
from functools import partial

import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    assert benchmark(partial(time.sleep, 0.001)) is None

def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))

@pytest.mark.benchmark(min_rounds=2)
def test_xfast(benchmark):
    benchmark(str)

def test_fast(benchmark):
    benchmark(int)
'''


def test_basic(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_basic.py::*test_basic PASSED",
        "test_basic.py::test_slow PASSED",
        "test_basic.py::test_slower PASSED",
        "test_basic.py::test_xfast PASSED",
        "test_basic.py::test_fast PASSED",
        "",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "",
        "*====== 5 passed* seconds ======*",
    ])


def test_skip(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_skip.py::*test_skip PASSED",
        "test_skip.py::test_slow SKIPPED",
        "test_skip.py::test_slower SKIPPED",
        "test_skip.py::test_xfast SKIPPED",
        "test_skip.py::test_fast SKIPPED",
        "*====== 1 passed, 4 skipped* seconds ======*",
    ])


def test_disable(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_disable.py::*test_disable PASSED",
        "test_disable.py::test_slow PASSED",
        "test_disable.py::test_slower PASSED",
        "test_disable.py::test_xfast PASSED",
        "test_disable.py::test_fast PASSED",
        "*====== 5 passed * seconds ======*",
    ])


def test_mark_selection(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_mark_selection.py::test_xfast PASSED",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_xfast       *",
        "------*",
        "*====== 4 tests deselected by \"-m 'benchmark'\" ======*",
        "*====== 1 passed, 4 deselected* seconds ======*",
    ])


def test_only_benchmarks(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-only', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_only_benchmarks.py::*test_only_benchmarks SKIPPED",
        "test_only_benchmarks.py::test_slow PASSED",
        "test_only_benchmarks.py::test_slower PASSED",
        "test_only_benchmarks.py::test_xfast PASSED",
        "test_only_benchmarks.py::test_fast PASSED",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "*====== 4 passed, 1 skipped* seconds ======*",
    ])

def test_columns(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * Iterations * Min *",
        "------*",
    ])