Completed
Pull Request — master (#61) by unknown, created 01:10

test_save_extra_info()   A

Complexity:   Conditions 3
Size:         Total Lines 15
Duplication:  Lines 0, Ratio 0 %
Importance:   Changes 1, Bugs 0, Features 0

Metric  Value
cc      3
c       1
b       0
f       0
dl      0
loc     15
rs      9.4285
import json
import platform

import pytest

pytest_plugins = 'pytester',
platform  # bare reference, presumably kept so the import is not flagged as unused; platform is only used inside the skipif() string condition below


def test_help(testdir):
    result = testdir.runpytest('--help')
    result.stdout.fnmatch_lines([
        "*", "*",
        "benchmark:",
        "  --benchmark-min-time=SECONDS",
        "                        Minimum time per round in seconds. Default: '0.000005'",
        "  --benchmark-max-time=SECONDS",
        "                        Maximum run time per test - it will be repeated until",
        "                        this total time is reached. It may be exceeded if test",
        "                        function is very slow or --benchmark-min-rounds is",
        "                        large (it takes precedence). Default: '1.0'",
        "  --benchmark-min-rounds=NUM",
        "                        Minimum rounds, even if total time would exceed",
        "                        `--max-time`. Default: 5",
        "  --benchmark-timer=FUNC",
        "                        Timer to use when measuring time. Default:*",
        "  --benchmark-calibration-precision=NUM",
        "                        Precision to use when calibrating number of",
        "                        iterations. Precision of 10 will make the timer look",
        "                        10 times more accurate, at a cost of less precise",
        "                        measure of deviations. Default: 10",
        "  --benchmark-warmup=[KIND]",
        "                        Activates warmup. Will run the test function up to",
        "                        number of times in the calibration phase. See",
        "                        `--benchmark-warmup-iterations`. Note: Even the warmup",
        "                        phase obeys --benchmark-max-time. Available KIND:",
        "                        'auto', 'off', 'on'. Default: 'auto' (automatically",
        "                        activate on PyPy).",
        "  --benchmark-warmup-iterations=NUM",
        "                        Max number of iterations to run in the warmup phase.",
        "                        Default: 100000",
        "  --benchmark-disable-gc",
        "                        Disable GC during benchmarks.",
        "  --benchmark-skip      Skip running any tests that contain benchmarks.",
        "  --benchmark-only      Only run benchmarks.",
        "  --benchmark-save=NAME",
        "                        Save the current run into 'STORAGE-",
        "                        PATH/counter_NAME.json'.",
        "  --benchmark-autosave  Autosave the current run into 'STORAGE-PATH/counter*",
        "                        *.json",
        "  --benchmark-save-data",
        "                        Use this to make --benchmark-save and --benchmark-",
        "                        autosave include all the timing data, not just the",
        "                        stats.",
        "  --benchmark-json=PATH",
        "                        Dump a JSON report into PATH. Note that this will",
        "                        include the complete data (all the timings, not just",
        "                        the stats).",
        "  --benchmark-compare=[NUM|_ID]",
        "                        Compare the current run against run NUM (or prefix of",
        "                        _id in elasticsearch) or the latest saved run if",
        "                        unspecified.",
        "  --benchmark-compare-fail=EXPR=[EXPR=...]",
        "                        Fail test if performance regresses according to given",
        "                        EXPR (eg: min:5% or mean:0.001 for number of seconds).",
        "                        Can be used multiple times.",
        "  --benchmark-cprofile=COLUMN",
        "                        If specified measure one run with cProfile and stores",
        "                        10 top functions. Argument is a column to sort by.",
        "                        Available columns: 'ncallls_recursion', 'ncalls',",
        "                        'tottime', 'tottime_per', 'cumtime', 'cumtime_per',",
        "                        'function_name'.",
        "  --benchmark-storage=URI",
        "                        Specify a path to store the runs as uri in form",
        "                        file://path or elasticsearch+http[s]://host1,host2/[in",
        "                        dex/doctype?project_name=Project] (when --benchmark-",
        "                        save or --benchmark-autosave are used). For backwards",
        "                        compatibility unexpected values are converted to",
        "                        file://<value>. Default: 'file://./.benchmarks'.",
        "  --benchmark-verbose   Dump diagnostic and progress information.",
        "  --benchmark-sort=COL  Column to sort on. Can be one of: 'min', 'max',",
        "                        'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
        "  --benchmark-group-by=LABEL",
        "                        How to group tests. Can be one of: 'group', 'name',",
        "                        'fullname', 'func', 'fullfunc', 'param' or",
        "                        'param:NAME', where NAME is the name passed to",
        "                        @pytest.parametrize. Default: 'group'",
        "  --benchmark-columns=LABELS",
        "                        Comma-separated list of columns to show in the result",
        "                        table. Default: 'min, max, mean, stddev, median, iqr,",
        "                        outliers, rounds, iterations'",
        "  --benchmark-histogram=[FILENAME-PREFIX]",
        "                        Plot graphs of min/max/avg/stddev over time in",
        "                        FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        "                        contains slashes ('/') then directories will be",
        "                        created. Default: '*'",
        "*",
    ])


def test_groups(testdir):
    test = testdir.makepyfile('''"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    benchmark(lambda: time.sleep(0.000001))
    assert 1 == 1

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.001))
    assert 1 == 1

@pytest.mark.benchmark(group="A")
def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))
    assert 1 == 1

@pytest.mark.benchmark(group="A", warmup=True)
def test_xfast(benchmark):
    benchmark(lambda: None)
    assert 1 == 1
''')
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_groups.py::*test_groups PASSED",
        "test_groups.py::test_fast PASSED",
        "test_groups.py::test_slow PASSED",
        "test_groups.py::test_slower PASSED",
        "test_groups.py::test_xfast PASSED",
        "*",
        "* benchmark: 2 tests *",
        "*",
        "* benchmark 'A': 2 tests *",
        "*",
        "*====== 5 passed* seconds ======*",
    ])


SIMPLE_TEST = '''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.1))
    assert 1 == 1
'''

GROUPING_TEST = '''
import pytest

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo):
    benchmark(str)

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo):
    benchmark(int)
'''

GROUPING_PARAMS_TEST = '''
import pytest

@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo, bar):
    benchmark(str)


@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo, bar):
    benchmark(int)
'''


def test_group_by_name(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'name', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_a[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]0[]]             *',
        'test_a[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_a[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]1[]]             *',
        'test_a[[]1[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]0[]]             *',
        'test_b[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]1[]]             *',
        'test_b[[]1[]]             *',
        '----------------------*',
    ])


def test_group_by_func(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'func', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*',
        "* benchmark 'test_a': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        '----------------------*',
        '*', '*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullfunc(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullfunc', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_x.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_x.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from M*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_all(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark '0': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        '-------------------*',
        '',
        "* benchmark '1': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])

def test_group_by_param_select(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_select_multiple(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo,param:bar',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1 bar=bar1': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo1 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar1': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])

def test_group_by_fullname(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullname', test_x, test_y)
    result.stdout.fnmatch_lines_random([
        "* benchmark 'test_x.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]1[]]': 1 tests *",
        '============* 8 passed* seconds ============*',
    ])


def test_double_use(testdir):
    test = testdir.makepyfile('''
def test_a(benchmark):
    benchmark(lambda: None)
    benchmark.pedantic(lambda: None)

def test_b(benchmark):
    benchmark.pedantic(lambda: None)
    benchmark(lambda: None)
''')
    result = testdir.runpytest(test, '--tb=line')
    result.stdout.fnmatch_lines([
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark(...) mode.',
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark.pedantic(...) mode.',
    ])


def test_conflict_between_only_and_skip(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-skip', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-skip options."
    ])


def test_conflict_between_only_and_disable(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-disable', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
        "automatically activated if xdist is on or you're missing the statistics dependency."
    ])


def test_max_time_min_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time_min_rounds.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 1  *",
        "test_slow          * 1  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 5  *",
        "test_slow          * 5  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_bogus_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-max-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


@pytest.mark.skipif("platform.python_implementation() == 'PyPy'")
def test_pep418_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               '--benchmark-timer=pep418.perf_counter', test)
    result.stdout.fnmatch_lines([
        "* (defaults: timer=*.perf_counter*",
    ])


def test_bad_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=asd:f?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Must not contain any of these characters: /:*?<>|\\ (it has ':?')",
    ])


def test_bad_save_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-save: Can't be empty.",
    ])


def test_bad_compare_fail(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-compare-fail=?', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-compare-fail: Could not parse value: '?'.",
    ])


def test_bad_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=asd', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: invalid literal for int() with base 10: 'asd'",
    ])


def test_bad_rounds_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=0', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-rounds: Value for --benchmark-rounds must be at least 1.",
    ])


def test_compare(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_test_compare0_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_test_compare0_unversioned_*.json",
    ])


def test_compare_last(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_test_compare_last0_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_test_compare_last0_unversioned_*.json",
    ])


def test_compare_non_existing(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002', '-rw',
                               test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C1 * Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_non_existing_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002',
                               test, '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files * '0002'.",
    ])


def test_compare_no_files(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare')
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C2 * Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*'."
        " Can't load the previous benchmark."
    ])


def test_compare_no_files_match(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare=1')
    result.stdout.fnmatch_lines([
        "WBENCHMARK-C1 * Can't compare. No benchmark files in '*' match '1'."
    ])


def test_compare_no_files_match_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare=1', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*' match '1'."
    ])


def test_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-verbose',
                               '-vv', test)
    result.stderr.fnmatch_lines([
        "  Timer precision: *s",
        "  Calibrating to target round *s; will estimate when reaching *s.",
        "    Measured * iterations: *s.",
        "  Running * rounds x * iterations ...",
        "  Ran for *s.",
    ])


def test_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())


def test_save_extra_info(testdir):
    test = testdir.makepyfile("""
    def test_extra(benchmark):
        benchmark.extra_info['foo'] = 'bar'
        benchmark(lambda: None)
    """)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    info = json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())
    bench_info = info['benchmarks'][0]
    assert bench_info['name'] == 'test_extra'
    assert bench_info['extra_info'] == {'foo': 'bar'}


def test_histogram(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-histogram=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Generated histogram: *foobar.svg",
    ])
    assert [f.basename for f in testdir.tmpdir.listdir("*.svg", sort=True)] == [
        'foobar.svg',
    ]


def test_autosave(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-autosave',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].listdir('0001_*.json')[0].read())


def test_bogus_min_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


def test_disable_gc(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-disable-gc', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_disable_gc.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_custom_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=time.time', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_custom_timer.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-timer: Value for --benchmark-timer must be in dotted form. Eg: "
        "'module.attr'.",
    ])


def test_sort_by_mean(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=mean', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_sort_by_mean.py ..",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_sort(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one of: 'min', 'max', 'mean', 'stddev', 'name', 'fullname'."
    ])


def test_xdist(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '-rw', test)
    result.stdout.fnmatch_lines([
        "WBENCHMARK-U2 * Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be "
        "performed reliably in a parallelized environment.",
    ])


def test_xdist_verbose(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        "------*",
        " WARNING: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed "
        "reliably in a parallelized environment.",
        "------*",
    ])


def test_cprofile(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-cprofile=cumtime', test)
    result.stdout.fnmatch_lines([
        "============================= cProfile information =============================",
        "Time in s",
        "test_cprofile.py::test_fast",
        "ncalls\ttottime\tpercall\tcumtime\tpercall\tfilename:lineno(function)",
        # "1\t0.0000\t0.0000\t0.0001\t0.0001\ttest_cprofile0/test_cprofile.py:9(result)",
        # "1\t0.0001\t0.0001\t0.0001\t0.0001\t~:0(<built-in method time.sleep>)",
        # "1\t0.0000\t0.0000\t0.0000\t0.0000\t~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
        "",
        "test_cprofile.py::test_slow",
        "ncalls\ttottime\tpercall\tcumtime\tpercall\tfilename:lineno(function)",
        # "1\t0.0000\t0.0000\t0.1002\t0.1002\ttest_cprofile0/test_cprofile.py:15(<lambda>)",
        # "1\t0.1002\t0.1002\t0.1002\t0.1002\t~:0(<built-in method time.sleep>)",
        # "1\t0.0000\t0.0000\t0.0000\t0.0000\t~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
    ])


def test_abort_broken(testdir):
    """
    Test that we don't benchmark code that raises exceptions.
    """
    test = testdir.makepyfile('''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_bad(benchmark):
    @benchmark
    def result():
        raise Exception()
    assert 1 == 1

def test_bad2(benchmark):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0

@pytest.fixture(params=['a', 'b', 'c'])
def bad_fixture(request):
    raise ImportError()

def test_ok(benchmark, bad_fixture):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0
''')
    result = testdir.runpytest('-vv', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",

        "test_abort_broken.py::test_bad FAILED",
        "test_abort_broken.py::test_bad2 FAILED",
        "test_abort_broken.py::test_ok[a] ERROR",
        "test_abort_broken.py::test_ok[b] ERROR",
        "test_abort_broken.py::test_ok[c] ERROR",

        "*====== ERRORS ======*",
        "*______ ERROR at setup of test_ok[[]a[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[a]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]b[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[b]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]c[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[c]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*====== FAILURES ======*",
        "*______ test_bad ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad(benchmark):",
        ">       @benchmark",
        "        def result():",

        "test_abort_broken.py:*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",
        "*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",

        "    @benchmark",
        "    def result():",
        ">       raise Exception()",
        "E       Exception",

        "test_abort_broken.py:11: Exception",
        "*______ test_bad2 ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad2(benchmark):",
        "        @benchmark",
        "        def result():",
        "            time.sleep(0.1)",
        ">       assert 1 == 0",
        "E       assert 1 == 0",

        "test_abort_broken.py:18: AssertionError",
    ])

    result.stdout.fnmatch_lines([
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_bad2           *",
        "------*",

        "*====== 2 failed*, 3 error* seconds ======*",
    ])

BASIC_TEST = '''
"""
Just to make sure the plugin doesn't choke on doctests::
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
from functools import partial

import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    assert benchmark(partial(time.sleep, 0.001)) is None

def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))

@pytest.mark.benchmark(min_rounds=2)
def test_xfast(benchmark):
    benchmark(str)

def test_fast(benchmark):
    benchmark(int)
'''


def test_basic(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_basic.py::*test_basic PASSED",
        "test_basic.py::test_slow PASSED",
        "test_basic.py::test_slower PASSED",
        "test_basic.py::test_xfast PASSED",
        "test_basic.py::test_fast PASSED",
        "",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "",
        "*====== 5 passed* seconds ======*",
    ])


def test_skip(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_skip.py::*test_skip PASSED",
        "test_skip.py::test_slow SKIPPED",
        "test_skip.py::test_slower SKIPPED",
        "test_skip.py::test_xfast SKIPPED",
        "test_skip.py::test_fast SKIPPED",
        "*====== 1 passed, 4 skipped* seconds ======*",
    ])


def test_disable(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_disable.py::*test_disable PASSED",
        "test_disable.py::test_slow PASSED",
        "test_disable.py::test_slower PASSED",
        "test_disable.py::test_xfast PASSED",
        "test_disable.py::test_fast PASSED",
        "*====== 5 passed * seconds ======*",
    ])


def test_mark_selection(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_mark_selection.py::test_xfast PASSED",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_xfast       *",
        "------*",
        "*====== 4 tests deselected* ======*",
        "*====== 1 passed, 4 deselected* seconds ======*",
    ])


def test_only_benchmarks(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-only', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_only_benchmarks.py::*test_only_benchmarks SKIPPED",
        "test_only_benchmarks.py::test_slow PASSED",
        "test_only_benchmarks.py::test_slower PASSED",
        "test_only_benchmarks.py::test_xfast PASSED",
        "test_only_benchmarks.py::test_fast PASSED",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "*====== 4 passed, 1 skipped* seconds ======*",
    ])

def test_columns(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns.py ...",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * Iterations * Min *",
        "------*",
    ])