Completed · Pull Request — master (#116) · by unknown · created · 22s

test_conflict_between_only_and_skip()   A

Complexity
    Conditions: 1

Size
    Total Lines: 5

Duplication
    Lines: 0
    Ratio: 0 %

Importance
    Changes: 0

Metric    Value
cc        1
c         0
b         0
f         0
dl        0
loc       5
rs        9.4285
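The function these metrics describe, test_conflict_between_only_and_skip(), does not appear in the listing below. A plausible shape for it, mirroring test_conflict_between_only_and_disable further down, would be the following hypothetical sketch (names and the exact error text are assumptions, not taken from this page):

def test_conflict_between_only_and_skip(testdir):
    # Hypothetical sketch: same pattern as test_conflict_between_only_and_disable below.
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-skip', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-skip options.*",
    ])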
import json
import platform

import pytest

pytest_plugins = 'pytester',
platform

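# `pytest_plugins = 'pytester',` enables pytest's bundled pytester plugin, whose `testdir`
# fixture drives every test in this module; the bare `platform` expression just keeps that
# import referenced (it is only used inside a string-based skipif condition further down).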
def test_help(testdir):
    result = testdir.runpytest('--help')
    result.stdout.fnmatch_lines([
        "*", "*",
        "benchmark:",
        "  --benchmark-min-time=SECONDS",
        "                        Minimum time per round in seconds. Default: '0.000005'",
        "  --benchmark-max-time=SECONDS",
        "                        Maximum run time per test - it will be repeated until",
        "                        this total time is reached. It may be exceeded if test",
        "                        function is very slow or --benchmark-min-rounds is",
        "                        large (it takes precedence). Default: '1.0'",
        "  --benchmark-min-rounds=NUM",
        "                        Minimum rounds, even if total time would exceed",
        "                        `--max-time`. Default: 5",
        "  --benchmark-timer=FUNC",
        "                        Timer to use when measuring time. Default:*",
        "  --benchmark-calibration-precision=NUM",
        "                        Precision to use when calibrating number of",
        "                        iterations. Precision of 10 will make the timer look",
        "                        10 times more accurate, at a cost of less precise",
        "                        measure of deviations. Default: 10",
        "  --benchmark-warmup=[KIND]",
        "                        Activates warmup. Will run the test function up to",
        "                        number of times in the calibration phase. See",
        "                        `--benchmark-warmup-iterations`. Note: Even the warmup",
        "                        phase obeys --benchmark-max-time. Available KIND:",
        "                        'auto', 'off', 'on'. Default: 'auto' (automatically",
        "                        activate on PyPy).",
        "  --benchmark-warmup-iterations=NUM",
        "                        Max number of iterations to run in the warmup phase.",
        "                        Default: 100000",
        "  --benchmark-disable-gc",
        "                        Disable GC during benchmarks.",
        "  --benchmark-skip      Skip running any tests that contain benchmarks.",
        "  --benchmark-only      Only run benchmarks.",
        "  --benchmark-save=NAME",
        "                        Save the current run into 'STORAGE-",
        "                        PATH/counter_NAME.json'.",
        "  --benchmark-autosave  Autosave the current run into 'STORAGE-",
        "                        PATH/counter*.json",
        "  --benchmark-save-data",
        "                        Use this to make --benchmark-save and --benchmark-",
        "                        autosave include all the timing data, not just the",
        "                        stats.",
        "  --benchmark-json=PATH",
        "                        Dump a JSON report into PATH. Note that this will",
        "                        include the complete data (all the timings, not just",
        "                        the stats).",
        "  --benchmark-compare=[NUM|_ID]",
        "                        Compare the current run against run NUM (or prefix of",
        "                        _id in elasticsearch) or the latest saved run if",
        "                        unspecified.",
        "  --benchmark-compare-fail=EXPR?[[]EXPR?...[]]",
        "                        Fail test if performance regresses according to given",
        "                        EXPR (eg: min:5% or mean:0.001 for number of seconds).",
        "                        Can be used multiple times.",
        "  --benchmark-cprofile=COLUMN",
        "                        If specified measure one run with cProfile and stores",
        "                        10 top functions. Argument is a column to sort by.",
        "                        Available columns: 'ncallls_recursion', 'ncalls',",
        "                        'tottime', 'tottime_per', 'cumtime', 'cumtime_per',",
        "                        'function_name'.",
        "  --benchmark-storage=URI",
        "                        Specify a path to store the runs as uri in form",
        "                        file://path or elasticsearch+http[s]://host1,host2/[in",
        "                        dex/doctype?project_name=Project] (when --benchmark-",
        "                        save or --benchmark-autosave are used). For backwards",
        "                        compatibility unexpected values are converted to",
        "                        file://<value>. Default: 'file://./.benchmarks'.",
        "  --benchmark-verbose   Dump diagnostic and progress information.",
        "  --benchmark-sort=COL  Column to sort on. Can be one of: 'min', 'max',",
        "                        'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
        "  --benchmark-group-by=LABEL",
        "                        How to group tests. Can be one of: 'group', 'name',",
        "                        'fullname', 'func', 'fullfunc', 'param' or",
        "                        'param:NAME', where NAME is the name passed to",
        "                        @pytest.parametrize. Default: 'group'",
        "  --benchmark-columns=LABELS",
        "                        Comma-separated list of columns to show in the result",
        "                        table. Default: 'min, max, mean, stddev, median, iqr,",
        "                        outliers, rounds, iterations'",
        "  --benchmark-histogram=[FILENAME-PREFIX]",
        "                        Plot graphs of min/max/avg/stddev over time in",
        "                        FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        "                        contains slashes ('/') then directories will be",
        "                        created. Default: '*'",
        "*",
    ])


def test_groups(testdir):
    test = testdir.makepyfile('''"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    benchmark(lambda: time.sleep(0.000001))
    assert 1 == 1

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.001))
    assert 1 == 1

@pytest.mark.benchmark(group="A")
def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))
    assert 1 == 1

@pytest.mark.benchmark(group="A", warmup=True)
def test_xfast(benchmark):
    benchmark(lambda: None)
    assert 1 == 1
''')
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_groups.py::*test_groups PASSED*",
        "test_groups.py::test_fast PASSED*",
        "test_groups.py::test_slow PASSED*",
        "test_groups.py::test_slower PASSED*",
        "test_groups.py::test_xfast PASSED*",
        "*",
        "* benchmark: 2 tests *",
        "*",
        "* benchmark 'A': 2 tests *",
        "*",
        "*====== 5 passed* seconds ======*",
    ])


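# A minimal module (a module docstring carrying a doctest plus two benchmarked tests) that most
# of the command-line option tests below feed to `testdir.makepyfile()`.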
SIMPLE_TEST = '''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result == None

def test_slow(benchmark):
    benchmark(lambda: time.sleep(0.1))
    assert 1 == 1
'''

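# Two parametrized benchmarks assigned to groups "A" and "B"; written out twice (as test_x.py
# and test_y.py) by the --benchmark-group-by tests below.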
GROUPING_TEST = '''
import pytest

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo):
    benchmark(str)

@pytest.mark.parametrize("foo", range(2))
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo):
    benchmark(int)
'''

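# Same idea, but with two parameters (foo and bar) so that 'param:NAME' style grouping can be
# exercised on a single parameter or on several at once.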
GROUPING_PARAMS_TEST = '''
import pytest

@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo, bar):
    benchmark(str)


@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo, bar):
    benchmark(int)
'''


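# The grouping tests below write the snippets above into one or two files and check how the
# result tables are split for each --benchmark-group-by mode (name, func, fullfunc, param,
# param:NAME, fullname).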
def test_group_by_name(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'name', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_a[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]0[]]             *',
        'test_a[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_a[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]1[]]             *',
        'test_a[[]1[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]0[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]0[]]             *',
        'test_b[[]0[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b[[]1[]]': 2 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]1[]]             *',
        'test_b[[]1[]]             *',
        '----------------------*',
    ])


def test_group_by_func(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'func', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*',
        "* benchmark 'test_a': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        'test_a[[]*[]]             *',
        '----------------------*',
        '*',
        "* benchmark 'test_b': 4 tests *",
        'Name (time in ?s)     *',
        '----------------------*',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        'test_b[[]*[]]             *',
        '----------------------*',
        '*', '*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullfunc(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullfunc', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'test_x.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_x.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_a': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]*[]]         *',
        'test_a[[]*[]]         *',
        '------------------*',
        '',
        "* benchmark 'test_y.py::test_b': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_b[[]*[]]         *',
        'test_b[[]*[]]         *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from M*',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_all(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x, test_y)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark '0': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        'test_*[[]0[]]          *',
        '-------------------*',
        '',
        "* benchmark '1': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        'test_*[[]1[]]         *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_select(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1': 4 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_param_select_multiple(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo,param:bar',
                               '--benchmark-sort', 'fullname',
                               test_x)
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo=foo1 bar=bar1': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar1[]]    *',
        'test_b[[]foo1-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo1 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo1-bar2[]]    *',
        'test_b[[]foo1-bar2[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar1': 2 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]]    *',
        'test_b[[]foo2-bar1[]]    *',
        '-------------------*',
        '',
        "* benchmark 'foo=foo2 bar=bar2': 2 tests *",
        'Name (time in ?s)  *',
        '-------------------*',
        'test_a[[]foo2-bar2[]]    *',
        'test_b[[]foo2-bar2[]]    *',
        '------------------*',
        '',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullname(testdir):
    test_x = testdir.makepyfile(test_x=GROUPING_TEST)
    test_y = testdir.makepyfile(test_y=GROUPING_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'fullname', test_x, test_y)
    result.stdout.fnmatch_lines_random([
        "* benchmark 'test_x.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_x.py::test_b[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_a[[]1[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]0[]]': 1 tests *",
        "* benchmark 'test_y.py::test_b[[]1[]]': 1 tests *",
        '============* 8 passed* seconds ============*',
    ])


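# The benchmark fixture supports two calling modes, benchmark(...) and benchmark.pedantic(...);
# mixing them within one test must raise FixtureAlreadyUsed, as asserted below.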
def test_double_use(testdir):
    test = testdir.makepyfile('''
def test_a(benchmark):
    benchmark(lambda: None)
    benchmark.pedantic(lambda: None)

def test_b(benchmark):
    benchmark.pedantic(lambda: None)
    benchmark(lambda: None)
''')
    result = testdir.runpytest(test, '--tb=line')
    result.stdout.fnmatch_lines([
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark(...) mode.',
        '*FixtureAlreadyUsed: Fixture can only be used once. Previously it was used in benchmark.pedantic(...) mode.',
    ])


def test_only_override_skip(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_only_override_skip.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_conflict_between_only_and_disable(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-only', '--benchmark-disable', test)
    result.stderr.fnmatch_lines([
        "ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
        "automatically activated if xdist is on or you're missing the statistics dependency."
    ])


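# With a tiny --benchmark-max-time the round count drops to the minimum: 1 when
# --benchmark-min-rounds=1 is passed, otherwise the default of 5 (the values matched in the
# Rounds column below).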
def test_max_time_min_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time_min_rounds.py ...*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 1  *",
        "test_slow          * 1  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_max_time.py ...*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          * 5  *",
        "test_slow          * 5  *",
        "------*",
        "*====== 3 passed* seconds ======*",
    ])


def test_bogus_max_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-max-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


@pytest.mark.skipif("platform.python_implementation() == 'PyPy'")
def test_pep418_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               '--benchmark-timer=pep418.perf_counter', test)
    result.stdout.fnmatch_lines([
        "* (defaults: timer=*.perf_counter*",
    ])


def test_bad_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=asd:f?', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-save: Must not contain any of these characters: /:*?<>|\\ (it has ':?')",
    ])


def test_bad_save_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-save: Can't be empty.",
    ])


def test_bad_compare_fail(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-compare-fail=?', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-compare-fail: Could not parse value: '?'.",
    ])


def test_bad_rounds(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=asd', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-min-rounds: invalid literal for int() with base 10: 'asd'",
    ])


def test_bad_rounds_2(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-rounds=0', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-min-rounds: Value for --benchmark-rounds must be at least 1.",
    ])


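# Round-trip of the storage options: a first run is saved with --benchmark-autosave, then later
# runs compare against either an explicit run id (--benchmark-compare=0001) or the latest saved
# run, with --benchmark-compare-fail thresholds given as absolute seconds or as a percentage.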
def test_compare(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0001',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


def test_compare_last(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:0.1', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare',
                               '--benchmark-compare-fail=min:1%', test)
    result.stderr.fnmatch_lines([
        "Comparing against benchmarks from: *0001_unversioned_*.json",
    ])


593
    test = testdir.makepyfile(SIMPLE_TEST)
594
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
595
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002', '-rw',
596
                               test)
597
    result.stdout.fnmatch_lines([
598
        "* Can't compare. No benchmark files * '0002'.",
599
    ])
600
601
602
def test_compare_non_existing_verbose(testdir):
603
    test = testdir.makepyfile(SIMPLE_TEST)
604
    testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-autosave', test)
605
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002',
606
                               test, '--benchmark-verbose')
607
    result.stderr.fnmatch_lines([
608
        " WARNING: Can't compare. No benchmark files * '0002'.",
609
    ])
610
611
612
def test_compare_no_files(testdir):
613
    test = testdir.makepyfile(SIMPLE_TEST)
614
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
615
                               test, '--benchmark-compare')
616
    result.stdout.fnmatch_lines([
617
        "* Can't compare. No benchmark files in '*'."
618
        " Can't load the previous benchmark."
619
    ])
620
621
622
def test_compare_no_files_verbose(testdir):
623
    test = testdir.makepyfile(SIMPLE_TEST)
624
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
625
                               test, '--benchmark-compare', '--benchmark-verbose')
626
    result.stderr.fnmatch_lines([
627
        " WARNING: Can't compare. No benchmark files in '*'."
628
        " Can't load the previous benchmark."
629
    ])
630
631
632
def test_compare_no_files_match(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
                               test, '--benchmark-compare=1')
    result.stdout.fnmatch_lines([
        "* Can't compare. No benchmark files in '*' match '1'."
    ])


def test_compare_no_files_match_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
                               test, '--benchmark-compare=1', '--benchmark-verbose')
    result.stderr.fnmatch_lines([
        " WARNING: Can't compare. No benchmark files in '*' match '1'."
    ])


def test_verbose(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-verbose',
                               '-vv', test)
    result.stderr.fnmatch_lines([
        "  Timer precision: *s",
        "  Calibrating to target round *s; will estimate when reaching *s.",
        "    Measured * iterations: *s.",
        "  Running * rounds x * iterations ...",
        "  Ran for *s.",
    ])


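# --benchmark-save=NAME stores the run as .benchmarks/<subdirectory>/0001_NAME.json; the tests
# parse the saved JSON to prove it is valid and that extra_info set by a test ends up in it.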
def test_save(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())


def test_save_extra_info(testdir):
    test = testdir.makepyfile("""
    def test_extra(benchmark):
        benchmark.extra_info['foo'] = 'bar'
        benchmark(lambda: None)
    """)
    result = testdir.runpytest('--doctest-modules', '--benchmark-save=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    info = json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].join('0001_foobar.json').read())
    bench_info = info['benchmarks'][0]
    assert bench_info['name'] == 'test_extra'
    assert bench_info['extra_info'] == {'foo': 'bar'}


def test_update_machine_info_hook_detection(testdir):
    """Tests detection and execution of update_machine_info_hooks.

    Verifies that machine info hooks are detected and executed in nested
    `conftest.py`s.

    """

    record_path_conftest = '''
import os

def pytest_benchmark_update_machine_info(config, machine_info):
    machine_info["conftest_path"] = (
        machine_info.get("conftest_path", []) + [os.path.relpath(__file__)]
    )
    '''

    simple_test = '''
def test_simple(benchmark):
    @benchmark
    def result():
        1+1
    '''

    testdir.makepyfile(**{
        "conftest" : record_path_conftest,
        "test_module/conftest" : record_path_conftest,
        "test_module/tests/conftest" : record_path_conftest,
        "test_module/tests/simple_test.py" : simple_test,
    })

    def run_verify_pytest(*args):
        testdir.runpytest(
            '--benchmark-json=benchmark.json',
            '--benchmark-max-time=0.0000001',
            *args
        )

        benchmark_json = json.loads(testdir.tmpdir.join('benchmark.json').read())
        machine_info = benchmark_json["machine_info"]

        assert sorted(machine_info["conftest_path"]) == sorted([
            "conftest.py",
            "test_module/conftest.py",
            "test_module/tests/conftest.py",
        ])

    run_verify_pytest("test_module/tests")
    run_verify_pytest("test_module")
    run_verify_pytest(".")


def test_histogram(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-histogram=foobar',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Generated histogram: *foobar.svg",
    ])
    assert [f.basename for f in testdir.tmpdir.listdir("*.svg", sort=True)] == [
        'foobar.svg',
    ]


def test_autosave(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-autosave',
                               '--benchmark-max-time=0.0000001', test)
    result.stderr.fnmatch_lines([
        "Saved benchmark data in: *",
    ])
    json.loads(testdir.tmpdir.join('.benchmarks').listdir()[0].listdir('0001_*.json')[0].read())


def test_bogus_min_time(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-min-time=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "py*: error: argument --benchmark-min-time: Invalid decimal value 'bogus': InvalidOperation*",
    ])


def test_disable_gc(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-disable-gc', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_disable_gc.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_custom_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=time.time', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_custom_timer.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_timer(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-timer=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-timer: Value for --benchmark-timer must be in dotted form. Eg: "
        "'module.attr'.",
    ])


def test_sort_by_mean(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=mean', test)
    result.stdout.fnmatch_lines([
        "*collected 2 items",
        "test_sort_by_mean.py ..*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_fast          *",
        "test_slow          *",
        "------*",
        "*====== 2 passed* seconds ======*",
    ])


def test_bogus_sort(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-sort=bogus', test)
    result.stderr.fnmatch_lines([
        "usage: *py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
        "*py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one "
        "of: 'min', 'max', 'mean', 'stddev', 'name', 'fullname'."
    ])


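# Under pytest-xdist benchmarks cannot be measured reliably, so the plugin disables itself; the
# two tests below only check for the corresponding warning text.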
def test_xdist(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '-rw', test)
    result.stdout.fnmatch_lines([
        "* Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be "
        "performed reliably in a parallelized environment.",
    ])


def test_xdist_verbose(testdir):
    pytest.importorskip('xdist')
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '-n', '1', '--benchmark-verbose', test)
    result.stderr.fnmatch_lines([
        "------*",
        " WARNING: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed "
        "reliably in a parallelized environment.",
        "------*",
    ])


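# --benchmark-cprofile prints a per-test cProfile table sorted by the requested column; the
# timing rows are kept commented out below, presumably because their values vary between runs.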
def test_cprofile(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--benchmark-cprofile=cumtime', test)
    result.stdout.fnmatch_lines([
        "============*=========== cProfile information ============*===========",
        "Time in s",
        "test_cprofile.py::test_fast",
        "ncalls	tottime	percall	cumtime	percall	filename:lineno(function)",
        # "1	0.0000	0.0000	0.0001	0.0001	test_cprofile0/test_cprofile.py:9(result)",
        # "1	0.0001	0.0001	0.0001	0.0001	~:0(<built-in method time.sleep>)",
        # "1	0.0000	0.0000	0.0000	0.0000	~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
        "",
        "test_cprofile.py::test_slow",
        "ncalls	tottime	percall	cumtime	percall	filename:lineno(function)",
        # "1	0.0000	0.0000	0.1002	0.1002	test_cprofile0/test_cprofile.py:15(<lambda>)",
        # "1	0.1002	0.1002	0.1002	0.1002	~:0(<built-in method time.sleep>)",
        # "1	0.0000	0.0000	0.0000	0.0000	~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
    ])


def test_abort_broken(testdir):
    """
    Test that we don't benchmark code that raises exceptions.
    """
    test = testdir.makepyfile('''
"""
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
import pytest

def test_bad(benchmark):
    @benchmark
    def result():
        raise Exception()
    assert 1 == 1

def test_bad2(benchmark):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0

@pytest.fixture(params=['a', 'b', 'c'])
def bad_fixture(request):
    raise ImportError()

def test_ok(benchmark, bad_fixture):
    @benchmark
    def result():
        time.sleep(0.1)
    assert 1 == 0
''')
    result = testdir.runpytest('-vv', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "*",
        "test_abort_broken.py::test_bad FAILED*",
        "test_abort_broken.py::test_bad2 FAILED*",
        "test_abort_broken.py::test_ok*a* ERROR*",
        "test_abort_broken.py::test_ok*b* ERROR*",
        "test_abort_broken.py::test_ok*c* ERROR*",

        "*====== ERRORS ======*",
        "*______ ERROR at setup of test_ok[[]a[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[a]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]b[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[b]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*______ ERROR at setup of test_ok[[]c[]] ______*",

        "request = <SubRequest 'bad_fixture' for <Function 'test_ok[c]'>>",

        "    @pytest.fixture(params=['a', 'b', 'c'])",
        "    def bad_fixture(request):",
        ">       raise ImportError()",
        "E       ImportError",

        "test_abort_broken.py:22: ImportError",
        "*====== FAILURES ======*",
        "*______ test_bad ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad(benchmark):",
        ">       @benchmark",
        "        def result():",

        "test_abort_broken.py:*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",
        "*",
        "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _*",

        "    @benchmark",
        "    def result():",
        ">       raise Exception()",
        "E       Exception",

        "test_abort_broken.py:11: Exception",
        "*______ test_bad2 ______*",

        "benchmark = <pytest_benchmark.*.BenchmarkFixture object at *>",

        "    def test_bad2(benchmark):",
        "        @benchmark",
        "        def result():",
        "            time.sleep(0.1)",
        ">       assert 1 == 0",
        "E       assert 1 == 0",

        "test_abort_broken.py:18: AssertionError",
    ])

    result.stdout.fnmatch_lines([
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_bad2           *",
        "------*",

        "*====== 2 failed*, 3 error* seconds ======*",
    ])


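# Suite used by the skip/disable/only/mark-selection tests. Note that test_fast is defined
# twice, so the second definition shadows the first and pytest collects 5 items here
# (4 test functions plus the module doctest).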
BASIC_TEST = '''
"""
Just to make sure the plugin doesn't choke on doctests::
    >>> print('Yay, doctests!')
    Yay, doctests!
"""
import time
from functools import partial

import pytest

def test_fast(benchmark):
    @benchmark
    def result():
        return time.sleep(0.000001)
    assert result is None

def test_slow(benchmark):
    assert benchmark(partial(time.sleep, 0.001)) is None

def test_slower(benchmark):
    benchmark(lambda: time.sleep(0.01))

@pytest.mark.benchmark(min_rounds=2)
def test_xfast(benchmark):
    benchmark(str)

def test_fast(benchmark):
    benchmark(int)
'''


def test_basic(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_basic.py::*test_basic PASSED*",
        "test_basic.py::test_slow PASSED*",
        "test_basic.py::test_slower PASSED*",
        "test_basic.py::test_xfast PASSED*",
        "test_basic.py::test_fast PASSED*",
        "",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "",
        "*====== 5 passed* seconds ======*",
    ])


def test_skip(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-skip', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_skip.py::*test_skip PASSED*",
        "test_skip.py::test_slow SKIPPED*",
        "test_skip.py::test_slower SKIPPED*",
        "test_skip.py::test_xfast SKIPPED*",
        "test_skip.py::test_fast SKIPPED*",
        "*====== 1 passed, 4 skipped* seconds ======*",
    ])


def test_disable(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_disable.py::*test_disable PASSED*",
        "test_disable.py::test_slow PASSED*",
        "test_disable.py::test_slower PASSED*",
        "test_disable.py::test_xfast PASSED*",
        "test_disable.py::test_fast PASSED*",
        "*====== 5 passed * seconds ======*",
    ])


def test_mark_selection(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items*",
        "test_mark_selection.py::test_xfast PASSED*",
        "* benchmark: 1 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_xfast       *",
        "------*",
        "*====== 1 passed, 4 deselected* seconds ======*",
    ])


def test_only_benchmarks(testdir):
    test = testdir.makepyfile(BASIC_TEST)
    result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-only', test)
    result.stdout.fnmatch_lines([
        "*collected 5 items",
        "test_only_benchmarks.py::*test_only_benchmarks SKIPPED*",
        "test_only_benchmarks.py::test_slow PASSED*",
        "test_only_benchmarks.py::test_slower PASSED*",
        "test_only_benchmarks.py::test_xfast PASSED*",
        "test_only_benchmarks.py::test_fast PASSED*",
        "* benchmark: 4 tests *",
        "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
        "------*",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "test_*         *",
        "------*",
        "*====== 4 passed, 1 skipped* seconds ======*",
    ])


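# --benchmark-columns controls both which columns are shown and their order, as the
# "Max * Iterations * Min" header asserted below demonstrates.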
def test_columns(testdir):
    test = testdir.makepyfile(SIMPLE_TEST)
    result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
    result.stdout.fnmatch_lines([
        "*collected 3 items",
        "test_columns.py ...*",
        "* benchmark: 2 tests *",
        "Name (time in ?s) * Max * Iterations * Min *",
        "------*",
    ])