Completed: push to master (510a7c...995399) by Ionel Cristian, 13s

pytest_collection_modifyitems()   grade: B

Complexity:   Conditions 5
Size:         Total Lines 22
Duplication:  Lines 0, Ratio 0 %
Importance:   Changes 1, Bugs 0, Features 0
Metric   Value
cc       5
c        1
b        0
f        0
dl       0
loc      22
rs       8.3411
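Key to the metric table: "cc" is the function's cyclomatic complexity (the "Conditions 5" above), "loc" its total line count, and "c", "b", "f" and "dl" mirror the Changes, Bugs, Features and Duplication counters; "rs" appears to be the tool's own composite rank score. As a rough local cross-check of the complexity figure, a radon-style sketch like the one below can be run against the module (the file path and the use of radon are illustrative assumptions, not necessarily what produced this report):

from radon.complexity import cc_visit

# Illustrative path; point this at plugin.py in your checkout.
with open("src/pytest_benchmark/plugin.py") as fh:
    source = fh.read()

# Each block exposes .name and .complexity; pytest_collection_modifyitems()
# should come out close to the "cc 5" reported above.
for block in cc_visit(source):
    if block.name == "pytest_collection_modifyitems":
        print(block.name, block.complexity)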
from __future__ import division
from __future__ import print_function

import argparse
import operator
import platform
import sys
import traceback
from collections import defaultdict
from datetime import datetime

import pytest

from . import __version__
from .fixture import BenchmarkFixture
from .session import BenchmarkSession
from .session import PerformanceRegression
from .timers import default_timer
from .utils import NameWrapper
from .utils import format_dict
from .utils import get_commit_info
from .utils import get_current_time
from .utils import get_tag
from .utils import parse_columns
from .utils import parse_compare_fail
from .utils import parse_name_format
from .utils import parse_rounds
from .utils import parse_save
from .utils import parse_seconds
from .utils import parse_sort
from .utils import parse_timer
from .utils import parse_warmup


def pytest_report_header(config):
    bs = config._benchmarksession

    return ("benchmark: {version} (defaults:"
            " timer={timer}"
            " disable_gc={0[disable_gc]}"
            " min_rounds={0[min_rounds]}"
            " min_time={0[min_time]}"
            " max_time={0[max_time]}"
            " calibration_precision={0[calibration_precision]}"
            " warmup={0[warmup]}"
            " warmup_iterations={0[warmup_iterations]}"
            ")").format(
        bs.options,
        version=__version__,
        timer=bs.options.get("timer"),
    )


def add_display_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}sort".format(prefix),
        metavar="COL", type=parse_sort, default="min",
        help="Column to sort on. Can be one of: 'min', 'max', 'mean', 'stddev', "
             "'name', 'fullname'. Default: %(default)r"
    )
    addoption(
        "--{0}group-by".format(prefix),
        metavar="LABEL", default="group",
        help="How to group tests. Can be one of: 'group', 'name', 'fullname', 'func', 'fullfunc', "
             "'param' or 'param:NAME', where NAME is the name passed to @pytest.parametrize."
             " Default: %(default)r"
    )
    addoption(
        "--{0}columns".format(prefix),
        metavar="LABELS", type=parse_columns,
        default=["min", "max", "mean", "stddev", "median", "iqr", "outliers", "ops", "rounds", "iterations"],
        help="Comma-separated list of columns to show in the result table. Default: "
             "'min, max, mean, stddev, median, iqr, outliers, ops, rounds, iterations'"
    )
    addoption(
        "--{0}name".format(prefix),
        metavar="FORMAT", type=parse_name_format,
        default="normal",
        help="How to format names in results. Can be one of 'short', 'normal', 'long'. Default: %(default)r"
    )


def add_histogram_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}histogram".format(prefix),
        action="append", metavar="FILENAME-PREFIX", nargs="?", default=[], const=filename_prefix,
        help="Plot graphs of min/max/avg/stddev over time in FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )


def add_csv_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}csv".format(prefix),
        action="append", metavar="FILENAME", nargs="?", default=[], const=filename_prefix,
        help="Save a csv report. If FILENAME contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )


def add_global_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}storage".format(prefix), *[] if prefix else ['-s'],
        metavar="URI", default="file://./.benchmarks",
        help="Specify a path to store the runs as a URI in the form file://path or"
             " elasticsearch+http[s]://host1,host2/[index/doctype?project_name=Project] "
             "(when --benchmark-save or --benchmark-autosave are used). For backwards compatibility, unexpected values "
             "are converted to file://<value>. Default: %(default)r."
    )
    addoption(
        "--{0}netrc".format(prefix),
        nargs="?", default='', const='~/.netrc',
        help="Load elasticsearch credentials from a netrc file. Default: %(default)r.",
    )
    addoption(
        "--{0}verbose".format(prefix), *[] if prefix else ['-v'],
        action="store_true", default=False,
        help="Dump diagnostic and progress information."
    )


def pytest_addoption(parser):
    group = parser.getgroup("benchmark")
    group.addoption(
        "--benchmark-min-time",
        metavar="SECONDS", type=parse_seconds, default="0.000005",
        help="Minimum time per round in seconds. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-max-time",
        metavar="SECONDS", type=parse_seconds, default="1.0",
        help="Maximum run time per test - it will be repeated until this total time is reached. It may be "
             "exceeded if the test function is very slow or --benchmark-min-rounds is large (it takes precedence). "
             "Default: %(default)r"
    )
    group.addoption(
        "--benchmark-min-rounds",
        metavar="NUM", type=parse_rounds, default=5,
        help="Minimum rounds, even if total time would exceed `--max-time`. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-timer",
        metavar="FUNC", type=parse_timer, default=str(NameWrapper(default_timer)),
        help="Timer to use when measuring time. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-calibration-precision",
        metavar="NUM", type=int, default=10,
        help="Precision to use when calibrating number of iterations. Precision of 10 will make the timer look 10 times"
             " more accurate, at the cost of a less precise measure of deviations. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-warmup",
        metavar="KIND", nargs="?", default=parse_warmup("auto"), type=parse_warmup,
        help="Activates warmup. Will run the test function up to the specified number of times in the calibration phase. "
             "See `--benchmark-warmup-iterations`. Note: Even the warmup phase obeys --benchmark-max-time. "
             "Available KIND: 'auto', 'off', 'on'. Default: 'auto' (automatically activate on PyPy)."
    )
    group.addoption(
        "--benchmark-warmup-iterations",
        metavar="NUM", type=int, default=100000,
        help="Max number of iterations to run in the warmup phase. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-disable-gc",
        action="store_true", default=False,
        help="Disable GC during benchmarks."
    )
    group.addoption(
        "--benchmark-skip",
        action="store_true", default=False,
        help="Skip running any tests that contain benchmarks."
    )
    group.addoption(
        "--benchmark-disable",
        action="store_true", default=False,
        help="Disable benchmarks. Benchmarked functions are only run once and no stats are reported. Use this if you "
             "want to run the test but don't do any benchmarking."
    )
    group.addoption(
        "--benchmark-enable",
        action="store_true", default=False,
        help="Forcibly enable benchmarks. Use this option to override --benchmark-disable (in case you have it in "
             "pytest configuration)."
    )
    group.addoption(
        "--benchmark-only",
        action="store_true", default=False,
        help="Only run benchmarks."
    )
    group.addoption(
        "--benchmark-save",
        metavar="NAME", type=parse_save,
        help="Save the current run into 'STORAGE-PATH/counter_NAME.json'."
    )
    tag = get_tag()
    group.addoption(
        "--benchmark-autosave",
        action='store_const', const=tag,
        help="Autosave the current run into 'STORAGE-PATH/counter_%s.json'." % tag,
    )
    group.addoption(
        "--benchmark-save-data",
        action="store_true",
        help="Use this to make --benchmark-save and --benchmark-autosave include all the timing data,"
             " not just the stats.",
    )
    group.addoption(
        "--benchmark-json",
        metavar="PATH", type=argparse.FileType('wb'),
        help="Dump a JSON report into PATH. "
             "Note that this will include the complete data (all the timings, not just the stats)."
    )
    group.addoption(
        "--benchmark-compare",
        metavar="NUM|_ID", nargs="?", default=[], const=True,
        help="Compare the current run against run NUM (or prefix of _id in elasticsearch) or the latest "
             "saved run if unspecified."
    )
    group.addoption(
        "--benchmark-compare-fail",
        metavar="EXPR", nargs="+", type=parse_compare_fail,
        help="Fail test if performance regresses according to given EXPR"
             " (e.g. min:5%% or mean:0.001 for number of seconds). Can be used multiple times."
    )
    group.addoption(
        "--benchmark-cprofile",
        metavar="COLUMN", default=None,
        choices=['ncalls_recursion', 'ncalls', 'tottime', 'tottime_per', 'cumtime', 'cumtime_per', 'function_name'],
        help="If specified, measure one run with cProfile and store the top 10 functions."
             " Argument is a column to sort by. Available columns: 'ncalls_recursion',"
             " 'ncalls', 'tottime', 'tottime_per', 'cumtime', 'cumtime_per', 'function_name'."
    )
    add_global_options(group.addoption)
    add_display_options(group.addoption)
    add_histogram_options(group.addoption)


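# Register this plugin's own hook specifications; add_hookspecs is the current
# pluggy/pytest API, addhooks the older fallback.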
def pytest_addhooks(pluginmanager):
    from . import hookspec

    method = getattr(pluginmanager, "add_hookspecs", None)
    if method is None:
        method = pluginmanager.addhooks
    method(hookspec)


def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    machine_info = format_dict(machine_info)
    compared_machine_info = format_dict(compared_benchmark["machine_info"])

    if compared_machine_info != machine_info:
        benchmarksession.logger.warn(
            "BENCHMARK-C6",
            "Benchmark machine_info is different. Current: %s VS saved: %s." % (
                machine_info,
                compared_machine_info,
            ),
            fslocation=benchmarksession.storage.location
        )


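# Compatibility shim: newer pytest exposes pytest.hookimpl, older releases only
# have the pytest.mark.hookwrapper marker.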
if hasattr(pytest, 'hookimpl'):
    _hookwrapper = pytest.hookimpl(hookwrapper=True)
else:
    _hookwrapper = pytest.mark.hookwrapper


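# Partition collected tests: --benchmark-skip deselects items that use the
# 'benchmark' fixture, --benchmark-only deselects everything else.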
def pytest_collection_modifyitems(items, config):
    selected_items = []
    deselected_items = []

    only = config.getvalue("benchmark_only", False)
    skip = config.getvalue("benchmark_skip", False)

    for item in items:
        uses_benchmark = hasattr(item, "funcargnames") and "benchmark" in item.funcargnames
        if uses_benchmark:
            if skip:
                deselected_items.append(item)
            else:
                selected_items.append(item)
        else:
            if only:
                deselected_items.append(item)
            else:
                selected_items.append(item)

    config.hook.pytest_deselected(items=deselected_items)
    items[:] = selected_items


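# Group results according to --benchmark-group-by; each comma-separated grouping
# ('group', 'name', 'func', 'fullname', 'fullfunc', 'param', 'param:NAME') adds
# one component to the grouping key.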
def pytest_benchmark_group_stats(config, benchmarks, group_by):
    groups = defaultdict(list)
    for bench in benchmarks:
        key = ()
        for grouping in group_by.split(','):
            if grouping == "group":
                key += bench["group"],
            elif grouping == "name":
                key += bench["name"],
            elif grouping == "func":
                key += bench["name"].split("[")[0],
            elif grouping == "fullname":
                key += bench["fullname"],
            elif grouping == "fullfunc":
                key += bench["fullname"].split("[")[0],
            elif grouping == "param":
                key += bench["param"],
            elif grouping.startswith("param:"):
                param_name = grouping[len("param:"):]
                key += '%s=%s' % (param_name, bench["params"][param_name]),
            else:
                raise NotImplementedError("Unsupported grouping %r." % group_by)
        groups[' '.join(str(p) for p in key if p) or None].append(bench)

    for grouped_benchmarks in groups.values():
        grouped_benchmarks.sort(key=operator.itemgetter("fullname" if "full" in group_by else "name"))
    return sorted(groups.items(), key=lambda pair: pair[0] or "")


@_hookwrapper
def pytest_sessionfinish(session, exitstatus):
    session.config._benchmarksession.finish()
    yield


def pytest_terminal_summary(terminalreporter):
    try:
        terminalreporter.config._benchmarksession.display(terminalreporter)
    except PerformanceRegression:
        raise
    except Exception:
        terminalreporter.config._benchmarksession.logger.error("\n%s" % traceback.format_exc())
        raise


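# CPU details come from the optional py-cpuinfo package; missing fields fall
# back to 'unknown'.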
def get_cpu_info():
    import cpuinfo
    all_info = cpuinfo.get_cpu_info()
    all_info = all_info or {}
    info = {}
    for key in ('vendor_id', 'hardware', 'brand'):
        info[key] = all_info.get(key, 'unknown')
    return info


def pytest_benchmark_generate_machine_info():
    python_implementation = platform.python_implementation()
    python_implementation_version = platform.python_version()
    if python_implementation == 'PyPy':
        python_implementation_version = '%d.%d.%d' % sys.pypy_version_info[:3]
        if sys.pypy_version_info.releaselevel != 'final':
            python_implementation_version += '-%s%d' % sys.pypy_version_info[3:]
    return {
        "node": platform.node(),
        "processor": platform.processor(),
        "machine": platform.machine(),
        "python_compiler": platform.python_compiler(),
        "python_implementation": python_implementation,
        "python_implementation_version": python_implementation_version,
        "python_version": platform.python_version(),
        "python_build": platform.python_build(),
        "release": platform.release(),
        "system": platform.system(),
        "cpu": get_cpu_info(),
    }


def pytest_benchmark_generate_commit_info(config):
    return get_commit_info(config.getoption("benchmark_project_name", None))


def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_info, commit_info):
    benchmarks_json = []
    output_json = {
        "machine_info": machine_info,
        "commit_info": commit_info,
        "benchmarks": benchmarks_json,
        "datetime": datetime.utcnow().isoformat(),
        "version": __version__,
    }
    for bench in benchmarks:
        if not bench.has_error:
            benchmarks_json.append(bench.as_dict(include_data=include_data))
    return output_json


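# The 'benchmark' fixture: honors --benchmark-skip, overlays per-test
# @pytest.mark.benchmark(...) keyword options on the session defaults and
# returns a BenchmarkFixture that appends its stats to the session.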
@pytest.fixture(scope="function")
392
def benchmark(request):
393
    bs = request.config._benchmarksession
394
395
    if bs.skip:
396
        pytest.skip("Benchmarks are skipped (--benchmark-skip was used).")
397
    else:
398
        node = request.node
399
        marker = node.get_marker("benchmark")
400
        options = marker.kwargs if marker else {}
401
        if "timer" in options:
402
            options["timer"] = NameWrapper(options["timer"])
403
        fixture = BenchmarkFixture(
404
            node,
405
            add_stats=bs.benchmarks.append,
406
            logger=bs.logger,
407
            warner=request.node.warn,
408
            disabled=bs.disabled,
409
            **dict(bs.options, **options)
410
        )
411
        request.addfinalizer(fixture._cleanup)
412
        return fixture
413
414
415
@pytest.fixture(scope="function")
416
def benchmark_weave(benchmark):
417
    return benchmark.weave
418
419
420
def pytest_runtest_setup(item):
    marker = item.get_marker("benchmark")
    if marker:
        if marker.args:
            raise ValueError("benchmark mark can't have positional arguments.")
        for name in marker.kwargs:
            if name not in (
                    "max_time", "min_rounds", "min_time", "timer", "group", "disable_gc", "warmup",
                    "warmup_iterations", "calibration_precision"):
                raise ValueError("benchmark mark can't have %r keyword argument." % name)


@pytest.mark.trylast  # force the other plugins to initialise, fixes issue with capture not being properly initialised
def pytest_configure(config):
    config.addinivalue_line("markers", "benchmark: mark a test with custom benchmark settings.")
    bs = config._benchmarksession = BenchmarkSession(config)
    bs.handle_loading()
    config.pluginmanager.register(bs, "pytest-benchmark")
438