Completed — Pull Request: master (#61), created by unknown at 01:11

get_cpu_info()   A

Complexity:  Conditions 2
Size:        Total Lines 8
Duplication: Lines 0, Ratio 0 %
Importance:  Changes 1, Bugs 0, Features 1

Metric  Value
cc      2
c       1
b       0
f       1
dl      0
loc     8
rs      9.4285
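
The Complexity and Size figures above (Conditions 2, Total Lines 8) can be sanity-checked locally. The sketch below uses radon as a stand-in analyser — an assumption, since the report does not say which tool produced the cc/loc/rs values:

# Hedged sketch: reproduce the complexity/size figures for get_cpu_info() with radon
# (radon is an assumption here; requires `pip install radon`).
from radon.complexity import cc_visit
from radon.raw import analyze

SOURCE = '''\
def get_cpu_info():
    import cpuinfo
    all_info = cpuinfo.get_cpu_info()
    all_info = all_info or {}
    info = {}
    for key in ('vendor_id', 'hardware', 'brand'):
        info[key] = all_info.get(key, 'unknown')
    return info
'''

for block in cc_visit(SOURCE):
    # The single for-loop is the only branch, so cyclomatic complexity is 2,
    # in line with "Conditions 2" / cc above.
    print(block.name, block.complexity)

# Raw line count: 8 source lines, in line with "Total Lines 8" / loc above.
print(analyze(SOURCE).loc)

The full module under review follows.
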
from __future__ import division
from __future__ import print_function

import argparse
import operator
import platform
import sys
import traceback
from collections import defaultdict
from datetime import datetime

import pytest

from . import __version__
from .fixture import BenchmarkFixture
from .session import BenchmarkSession
from .session import PerformanceRegression
from .timers import default_timer
from .utils import NameWrapper
from .utils import format_dict
from .utils import get_commit_info
from .utils import get_current_time
from .utils import get_tag
from .utils import parse_columns
from .utils import parse_compare_fail
from .utils import parse_name_format
from .utils import parse_rounds
from .utils import parse_save
from .utils import parse_seconds
from .utils import parse_sort
from .utils import parse_timer
from .utils import parse_warmup


def pytest_report_header(config):
    bs = config._benchmarksession

    return ("benchmark: {version} (defaults:"
            " timer={timer}"
            " disable_gc={0[disable_gc]}"
            " min_rounds={0[min_rounds]}"
            " min_time={0[min_time]}"
            " max_time={0[max_time]}"
            " calibration_precision={0[calibration_precision]}"
            " warmup={0[warmup]}"
            " warmup_iterations={0[warmup_iterations]}"
            ")").format(
        bs.options,
        version=__version__,
        timer=bs.options.get("timer"),
    )


def add_display_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}sort".format(prefix),
        metavar="COL", type=parse_sort, default="min",
        help="Column to sort on. Can be one of: 'min', 'max', 'mean', 'stddev', "
             "'name', 'fullname'. Default: %(default)r"
    )
    addoption(
        "--{0}group-by".format(prefix),
        metavar="LABEL", default="group",
        help="How to group tests. Can be one of: 'group', 'name', 'fullname', 'func', 'fullfunc', "
             "'param' or 'param:NAME', where NAME is the name passed to @pytest.parametrize."
             " Default: %(default)r"
    )
    addoption(
        "--{0}columns".format(prefix),
        metavar="LABELS", type=parse_columns,
        default="min, max, mean, stddev, median, iqr, outliers, rounds, iterations",
        help="Comma-separated list of columns to show in the result table. Default: %(default)r"
    )
    addoption(
        "--{0}name".format(prefix),
        metavar="FORMAT", type=parse_name_format,
        default="normal",
        help="How to format names in results. Can be one of 'short', 'normal', 'long'. Default: %(default)r"
    )


def add_histogram_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}histogram".format(prefix),
        action="append", metavar="FILENAME-PREFIX", nargs="?", default=[], const=filename_prefix,
        help="Plot graphs of min/max/avg/stddev over time in FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )


def add_csv_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}csv".format(prefix),
        action="append", metavar="FILENAME", nargs="?", default=[], const=filename_prefix,
        help="Save a csv report. If FILENAME contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )


def add_global_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}storage".format(prefix),
        metavar="URI", default="file://./.benchmarks",
        help="Specify a path to store the runs as a URI in the form file://path or"
             " elasticsearch+http[s]://host1,host2/[index/doctype?project_name=Project] "
             "(when --benchmark-save or --benchmark-autosave are used). For backwards compatibility, unexpected values "
             "are converted to file://<value>. Default: %(default)r.",
    )
    addoption(
        "--{0}verbose".format(prefix),
        action="store_true", default=False,
        help="Dump diagnostic and progress information."
    )


def pytest_addoption(parser):
    group = parser.getgroup("benchmark")
    group.addoption(
        "--benchmark-min-time",
        metavar="SECONDS", type=parse_seconds, default="0.000005",
        help="Minimum time per round in seconds. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-max-time",
        metavar="SECONDS", type=parse_seconds, default="1.0",
        help="Maximum run time per test - it will be repeated until this total time is reached. It may be "
             "exceeded if the test function is very slow or --benchmark-min-rounds is large (it takes precedence). "
             "Default: %(default)r"
    )
    group.addoption(
        "--benchmark-min-rounds",
        metavar="NUM", type=parse_rounds, default=5,
        help="Minimum rounds, even if total time would exceed `--max-time`. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-timer",
        metavar="FUNC", type=parse_timer, default=str(NameWrapper(default_timer)),
        help="Timer to use when measuring time. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-calibration-precision",
        metavar="NUM", type=int, default=10,
        help="Precision to use when calibrating the number of iterations. A precision of 10 will make the timer look 10 times"
             " more accurate, at the cost of a less precise measure of deviations. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-warmup",
        metavar="KIND", nargs="?", default=parse_warmup("auto"), type=parse_warmup,
        help="Activates warmup. Will run the test function up to the configured number of times in the calibration phase. "
             "See `--benchmark-warmup-iterations`. Note: Even the warmup phase obeys --benchmark-max-time. "
             "Available KIND: 'auto', 'off', 'on'. Default: 'auto' (automatically activate on PyPy)."
    )
    group.addoption(
        "--benchmark-warmup-iterations",
        metavar="NUM", type=int, default=100000,
        help="Max number of iterations to run in the warmup phase. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-disable-gc",
        action="store_true", default=False,
        help="Disable GC during benchmarks."
    )
    group.addoption(
        "--benchmark-skip",
        action="store_true", default=False,
        help="Skip running any tests that contain benchmarks."
    )
    group.addoption(
        "--benchmark-disable",
        action="store_true", default=False,
        help="Disable benchmarks. Benchmarked functions are only run once and no stats are reported. Use this if you "
             "want to run the test but don't do any benchmarking."
    )
    group.addoption(
        "--benchmark-enable",
        action="store_true", default=False,
        help="Forcibly enable benchmarks. Use this option to override --benchmark-disable (in case you have it in "
             "pytest configuration)."
    )
    group.addoption(
        "--benchmark-only",
        action="store_true", default=False,
        help="Only run benchmarks."
    )
    group.addoption(
        "--benchmark-save",
        metavar="NAME", type=parse_save,
        help="Save the current run into 'STORAGE-PATH/counter_NAME.json'."
    )
    tag = get_tag()
    group.addoption(
        "--benchmark-autosave",
        action='store_const', const=tag,
        help="Autosave the current run into 'STORAGE-PATH/counter_%s.json'" % tag,
    )
    group.addoption(
        "--benchmark-save-data",
        action="store_true",
        help="Use this to make --benchmark-save and --benchmark-autosave include all the timing data,"
             " not just the stats.",
    )
    group.addoption(
        "--benchmark-json",
        metavar="PATH", type=argparse.FileType('wb'),
        help="Dump a JSON report into PATH. "
             "Note that this will include the complete data (all the timings, not just the stats)."
    )
    group.addoption(
        "--benchmark-compare",
        metavar="NUM|_ID", nargs="?", default=[], const=True,
        help="Compare the current run against run NUM (or prefix of _id in elasticsearch) or the latest "
             "saved run if unspecified."
    )
    group.addoption(
        "--benchmark-compare-fail",
        metavar="EXPR", nargs="+", type=parse_compare_fail,
        help="Fail test if performance regresses according to given EXPR"
             " (eg: min:5%% or mean:0.001 for number of seconds). Can be used multiple times."
    )
    group.addoption(
        "--benchmark-cprofile",
        metavar="COLUMN", default=None,
        choices=['ncalls_recursion', 'ncalls', 'tottime', 'tottime_per', 'cumtime', 'cumtime_per', 'function_name'],
        help="If specified, measure one run with cProfile and store the top 10 functions."
             " Argument is a column to sort by. Available columns: 'ncalls_recursion',"
             " 'ncalls', 'tottime', 'tottime_per', 'cumtime', 'cumtime_per', 'function_name'."
    )
    add_global_options(group.addoption)
    add_display_options(group.addoption)
    add_histogram_options(group.addoption)


def pytest_addhooks(pluginmanager):
    from . import hookspec

    method = getattr(pluginmanager, "add_hookspecs", None)
    if method is None:
        method = pluginmanager.addhooks
    method(hookspec)


def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    machine_info = format_dict(machine_info)
    compared_machine_info = format_dict(compared_benchmark["machine_info"])

    if compared_machine_info != machine_info:
        benchmarksession.logger.warn(
            "BENCHMARK-C6",
            "Benchmark machine_info is different. Current: %s VS saved: %s." % (
                machine_info,
                compared_machine_info,
            ),
            fslocation=benchmarksession.storage.location
        )

if hasattr(pytest, 'hookimpl'):
    _hookwrapper = pytest.hookimpl(hookwrapper=True)
else:
    _hookwrapper = pytest.mark.hookwrapper


@_hookwrapper
def pytest_runtest_call(item):
    bs = item.config._benchmarksession
    fixture = hasattr(item, "funcargs") and item.funcargs.get("benchmark")
    if isinstance(fixture, BenchmarkFixture):
        if bs.skip:
            pytest.skip("Skipping benchmark (--benchmark-skip active).")
        else:
            yield
    else:
        if bs.only:
            pytest.skip("Skipping non-benchmark (--benchmark-only active).")
        else:
            yield


def pytest_benchmark_group_stats(config, benchmarks, group_by):
    groups = defaultdict(list)
    for bench in benchmarks:
        key = ()
        for grouping in group_by.split(','):
            if grouping == "group":
                key += bench["group"],
            elif grouping == "name":
                key += bench["name"],
            elif grouping == "func":
                key += bench["name"].split("[")[0],
            elif grouping == "fullname":
                key += bench["fullname"],
            elif grouping == "fullfunc":
                key += bench["fullname"].split("[")[0],
            elif grouping == "param":
                key += bench["param"],
            elif grouping.startswith("param:"):
                param_name = grouping[len("param:"):]
                key += '%s=%s' % (param_name, bench["params"][param_name]),
            else:
                raise NotImplementedError("Unsupported grouping %r." % group_by)
        groups[' '.join(str(p) for p in key if p) or None].append(bench)

    for grouped_benchmarks in groups.values():
        grouped_benchmarks.sort(key=operator.itemgetter("fullname" if "full" in group_by else "name"))
    return sorted(groups.items(), key=lambda pair: pair[0] or "")


@_hookwrapper
def pytest_sessionfinish(session, exitstatus):
    session.config._benchmarksession.finish()
    yield


def pytest_terminal_summary(terminalreporter):
    try:
        terminalreporter.config._benchmarksession.display(terminalreporter)
    except PerformanceRegression:
        raise
    except Exception:
        terminalreporter.config._benchmarksession.logger.error("\n%s" % traceback.format_exc())
        raise


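# The function analysed in this report: collects a short CPU description via the
# `cpuinfo` package; keys that cpuinfo cannot provide fall back to 'unknown'.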
def get_cpu_info():
    import cpuinfo
    all_info = cpuinfo.get_cpu_info()
    all_info = all_info or {}
    info = {}
    for key in ('vendor_id', 'hardware', 'brand'):
        info[key] = all_info.get(key, 'unknown')
    return info


def pytest_benchmark_generate_machine_info():
    python_implementation = platform.python_implementation()
    python_implementation_version = platform.python_version()
    if python_implementation == 'PyPy':
        python_implementation_version = '%d.%d.%d' % sys.pypy_version_info[:3]
        if sys.pypy_version_info.releaselevel != 'final':
            python_implementation_version += '-%s%d' % sys.pypy_version_info[3:]
    return {
        "node": platform.node(),
        "processor": platform.processor(),
        "machine": platform.machine(),
        "python_compiler": platform.python_compiler(),
        "python_implementation": python_implementation,
        "python_implementation_version": python_implementation_version,
        "python_version": platform.python_version(),
        "python_build": platform.python_build(),
        "release": platform.release(),
        "system": platform.system(),
        "cpu": get_cpu_info(),
    }


def pytest_benchmark_generate_commit_info(config):
    return get_commit_info(config.getoption("benchmark_project_name", None))


def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_info, commit_info):
    benchmarks_json = []
    output_json = {
        "machine_info": machine_info,
        "commit_info": commit_info,
        "benchmarks": benchmarks_json,
        "datetime": datetime.utcnow().isoformat(),
        "version": __version__,
    }
    for bench in benchmarks:
        if not bench.has_error:
            benchmarks_json.append(bench.as_dict(include_data=include_data))
    return output_json


@pytest.fixture(scope="function")
def benchmark(request):
    bs = request.config._benchmarksession

    if bs.skip:
        pytest.skip("Benchmarks are skipped (--benchmark-skip was used).")
    else:
        node = request.node
        marker = node.get_marker("benchmark")
        options = marker.kwargs if marker else {}
        if "timer" in options:
            options["timer"] = NameWrapper(options["timer"])
        fixture = BenchmarkFixture(
            node,
            add_stats=bs.benchmarks.append,
            logger=bs.logger,
            warner=request.node.warn,
            disabled=bs.disabled,
            **dict(bs.options, **options)
        )
        request.addfinalizer(fixture._cleanup)
        return fixture


@pytest.fixture(scope="function")
def benchmark_weave(benchmark):
    return benchmark.weave


def pytest_runtest_setup(item):
    marker = item.get_marker("benchmark")
    if marker:
        if marker.args:
            raise ValueError("benchmark mark can't have positional arguments.")
        for name in marker.kwargs:
            if name not in (
                    "max_time", "min_rounds", "min_time", "timer", "group", "disable_gc", "warmup",
                    "warmup_iterations", "calibration_precision"):
                raise ValueError("benchmark mark can't have %r keyword argument." % name)


@pytest.mark.trylast  # force the other plugins to initialise, fixes issue with capture not being properly initialised
def pytest_configure(config):
    config.addinivalue_line("markers", "benchmark: mark a test with custom benchmark settings.")
    config._benchmarksession = BenchmarkSession(config)
    config.pluginmanager.register(config._benchmarksession, "pytest-benchmark")
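
For context on how the fixture, marker, and options registered above are consumed, here is a minimal usage sketch. The test module name and the sorted() workload are illustrative only (not part of this PR), and it assumes pytest-benchmark is installed:

# test_example.py -- illustrative sketch only; assumes pytest-benchmark is installed.
import pytest


@pytest.mark.benchmark(group="sorting", warmup=True)   # kwargs validated by pytest_runtest_setup() above
def test_sorted_list(benchmark):
    data = list(range(1000, 0, -1))
    result = benchmark(sorted, data)   # the `benchmark` fixture times repeated calls of sorted(data)
    assert result[0] == 1


if __name__ == "__main__":
    # Uses options registered in pytest_addoption() above.
    pytest.main(["--benchmark-only", "--benchmark-autosave", __file__])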