Completed
Pull Request — master (#58)
created by unknown, 01:15

get_report_backend()   A

Complexity:  Conditions 4
Size:        Total Lines 17
Duplication: Lines 0, Ratio 0 %
Importance:  Changes 1, Bugs 0, Features 0

Metric                    Value
cc   (conditions)         4
c    (changes)            1
b    (bugs)               0
f    (features)           0
dl   (duplicated lines)   0
loc  (total lines)        17
rs                        9.2
from __future__ import division
from __future__ import print_function

import argparse
import operator
import platform
import sys
import traceback
from collections import defaultdict
from datetime import datetime

import pytest

from . import __version__
from .fixture import BenchmarkFixture
from .report_backend import ElasticReportBackend, FileReportBackend
from .session import BenchmarkSession
from .session import PerformanceRegression
from .timers import default_timer
from .utils import NameWrapper, parse_name_format
from .utils import format_dict
from .utils import get_commit_info
from .utils import get_current_time
from .utils import get_tag
from .utils import parse_columns
from .utils import parse_compare_fail
from .utils import parse_rounds
from .utils import parse_save
from .utils import parse_seconds
from .utils import parse_sort
from .utils import parse_timer
from .utils import parse_warmup
from .utils import parse_elasticsearch_storage


def pytest_report_header(config):
    bs = config._benchmarksession

    return ("benchmark: {version} (defaults:"
            " timer={timer}"
            " disable_gc={0[disable_gc]}"
            " min_rounds={0[min_rounds]}"
            " min_time={0[min_time]}"
            " max_time={0[max_time]}"
            " calibration_precision={0[calibration_precision]}"
            " warmup={0[warmup]}"
            " warmup_iterations={0[warmup_iterations]}"
            ")").format(
        bs.options,
        version=__version__,
        timer=bs.options.get("timer"),
    )


def add_display_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}sort".format(prefix),
        metavar="COL", type=parse_sort, default="min",
        help="Column to sort on. Can be one of: 'min', 'max', 'mean', 'stddev', "
             "'name', 'fullname'. Default: %(default)r"
    )
    addoption(
        "--{0}group-by".format(prefix),
        metavar="LABEL", default="group",
        help="How to group tests. Can be one of: 'group', 'name', 'fullname', 'func', 'fullfunc', "
             "'param' or 'param:NAME', where NAME is the name passed to @pytest.parametrize."
             " Default: %(default)r"
    )
    addoption(
        "--{0}columns".format(prefix),
        metavar="LABELS", type=parse_columns,
        default="min, max, mean, stddev, median, iqr, outliers, rounds, iterations",
        help="Comma-separated list of columns to show in the result table. Default: %(default)r"
    )
    addoption(
        "--{0}name".format(prefix),
        metavar="FORMAT", type=parse_name_format,
        default="normal",
        help="How to format names in results. Can be one of 'short', 'normal', 'long'. Default: %(default)r"
    )
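# Illustrative use of the display options registered above (flag values are assumptions, not defaults):
#   pytest --benchmark-sort=mean --benchmark-group-by=param:size \
#          --benchmark-columns=min,max,mean,stddev --benchmark-name=short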
def add_histogram_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}histogram".format(prefix),
        action="append", metavar="FILENAME-PREFIX", nargs="?", default=[], const=filename_prefix,
        help="Plot graphs of min/max/avg/stddev over time in FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )
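# Illustrative invocation (the prefix is an assumption): `pytest --benchmark-histogram=histograms/run1`
# would write one SVG per test, e.g. histograms/run1-test_name.svg, creating the directory if needed.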
def add_csv_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}csv".format(prefix),
        action="append", metavar="FILENAME", nargs="?", default=[], const=filename_prefix,
        help="Save a csv report. If FILENAME contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )


def add_global_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}storage".format(prefix),
        metavar="URI", default="file://./.benchmarks",
        help="Specify a path to store the runs as a URI in the form file://path or"
             " elasticsearch+http[s]://host1,host2/[index/doctype?project_name=Project] "
             "(when --benchmark-save or --benchmark-autosave are used). "
             "Default: %(default)r.",
    )
    addoption(
        "--{0}verbose".format(prefix),
        action="store_true", default=False,
        help="Dump diagnostic and progress information."
    )


def pytest_addoption(parser):
    group = parser.getgroup("benchmark")
    group.addoption(
        "--benchmark-min-time",
        metavar="SECONDS", type=parse_seconds, default="0.000005",
        help="Minimum time per round in seconds. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-max-time",
        metavar="SECONDS", type=parse_seconds, default="1.0",
        help="Maximum run time per test - it will be repeated until this total time is reached. It may be "
             "exceeded if the test function is very slow or --benchmark-min-rounds is large (it takes precedence). "
             "Default: %(default)r"
    )
    group.addoption(
        "--benchmark-min-rounds",
        metavar="NUM", type=parse_rounds, default=5,
        help="Minimum rounds, even if total time would exceed `--max-time`. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-timer",
        metavar="FUNC", type=parse_timer, default=str(NameWrapper(default_timer)),
        help="Timer to use when measuring time. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-calibration-precision",
        metavar="NUM", type=int, default=10,
        help="Precision to use when calibrating the number of iterations. A precision of 10 will make the timer look 10 times"
             " more accurate, at the cost of a less precise measure of deviations. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-warmup",
        metavar="KIND", nargs="?", default=parse_warmup("auto"), type=parse_warmup,
        help="Activates warmup. The test function will be run a number of times in the calibration phase. "
             "See `--benchmark-warmup-iterations`. Note: Even the warmup phase obeys --benchmark-max-time. "
             "Available KIND: 'auto', 'off', 'on'. Default: 'auto' (automatically activate on PyPy)."
    )
    group.addoption(
        "--benchmark-warmup-iterations",
        metavar="NUM", type=int, default=100000,
        help="Max number of iterations to run in the warmup phase. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-disable-gc",
        action="store_true", default=False,
        help="Disable GC during benchmarks."
    )
    group.addoption(
        "--benchmark-skip",
        action="store_true", default=False,
        help="Skip running any tests that contain benchmarks."
    )
    group.addoption(
        "--benchmark-disable",
        action="store_true", default=False,
        help="Disable benchmarks. Benchmarked functions are only run once and no stats are reported. Use this if you "
             "want to run the test but don't do any benchmarking."
    )
    group.addoption(
        "--benchmark-enable",
        action="store_true", default=False,
        help="Forcibly enable benchmarks. Use this option to override --benchmark-disable (in case you have it in "
             "pytest configuration)."
    )
    group.addoption(
        "--benchmark-only",
        action="store_true", default=False,
        help="Only run benchmarks."
    )
    group.addoption(
        "--benchmark-save",
        metavar="NAME", type=parse_save,
        help="Save the current run into 'STORAGE-PATH/counter_NAME.json'."
    )
    tag = get_tag()
    group.addoption(
        "--benchmark-autosave",
        action='store_const', const=tag,
        help="Autosave the current run into 'STORAGE-PATH/counter_%s.json'." % tag,
    )
    group.addoption(
        "--benchmark-save-data",
        action="store_true",
        help="Use this to make --benchmark-save and --benchmark-autosave include all the timing data,"
             " not just the stats.",
    )
    group.addoption(
        "--benchmark-json",
        metavar="PATH", type=argparse.FileType('wb'),
        help="Dump a JSON report into PATH. "
             "Note that this will include the complete data (all the timings, not just the stats)."
    )
    group.addoption(
        "--benchmark-compare",
        metavar="NUM|_ID", nargs="?", default=[], const=True,
        help="Compare the current run against run NUM (or prefix of _id in elasticsearch) or the latest "
             "saved run if unspecified."
    )
    group.addoption(
        "--benchmark-compare-fail",
        metavar="EXPR", nargs="+", type=parse_compare_fail,
        help="Fail test if performance regresses according to given EXPR"
             " (eg: min:5%% or mean:0.001 for number of seconds). Can be used multiple times."
    )
    add_global_options(group.addoption)
    add_display_options(group.addoption)
    add_histogram_options(group.addoption)
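# Illustrative command line combining several of the options registered above (values are assumptions):
#   pytest --benchmark-only --benchmark-min-rounds=10 --benchmark-autosave \
#          --benchmark-compare --benchmark-compare-fail=min:5%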
def pytest_addhooks(pluginmanager):
    from . import hookspec

    method = getattr(pluginmanager, "add_hookspecs", None)
    if method is None:
        method = pluginmanager.addhooks
    method(hookspec)


def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    machine_info = format_dict(machine_info)
    compared_machine_info = format_dict(compared_benchmark["machine_info"])

    if compared_machine_info != machine_info:
        benchmarksession.logger.warn(
            "BENCHMARK-C6",
            "Benchmark machine_info is different. Current: %s VS saved: %s." % (
                machine_info,
                compared_machine_info,
            ),
            fslocation=benchmarksession.storage.location
        )

# Prefer pytest.hookimpl when available; older pytest versions only provide pytest.mark.hookwrapper.
if hasattr(pytest, 'hookimpl'):
    _hookwrapper = pytest.hookimpl(hookwrapper=True)
else:
    _hookwrapper = pytest.mark.hookwrapper


@_hookwrapper
def pytest_runtest_call(item):
    bs = item.config._benchmarksession
    fixture = hasattr(item, "funcargs") and item.funcargs.get("benchmark")
    if isinstance(fixture, BenchmarkFixture):
        if bs.skip:
            pytest.skip("Skipping benchmark (--benchmark-skip active).")
        else:
            yield
    else:
        if bs.only:
            pytest.skip("Skipping non-benchmark (--benchmark-only active).")
        else:
            yield


def pytest_benchmark_group_stats(config, benchmarks, group_by):
    groups = defaultdict(list)
    for bench in benchmarks:
        key = ()
        for grouping in group_by.split(','):
            if grouping == "group":
                key += bench["group"],
            elif grouping == "name":
                key += bench["name"],
            elif grouping == "func":
                key += bench["name"].split("[")[0],
            elif grouping == "fullname":
                key += bench["fullname"],
            elif grouping == "fullfunc":
                key += bench["fullname"].split("[")[0],
            elif grouping == "param":
                key += bench["param"],
            elif grouping.startswith("param:"):
                param_name = grouping[len("param:"):]
                key += '%s=%s' % (param_name, bench["params"][param_name]),
            else:
                raise NotImplementedError("Unsupported grouping %r." % group_by)
        groups[' '.join(str(p) for p in key if p) or None].append(bench)

    for grouped_benchmarks in groups.values():
        grouped_benchmarks.sort(key=operator.itemgetter("fullname" if "full" in group_by else "name"))
    return sorted(groups.items(), key=lambda pair: pair[0] or "")
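# Sketch of the "param:NAME" grouping handled above (test code and values are assumptions):
#   @pytest.mark.parametrize("size", [10, 100])
#   def test_sort(benchmark, size):
#       benchmark(sorted, list(range(size, 0, -1)))
# Running `pytest --benchmark-group-by=param:size` then groups results under "size=10" and "size=100".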
@_hookwrapper
def pytest_sessionfinish(session, exitstatus):
    session.config._benchmarksession.finish()
    yield


def pytest_terminal_summary(terminalreporter):
    try:
        terminalreporter.config._benchmarksession.display(terminalreporter)
    except PerformanceRegression:
        raise
    except Exception:
        terminalreporter.config._benchmarksession.logger.error("\n%s" % traceback.format_exc())
        raise


def pytest_benchmark_generate_machine_info():
    python_implementation = platform.python_implementation()
    python_implementation_version = platform.python_version()
    if python_implementation == 'PyPy':
        python_implementation_version = '%d.%d.%d' % sys.pypy_version_info[:3]
        if sys.pypy_version_info.releaselevel != 'final':
            python_implementation_version += '-%s%d' % sys.pypy_version_info[3:]
    return {
        "node": platform.node(),
        "processor": platform.processor(),
        "machine": platform.machine(),
        "python_compiler": platform.python_compiler(),
        "python_implementation": python_implementation,
        "python_implementation_version": python_implementation_version,
        "python_version": platform.python_version(),
        "python_build": platform.python_build(),
        "release": platform.release(),
        "system": platform.system()
    }


def pytest_benchmark_generate_commit_info(config):
    return get_commit_info(config.getoption("benchmark_project_name", None))


def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_info, commit_info):
    benchmarks_json = []
    output_json = {
        "machine_info": machine_info,
        "commit_info": commit_info,
        "benchmarks": benchmarks_json,
        "datetime": datetime.utcnow().isoformat(),
        "version": __version__,
    }
    for bench in benchmarks:
        if not bench.has_error:
            benchmarks_json.append(bench.as_dict(include_data=include_data))
    return output_json
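# Resulting report shape (sketch): a dict with keys "machine_info", "commit_info",
# "benchmarks" (list of per-test dicts, with raw timings only when include_data is set),
# "datetime" (ISO 8601) and "version".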
@pytest.fixture(scope="function")
def benchmark(request):
    bs = request.config._benchmarksession

    if bs.skip:
        pytest.skip("Benchmarks are skipped (--benchmark-skip was used).")
    else:
        node = request.node
        marker = node.get_marker("benchmark")
        options = marker.kwargs if marker else {}
        if "timer" in options:
            options["timer"] = NameWrapper(options["timer"])
        fixture = BenchmarkFixture(
            node,
            add_stats=bs.benchmarks.append,
            logger=bs.logger,
            warner=request.node.warn,
            disabled=bs.disabled,
            **dict(bs.options, **options)
        )
        request.addfinalizer(fixture._cleanup)
        return fixture
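# Typical use of the fixture returned above (sketch; the test function is an assumption):
#   def test_sum(benchmark):
#       result = benchmark(sum, range(100))
#       assert result == 4950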
@pytest.fixture(scope="function")
def benchmark_weave(benchmark):
    return benchmark.weave


def pytest_runtest_setup(item):
    marker = item.get_marker("benchmark")
    if marker:
        if marker.args:
            raise ValueError("benchmark mark can't have positional arguments.")
        for name in marker.kwargs:
            if name not in (
                    "max_time", "min_rounds", "min_time", "timer", "group", "disable_gc", "warmup",
                    "warmup_iterations", "calibration_precision"):
                raise ValueError("benchmark mark can't have %r keyword argument." % name)
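# The marker validated above accepts only those keyword arguments, e.g. (sketch; values are assumptions):
#   @pytest.mark.benchmark(group="io", min_rounds=10, warmup=True)
#   def test_read(benchmark):
#       ...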
def get_report_backend(config):
    storage = config.getoption("benchmark_storage")
    if "://" not in storage:
        storage = "file://" + storage
    if storage.startswith("file://"):
        config.option.__dict__["benchmark_storage"] = storage[len("file://"):]
        return FileReportBackend(config)
    elif storage.startswith("elasticsearch+"):
        hosts, index, doctype, project_name = parse_elasticsearch_storage(storage[len("elasticsearch+"):])
        config.option.__dict__["benchmark_elasticsearch_hosts"] = hosts
        config.option.__dict__["benchmark_elasticsearch_index"] = index
        config.option.__dict__["benchmark_elasticsearch_doctype"] = doctype
        config.option.__dict__["benchmark_project_name"] = project_name
        config.option.__dict__["benchmark_autosave"] = get_tag(project_name)
        return ElasticReportBackend(config)
    else:
        raise argparse.ArgumentTypeError(
            "Storage must be in the form of file://path or elasticsearch+http[s]://host1,host2/index/doctype"
        )
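# Storage URIs dispatched above (illustrative values):
#   --benchmark-storage=file://./.benchmarks  -> FileReportBackend
#   --benchmark-storage=elasticsearch+http://host1,host2/index/doctype?project_name=Project  -> ElasticReportBackend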
@pytest.mark.trylast  # force the other plugins to initialise, fixes issue with capture not being properly initialised
def pytest_configure(config):
    config.addinivalue_line("markers", "benchmark: mark a test with custom benchmark settings.")
    config._benchmarksession = BenchmarkSession(config, get_report_backend(config))
    config.pluginmanager.register(config._benchmarksession, "pytest-benchmark")