Completed
Push to master (eb8f9b...749362) by Ionel Cristian, created at 01:01

src.pytest_benchmark.add_csv_options()  Grade: A

Complexity
    Conditions: 1

Size
    Total Lines: 7

Duplication
    Lines: 0
    Ratio: 0%

Metric                       Value
cc (cyclomatic complexity)   1
dl (duplicated lines)        0
loc (lines of code)          7
rs                           9.4285
from __future__ import division
from __future__ import print_function

import argparse
import operator
import platform
import sys
import traceback
from collections import defaultdict
from datetime import datetime

import pytest

from . import __version__
from .fixture import BenchmarkFixture
from .session import BenchmarkSession
from .session import PerformanceRegression
from .timers import default_timer
from .utils import NameWrapper, parse_name_format
from .utils import format_dict
from .utils import get_commit_info
from .utils import get_current_time
from .utils import get_tag
from .utils import parse_columns
from .utils import parse_compare_fail
from .utils import parse_rounds
from .utils import parse_save
from .utils import parse_seconds
from .utils import parse_sort
from .utils import parse_timer
from .utils import parse_warmup


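# pytest hook: contributes one line to the session header, echoing the
# plugin version and the effective benchmark defaults.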
def pytest_report_header(config):
    bs = config._benchmarksession

    return ("benchmark: {version} (defaults:"
            " timer={timer}"
            " disable_gc={0[disable_gc]}"
            " min_rounds={0[min_rounds]}"
            " min_time={0[min_time]}"
            " max_time={0[max_time]}"
            " calibration_precision={0[calibration_precision]}"
            " warmup={0[warmup]}"
            " warmup_iterations={0[warmup_iterations]}"
            ")").format(
        bs.options,
        version=__version__,
        timer=bs.options.get("timer"),
    )


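# Note: the add_*_options() helpers below take the ``addoption`` callable and
# an option-name prefix as parameters instead of registering flags directly;
# presumably this lets the same definitions be reused outside of
# pytest_addoption(), e.g. by a standalone comparison tool, under a
# different prefix.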
def add_display_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}sort".format(prefix),
        metavar="COL", type=parse_sort, default="min",
        help="Column to sort on. Can be one of: 'min', 'max', 'mean', 'stddev', "
             "'name', 'fullname'. Default: %(default)r"
    )
    addoption(
        "--{0}group-by".format(prefix),
        metavar="LABEL", default="group",
        help="How to group tests. Can be one of: 'group', 'name', 'fullname', 'func', 'fullfunc', "
             "'param' or 'param:NAME', where NAME is the name passed to @pytest.mark.parametrize."
             " Default: %(default)r"
    )
    addoption(
        "--{0}columns".format(prefix),
        metavar="LABELS", type=parse_columns,
        default="min, max, mean, stddev, median, iqr, outliers, rounds, iterations",
        help="Comma-separated list of columns to show in the result table. Default: %(default)r"
    )
    addoption(
        "--{0}name".format(prefix),
        metavar="FORMAT", type=parse_name_format,
        default="normal",
        help="How to format names in results. Can be one of 'short', 'normal', 'long'. Default: %(default)r"
    )


def add_histogram_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}histogram".format(prefix),
        action="append", metavar="FILENAME-PREFIX", nargs="?", default=[], const=filename_prefix,
        help="Plot graphs of min/max/avg/stddev over time in FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )


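# add_csv_options() is the 7-line function graded A at the top of this report
# (cc 1: no branches). It registers a single optional-argument --benchmark-csv
# flag whose default filename is derived from the current time.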
def add_csv_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}csv".format(prefix),
        action="append", metavar="FILENAME", nargs="?", default=[], const=filename_prefix,
        help="Save a csv report. If FILENAME contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )


def add_global_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}storage".format(prefix),
        metavar="STORAGE-PATH", default="./.benchmarks",
        help="Specify a different path to store the runs (when --benchmark-save or --benchmark-autosave are used). "
             "Default: %(default)r",
    )
    addoption(
        "--{0}verbose".format(prefix),
        action="store_true", default=False,
        help="Dump diagnostic and progress information."
    )


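# pytest hook: registers all --benchmark-* command-line flags under the
# "benchmark" option group, then delegates to the shared helpers above for
# the global, display and histogram options.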
def pytest_addoption(parser):
    group = parser.getgroup("benchmark")
    group.addoption(
        "--benchmark-min-time",
        metavar="SECONDS", type=parse_seconds, default="0.000005",
        help="Minimum time per round in seconds. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-max-time",
        metavar="SECONDS", type=parse_seconds, default="1.0",
        help="Maximum run time per test - it will be repeated until this total time is reached. It may be "
             "exceeded if the test function is very slow or --benchmark-min-rounds is large (it takes precedence). "
             "Default: %(default)r"
    )
    group.addoption(
        "--benchmark-min-rounds",
        metavar="NUM", type=parse_rounds, default=5,
        help="Minimum rounds, even if total time would exceed `--max-time`. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-timer",
        metavar="FUNC", type=parse_timer, default=str(NameWrapper(default_timer)),
        help="Timer to use when measuring time. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-calibration-precision",
        metavar="NUM", type=int, default=10,
        help="Precision to use when calibrating number of iterations. Precision of 10 will make the timer look 10 times"
             " more accurate, at the cost of a less precise measure of deviations. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-warmup",
        metavar="KIND", nargs="?", default=parse_warmup("auto"), type=parse_warmup,
        help="Activates warmup. Will run the test function up to a number of times in the calibration phase. "
             "See `--benchmark-warmup-iterations`. Note: Even the warmup phase obeys --benchmark-max-time. "
             "Available KIND: 'auto', 'off', 'on'. Default: 'auto' (automatically activate on PyPy)."
    )
    group.addoption(
        "--benchmark-warmup-iterations",
        metavar="NUM", type=int, default=100000,
        help="Max number of iterations to run in the warmup phase. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-disable-gc",
        action="store_true", default=False,
        help="Disable GC during benchmarks."
    )
    group.addoption(
        "--benchmark-skip",
        action="store_true", default=False,
        help="Skip running any tests that contain benchmarks."
    )
    group.addoption(
        "--benchmark-disable",
        action="store_true", default=False,
        help="Disable benchmarks. Benchmarked functions are only run once and no stats are reported. Use this if you "
             "want to run the test but don't do any benchmarking."
    )
    group.addoption(
        "--benchmark-only",
        action="store_true", default=False,
        help="Only run benchmarks."
    )
    group.addoption(
        "--benchmark-save",
        metavar="NAME", type=parse_save,
        help="Save the current run into 'STORAGE-PATH/counter_NAME.json'."
    )
    tag = get_tag()
    group.addoption(
        "--benchmark-autosave",
        action='store_const', const=tag,
        help="Autosave the current run into 'STORAGE-PATH/counter_%s.json'." % tag,
    )
    group.addoption(
        "--benchmark-save-data",
        action="store_true",
        help="Use this to make --benchmark-save and --benchmark-autosave include all the timing data,"
             " not just the stats.",
    )
    group.addoption(
        "--benchmark-json",
        metavar="PATH", type=argparse.FileType('wb'),
        help="Dump a JSON report into PATH. "
             "Note that this will include the complete data (all the timings, not just the stats)."
    )
    group.addoption(
        "--benchmark-compare",
        metavar="NUM", nargs="?", default=[], const=True,
        help="Compare the current run against run NUM or the latest saved run if unspecified."
    )
    group.addoption(
        "--benchmark-compare-fail",
        metavar="EXPR", nargs="+", type=parse_compare_fail,
        help="Fail the test if performance regresses according to the given EXPR"
             " (e.g. min:5%% or mean:0.001 for number of seconds). Can be used multiple times."
    )
    add_global_options(group.addoption)
    add_display_options(group.addoption)
    add_histogram_options(group.addoption)


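# pytest hook: registers this plugin's own hookspecs. ``add_hookspecs`` is the
# newer pluginmanager API; falling back to the deprecated ``addhooks`` keeps
# older pytest versions working.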
def pytest_addhooks(pluginmanager):
    from . import hookspec

    method = getattr(pluginmanager, "add_hookspecs", None)
    if method is None:
        method = pluginmanager.addhooks
    method(hookspec)


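# Default implementation of the pytest_benchmark_compare_machine_info hook:
# warns when the saved run came from a different machine, since such timings
# are not directly comparable.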
def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    machine_info = format_dict(machine_info)
    compared_machine_info = format_dict(compared_benchmark["machine_info"])

    if compared_machine_info != machine_info:
        benchmarksession.logger.warn(
            "BENCHMARK-C6",
            "Benchmark machine_info is different. Current: %s VS saved: %s." % (
                machine_info,
                compared_machine_info,
            ),
            fslocation=benchmarksession.storage.location
        )

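# Compatibility shim: pytest.hookimpl(hookwrapper=True) superseded the older
# pytest.mark.hookwrapper marker; use whichever this pytest version provides.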
if hasattr(pytest, 'hookimpl'):
    _hookwrapper = pytest.hookimpl(hookwrapper=True)
else:
    _hookwrapper = pytest.mark.hookwrapper


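# Hook wrapper around the test call phase: enforces --benchmark-skip and
# --benchmark-only by skipping tests based on whether they actually use the
# ``benchmark`` fixture.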
@_hookwrapper
def pytest_runtest_call(item):
    bs = item.config._benchmarksession
    fixture = hasattr(item, "funcargs") and item.funcargs.get("benchmark")
    if isinstance(fixture, BenchmarkFixture):
        if bs.skip:
            pytest.skip("Skipping benchmark (--benchmark-skip active).")
        else:
            yield
    else:
        if bs.only:
            pytest.skip("Skipping non-benchmark (--benchmark-only active).")
        else:
            yield


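# Default pytest_benchmark_group_stats implementation: builds a grouping key
# from the comma-separated --benchmark-group-by criteria, buckets the
# benchmarks accordingly, and returns the buckets sorted for stable display.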
def pytest_benchmark_group_stats(config, benchmarks, group_by):
    groups = defaultdict(list)
    for bench in benchmarks:
        key = ()
        for grouping in group_by.split(','):
            if grouping == "group":
                key += bench["group"],
            elif grouping == "name":
                key += bench["name"],
            elif grouping == "func":
                key += bench["name"].split("[")[0],
            elif grouping == "fullname":
                key += bench["fullname"],
            elif grouping == "fullfunc":
                key += bench["fullname"].split("[")[0],
            elif grouping == "param":
                key += bench["param"],
            elif grouping.startswith("param:"):
                param_name = grouping[len("param:"):]
                key += '%s=%s' % (param_name, bench["params"][param_name]),
            else:
                raise NotImplementedError("Unsupported grouping %r." % group_by)
        groups[' '.join(str(p) for p in key if p) or None].append(bench)

    for grouped_benchmarks in groups.values():
        grouped_benchmarks.sort(key=operator.itemgetter("fullname" if "full" in group_by else "name"))
    return sorted(groups.items(), key=lambda pair: pair[0] or "")


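# Runs the session-wide finish() machinery (saving, comparing) once all tests
# are done, before pytest computes its final exit status.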
@_hookwrapper
def pytest_sessionfinish(session, exitstatus):
    session.config._benchmarksession.finish()
    yield


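# Renders the results table in the terminal summary. PerformanceRegression
# propagates untouched; any other error is logged with a full traceback
# before being re-raised.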
def pytest_terminal_summary(terminalreporter):
    try:
        terminalreporter.config._benchmarksession.display(terminalreporter)
    except PerformanceRegression:
        raise
    except Exception:
        terminalreporter.config._benchmarksession.logger.error("\n%s" % traceback.format_exc())
        raise


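# Default machine-info generator: records the host and interpreter details
# embedded in saved runs. PyPy is special-cased because
# platform.python_version() reports the implemented CPython version, not the
# PyPy release, which lives in sys.pypy_version_info.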
def pytest_benchmark_generate_machine_info():
    python_implementation = platform.python_implementation()
    python_implementation_version = platform.python_version()
    if python_implementation == 'PyPy':
        python_implementation_version = '%d.%d.%d' % sys.pypy_version_info[:3]
        if sys.pypy_version_info.releaselevel != 'final':
            python_implementation_version += '-%s%d' % sys.pypy_version_info[3:]
    return {
        "node": platform.node(),
        "processor": platform.processor(),
        "machine": platform.machine(),
        "python_compiler": platform.python_compiler(),
        "python_implementation": python_implementation,
        "python_implementation_version": python_implementation_version,
        "python_version": platform.python_version(),
        "python_build": platform.python_build(),
        "release": platform.release(),
        "system": platform.system()
    }


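# Default commit-info generator; simply delegates to utils.get_commit_info().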
def pytest_benchmark_generate_commit_info(config):
    return get_commit_info()


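# Default JSON report layout: machine/commit metadata plus one entry per
# benchmark; benchmarks that errored are left out, and include_data controls
# whether raw timings are embedded alongside the stats.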
def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_info, commit_info):
    benchmarks_json = []
    output_json = {
        "machine_info": machine_info,
        "commit_info": commit_info,
        "benchmarks": benchmarks_json,
        "datetime": datetime.utcnow().isoformat(),
        "version": __version__,
    }
    for bench in benchmarks:
        if not bench.has_error:
            benchmarks_json.append(bench.as_dict(include_data=include_data))
    return output_json


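# The ``benchmark`` fixture is the per-test entry point. Keyword arguments
# from a @pytest.mark.benchmark(...) marker override the session-wide
# defaults for that one test. A minimal (hypothetical) use:
#
#     def test_fib(benchmark):
#         benchmark(fib, 20)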
@pytest.fixture(scope="function")
def benchmark(request):
    bs = request.config._benchmarksession

    if bs.skip:
        pytest.skip("Benchmarks are skipped (--benchmark-skip was used).")
    else:
        node = request.node
        marker = node.get_marker("benchmark")
        options = marker.kwargs if marker else {}
        if "timer" in options:
            options["timer"] = NameWrapper(options["timer"])
        fixture = BenchmarkFixture(
            node,
            add_stats=bs.benchmarks.append,
            logger=bs.logger,
            warner=request.node.warn,
            disable=bs.disable,
            **dict(bs.options, **options)
        )
        request.addfinalizer(fixture._cleanup)
        return fixture


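# Convenience fixture that exposes the ``weave`` method of the ``benchmark``
# fixture directly.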
@pytest.fixture(scope="function")
def benchmark_weave(benchmark):
    return benchmark.weave


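# Validates @pytest.mark.benchmark usage as early as possible: positional
# arguments and unknown keyword options raise ValueError during setup rather
# than failing mid-run.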
def pytest_runtest_setup(item):
    marker = item.get_marker("benchmark")
    if marker:
        if marker.args:
            raise ValueError("benchmark mark can't have positional arguments.")
        for name in marker.kwargs:
            if name not in (
                    "max_time", "min_rounds", "min_time", "timer", "group", "disable_gc", "warmup",
                    "warmup_iterations", "calibration_precision"):
                raise ValueError("benchmark mark can't have %r keyword argument." % name)


@pytest.mark.trylast  # force the other plugins to initialise, fixes issue with capture not being properly initialised
def pytest_configure(config):
    config.addinivalue_line("markers", "benchmark: mark a test with custom benchmark settings.")
    config._benchmarksession = BenchmarkSession(config)
    config.pluginmanager.register(config._benchmarksession, "pytest-benchmark")