Completed — Push to master (17fae3...aba503) by Ionel Cristian

src.pytest_benchmark.pytest_benchmark_group_stats() — rating: F

Complexity
    Conditions: 12
Size
    Total Lines: 32
Duplication
    Lines: 0
    Ratio: 0 %

Metric  Value
cc      12        (cyclomatic complexity)
dl      0         (duplicated lines)
loc     32        (lines of code)
rs      2.7856
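
The report does not name its measurement tool. As a rough way to reproduce the complexity and size numbers locally, here is a minimal sketch using radon; this is an assumption (the values above may come from a different analyzer), and the module path is guessed from the report's context:

    from radon.complexity import cc_visit
    from radon.raw import analyze

    # Path to the analyzed module (assumed).
    with open("src/pytest_benchmark/plugin.py") as fh:
        source = fh.read()

    # Cyclomatic complexity per function/class block; the 12 branches of
    # pytest_benchmark_group_stats should show up here.
    for block in cc_visit(source):
        if block.name == "pytest_benchmark_group_stats":
            print(block.name, block.complexity)

    # Raw size metrics (note: radon reports these per module, not per function).
    print(analyze(source).loc)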

How to fix: Complexity

Complex functions like src.pytest_benchmark.pytest_benchmark_group_stats() often do a lot of different things. To break such a function down, identify a cohesive piece of work inside it. A common approach is to look for statements that operate on the same data or share the same purpose — here, the repeated pattern of computing a group key and appending the benchmark to that group.

Once you have determined which statements belong together, you can apply the Extract Method refactoring. When the extracted logic only varies by case, replacing the conditional chain with a dispatch table is often simpler still; a sketch of that approach follows.
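
As a concrete illustration, here is a minimal sketch (not the project's actual code; the helper names _KEY_GETTERS and _group_key are invented for the example) that would replace the if/elif chain shown in the listing below with a lookup table, keeping the hook's behaviour while cutting the branch count:

    from collections import defaultdict
    import operator

    # One key-extraction function per grouping mode; adding a mode adds an
    # entry here instead of another elif branch.
    _KEY_GETTERS = {
        "group": lambda bench: bench["group"],
        "name": lambda bench: bench["canonical_name"],
        "func": lambda bench: bench["canonical_name"].split("[")[0],
        "fullname": lambda bench: bench["canonical_fullname"],
        "fullfunc": lambda bench: bench["canonical_fullname"].split("[")[0],
        "param": lambda bench: bench["param"],
    }


    def _group_key(bench, group_by):
        # "param:NAME" groups by a specific parametrize argument.
        if group_by.startswith("param:"):
            return bench["params"][group_by[len("param:"):]]
        getter = _KEY_GETTERS.get(group_by)
        if getter is None:
            raise NotImplementedError("Unsupported grouping %r." % group_by)
        return getter(bench)


    def pytest_benchmark_group_stats(config, benchmarks, group_by):
        groups = defaultdict(list)
        for bench in benchmarks:
            groups[_group_key(bench, group_by)].append(bench)
        # Sort benchmarks inside each group, then return groups ordered by key.
        sort_key = "fullname" if "full" in group_by else "name"
        for grouped_benchmarks in groups.values():
            grouped_benchmarks.sort(key=operator.itemgetter(sort_key))
        return sorted(groups.items(), key=lambda pair: pair[0] or "")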

from __future__ import division
from __future__ import print_function

import argparse
import operator
import platform
import sys
import traceback
from collections import defaultdict
from datetime import datetime

import pytest

from . import __version__
from .fixture import BenchmarkFixture
from .session import BenchmarkSession
from .session import PerformanceRegression
from .timers import default_timer
from .utils import NameWrapper
from .utils import format_dict
from .utils import get_commit_info
from .utils import get_current_time
from .utils import get_tag
from .utils import parse_columns
from .utils import parse_compare_fail
from .utils import parse_rounds
from .utils import parse_save
from .utils import parse_seconds
from .utils import parse_sort
from .utils import parse_timer
from .utils import parse_warmup


def pytest_report_header(config):
    bs = config._benchmarksession

    return ("benchmark: {version} (defaults:"
            " timer={timer}"
            " disable_gc={0[disable_gc]}"
            " min_rounds={0[min_rounds]}"
            " min_time={0[min_time]}"
            " max_time={0[max_time]}"
            " calibration_precision={0[calibration_precision]}"
            " warmup={0[warmup]}"
            " warmup_iterations={0[warmup_iterations]}"
            ")").format(
        bs.options,
        version=__version__,
        timer=bs.options.get("timer"),
    )


def add_display_options(addoption):
    addoption(
        "--benchmark-sort",
        metavar="COL", type=parse_sort, default="min",
        help="Column to sort on. Can be one of: 'min', 'max', 'mean', 'stddev', "
             "'name', 'fullname'. Default: %(default)r"
    )
    addoption(
        "--benchmark-group-by",
        metavar="LABEL", default="group",
        help="How to group tests. Can be one of: 'group', 'name', 'fullname', 'func', 'fullfunc', "
             "'param' or 'param:NAME', where NAME is the name passed to @pytest.parametrize."
             " Default: %(default)r"
    )
    addoption(
        "--benchmark-columns",
        metavar="LABELS", type=parse_columns,
        default="min, max, mean, stddev, median, iqr, outliers, rounds, iterations",
        help='Comma-separated list of columns to show in the result table. Default: %(default)r'
    )
    prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--benchmark-histogram",
        action='append', metavar="FILENAME-PREFIX", nargs="?", default=[], const=prefix,
        help="Plot graphs of min/max/avg/stddev over time in FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX contains"
             " slashes ('/') then directories will be created. Default: %r" % prefix
    )


def add_global_options(addoption):
    addoption(
        "--benchmark-storage",
        metavar="STORAGE-PATH", default="./.benchmarks",
        help="Specify a different path to store the runs (when --benchmark-save or --benchmark-autosave are used). "
             "Default: %(default)r",
    )
    addoption(
        "--benchmark-verbose",
        action="store_true", default=False,
        help="Dump diagnostic and progress information."
    )


def pytest_addoption(parser):
    group = parser.getgroup("benchmark")
    group.addoption(
        "--benchmark-min-time",
        metavar="SECONDS", type=parse_seconds, default="0.000005",
        help="Minimum time per round in seconds. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-max-time",
        metavar="SECONDS", type=parse_seconds, default="1.0",
        help="Maximum run time per test - it will be repeated until this total time is reached. It may be "
             "exceeded if test function is very slow or --benchmark-min-rounds is large (it takes precedence). "
             "Default: %(default)r"
    )
    group.addoption(
        "--benchmark-min-rounds",
        metavar="NUM", type=parse_rounds, default=5,
        help="Minimum rounds, even if total time would exceed `--max-time`. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-timer",
        metavar="FUNC", type=parse_timer, default=str(NameWrapper(default_timer)),
        help="Timer to use when measuring time. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-calibration-precision",
        metavar="NUM", type=int, default=10,
        help="Precision to use when calibrating number of iterations. Precision of 10 will make the timer look 10 times"
             " more accurate, at a cost of less precise measure of deviations. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-warmup",
        metavar="KIND", nargs="?", default=parse_warmup("auto"), type=parse_warmup,
        help="Activates warmup. Will run the test function up to number of times in the calibration phase. "
             "See `--benchmark-warmup-iterations`. Note: Even the warmup phase obeys --benchmark-max-time. "
             "Available KIND: 'auto', 'off', 'on'. Default: 'auto' (automatically activate on PyPy)."
    )
    group.addoption(
        "--benchmark-warmup-iterations",
        metavar="NUM", type=int, default=100000,
        help="Max number of iterations to run in the warmup phase. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-disable-gc",
        action="store_true", default=False,
        help="Disable GC during benchmarks."
    )
    group.addoption(
        "--benchmark-skip",
        action="store_true", default=False,
        help="Skip running any tests that contain benchmarks."
    )
    group.addoption(
        "--benchmark-disable",
        action="store_true", default=False,
        help="Disable benchmarks. Benchmarked functions are only run once and no stats are reported. Use this if you "
             "want to run the test but don't do any benchmarking."
    )
    group.addoption(
        "--benchmark-only",
        action="store_true", default=False,
        help="Only run benchmarks."
    )
    group.addoption(
        "--benchmark-save",
        metavar="NAME", type=parse_save,
        help="Save the current run into 'STORAGE-PATH/counter_NAME.json'."
    )
    tag = get_tag()
    group.addoption(
        "--benchmark-autosave",
        action='store_const', const=tag,
        help="Autosave the current run into 'STORAGE-PATH/counter_%s.json'." % tag,
    )
    group.addoption(
        "--benchmark-save-data",
        action="store_true",
        help="Use this to make --benchmark-save and --benchmark-autosave include all the timing data,"
             " not just the stats.",
    )
    group.addoption(
        "--benchmark-json",
        metavar="PATH", type=argparse.FileType('wb'),
        help="Dump a JSON report into PATH. "
             "Note that this will include the complete data (all the timings, not just the stats)."
    )
    group.addoption(
        "--benchmark-compare",
        metavar="NUM", nargs="?", default=[], const=True,
        help="Compare the current run against run NUM or the latest saved run if unspecified."
    )
    group.addoption(
        "--benchmark-compare-fail",
        metavar="EXPR", nargs="+", type=parse_compare_fail,
        help="Fail test if performance regresses according to given EXPR"
             " (eg: min:5%% or mean:0.001 for number of seconds). Can be used multiple times."
    )
    add_global_options(group.addoption)
    add_display_options(group.addoption)


def pytest_addhooks(pluginmanager):
    from . import hookspec

    # add_hookspecs is the newer pluginmanager API; addhooks is the older fallback.
    method = getattr(pluginmanager, "add_hookspecs", None)
    if method is None:
        method = pluginmanager.addhooks
    method(hookspec)


def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    if compared_benchmark["machine_info"] != machine_info:
        benchmarksession.logger.warn(
            "BENCHMARK-C6",
            "Benchmark machine_info is different. Current: %s VS saved: %s." % (
                format_dict(machine_info),
                format_dict(compared_benchmark["machine_info"]),
            ),
            fslocation=benchmarksession.storage.location
        )

# Use pytest.hookimpl when available, fall back to the older pytest.mark.hookwrapper marker.
if hasattr(pytest, 'hookimpl'):
    _hookwrapper = pytest.hookimpl(hookwrapper=True)
else:
    _hookwrapper = pytest.mark.hookwrapper


@_hookwrapper
def pytest_runtest_call(item):
    bs = item.config._benchmarksession
    fixture = hasattr(item, "funcargs") and item.funcargs.get("benchmark")
    if isinstance(fixture, BenchmarkFixture):
        if bs.skip:
            pytest.skip("Skipping benchmark (--benchmark-skip active).")
        else:
            yield
    else:
        if bs.only:
            pytest.skip("Skipping non-benchmark (--benchmark-only active).")
        else:
            yield


def pytest_benchmark_group_stats(config, benchmarks, group_by):
    groups = defaultdict(list)
    for bench in benchmarks:
        if group_by == "group":
            groups[bench["group"]].append(bench)

        elif group_by == "name":
            groups[bench["canonical_name"]].append(bench)

        elif group_by == "func":
            groups[bench["canonical_name"].split("[")[0]].append(bench)

        elif group_by == "fullname":
            groups[bench["canonical_fullname"]].append(bench)

        elif group_by == "fullfunc":
            groups[bench["canonical_fullname"].split("[")[0]].append(bench)

        elif group_by == "param":
            groups[bench["param"]].append(bench)

        elif group_by.startswith("param:"):
            param_name = group_by[len("param:"):]
            param_value = bench["params"][param_name]
            groups[param_value].append(bench)

        else:
            raise NotImplementedError("Unsupported grouping %r." % group_by)

    # Sort the benchmarks inside each group, then return the groups ordered by key.
    for grouped_benchmarks in groups.values():
        grouped_benchmarks.sort(key=operator.itemgetter("fullname" if "full" in group_by else "name"))
    return sorted(groups.items(), key=lambda pair: pair[0] or "")


@_hookwrapper
def pytest_sessionfinish(session, exitstatus):
    session.config._benchmarksession.finish()
    yield


def pytest_terminal_summary(terminalreporter):
    try:
        terminalreporter.config._benchmarksession.display(terminalreporter)
    except PerformanceRegression:
        raise
    except Exception:
        terminalreporter.config._benchmarksession.logger.error("\n%s" % traceback.format_exc())
        raise


def pytest_benchmark_generate_machine_info():
    python_implementation = platform.python_implementation()
    python_implementation_version = platform.python_version()
    if python_implementation == 'PyPy':
        python_implementation_version = '%d.%d.%d' % sys.pypy_version_info[:3]
        if sys.pypy_version_info.releaselevel != 'final':
            python_implementation_version += '-%s%d' % sys.pypy_version_info[3:]
    return {
        "node": platform.node(),
        "processor": platform.processor(),
        "machine": platform.machine(),
        "python_compiler": platform.python_compiler(),
        "python_implementation": python_implementation,
        "python_implementation_version": python_implementation_version,
        "python_version": platform.python_version(),
        "python_build": platform.python_build(),
        "release": platform.release(),
        "system": platform.system()
    }


def pytest_benchmark_generate_commit_info(config):
    return get_commit_info()


def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_info, commit_info):
    benchmarks_json = []
    output_json = {
        "machine_info": machine_info,
        "commit_info": commit_info,
        "benchmarks": benchmarks_json,
        "datetime": datetime.utcnow().isoformat(),
        "version": __version__,
    }
    for bench in benchmarks:
        if not bench.has_error:
            benchmarks_json.append(bench.as_dict(include_data=include_data))
    return output_json


@pytest.fixture(scope="function")
def benchmark(request):
    bs = request.config._benchmarksession

    if bs.skip:
        pytest.skip("Benchmarks are skipped (--benchmark-skip was used).")
    else:
        node = request.node
        marker = node.get_marker("benchmark")
        options = marker.kwargs if marker else {}
        if "timer" in options:
            options["timer"] = NameWrapper(options["timer"])
        fixture = BenchmarkFixture(
            node,
            add_stats=bs.benchmarks.append,
            logger=bs.logger,
            warner=request.node.warn,
            disable=bs.disable,
            **dict(bs.options, **options)
        )
        request.addfinalizer(fixture._cleanup)
        return fixture


@pytest.fixture(scope="function")
def benchmark_weave(benchmark):
    return benchmark.weave


def pytest_runtest_setup(item):
    marker = item.get_marker("benchmark")
    if marker:
        if marker.args:
            raise ValueError("benchmark mark can't have positional arguments.")
        for name in marker.kwargs:
            if name not in (
                    "max_time", "min_rounds", "min_time", "timer", "group", "disable_gc", "warmup",
                    "warmup_iterations", "calibration_precision"):
                raise ValueError("benchmark mark can't have %r keyword argument." % name)


@pytest.mark.trylast  # force the other plugins to initialise, fixes issue with capture not being properly initialised
def pytest_configure(config):
    config.addinivalue_line("markers", "benchmark: mark a test with custom benchmark settings.")
    config._benchmarksession = BenchmarkSession(config)
    config.pluginmanager.register(config._benchmarksession, "pytest-benchmark")
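
For reference, the benchmark fixture registered by this plugin is consumed from a test by calling it with the function under test. A minimal usage example (not part of the analyzed module):

    import time


    def test_sleep_example(benchmark):
        # The fixture calls the target repeatedly, records the timings, and
        # returns whatever the benchmarked callable returns.
        result = benchmark(time.sleep, 0.0001)
        assert result is None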