Completed
Push — master (5dce03...efda8a) by Ionel Cristian, created 01:05

src.pytest_benchmark.BenchmarkSession.compare_file() (rating: F)

Complexity
    Conditions: 10

Size
    Total Lines: 33

Duplication
    Lines: 0
    Ratio: 0 %

Metric   Value
cc       10
dl       0
loc      33
rs       3.1304

How to fix: Complexity

Complex methods like src.pytest_benchmark.BenchmarkSession.compare_file() often do a lot of different things. To break such a method down, we need to identify a cohesive component within it (or within the class that owns it). A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.

Once you have determined which fields and methods belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster. For a single long method such as compare_file(), Extract Method, which moves each cohesive block of statements into its own helper, is usually the most direct fix; a sketch follows.
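The sketch below is illustrative only: the real body of compare_file() is not shown on this page, so the helper names, data shapes and thresholds are hypothetical. It only demonstrates the shape of the refactoring, where each branch of a long comparison routine moves into a small, individually testable method.

# Illustrative sketch only. compare_file()'s real body is not part of this
# report, so the helpers, data shapes and thresholds below are hypothetical;
# the point is that each branch moves into its own named helper, which brings
# the per-method condition count down.
import json


class SavedRunComparison(object):
    def __init__(self, logger, fail_thresholds):
        self.logger = logger
        self.fail_thresholds = fail_thresholds  # e.g. {"min": 0.05} for "min:5%"

    def compare_file(self, current, path):
        # The original long method becomes a short pipeline of helpers.
        saved = self._load_saved(path)
        if saved is None:
            return []
        self._warn_on_machine_mismatch(current, saved)
        return self._collect_regressions(current, saved)

    def _load_saved(self, path):
        try:
            with open(path) as fh:
                return json.load(fh)
        except (IOError, ValueError) as exc:
            self.logger("Could not load %s: %s" % (path, exc))
            return None

    def _warn_on_machine_mismatch(self, current, saved):
        if current.get("machine_info") != saved.get("machine_info"):
            self.logger("Benchmark machine_info is different.")

    def _collect_regressions(self, current, saved):
        regressions = []
        for name, stats in current.get("benchmarks", {}).items():
            old = saved.get("benchmarks", {}).get(name)
            if old is None:
                continue
            for field, allowed in self.fail_thresholds.items():
                if stats[field] > old[field] * (1 + allowed):
                    regressions.append((name, field))
        return regressions

Split this way, each helper carries only one or two conditions, so the original method's ten conditions are spread across units that can be read and tested in isolation. The plugin module shown on this page follows.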

from __future__ import division
from __future__ import print_function

import argparse
import operator
import platform
import sys
import traceback
from collections import defaultdict
from datetime import datetime

import pytest

from . import __version__
from .fixture import BenchmarkFixture
from .session import BenchmarkSession
from .session import PerformanceRegression
from .timers import default_timer
from .utils import NameWrapper
from .utils import format_dict
from .utils import get_commit_info
from .utils import get_current_time
from .utils import get_tag
from .utils import parse_columns
from .utils import parse_compare_fail
from .utils import parse_rounds
from .utils import parse_save
from .utils import parse_seconds
from .utils import parse_sort
from .utils import parse_timer
from .utils import parse_warmup

try:
    import statistics
except (ImportError, SyntaxError):
    statistics = False
    statistics_error = traceback.format_exc()
else:
    from .stats import Stats


class FixtureAlreadyUsed(Exception):
    pass


def pytest_report_header(config):
    bs = config._benchmarksession

    return ("benchmark: {version} (defaults:"
            " timer={timer}"
            " disable_gc={0[disable_gc]}"
            " min_rounds={0[min_rounds]}"
            " min_time={0[min_time]}"
            " max_time={0[max_time]}"
            " calibration_precision={0[calibration_precision]}"
            " warmup={0[warmup]}"
            " warmup_iterations={0[warmup_iterations]}"
            ")").format(
        bs.options,
        version=__version__,
        timer=bs.options.get("timer"),
    )


def add_display_options(addoption):
    addoption(
        "--benchmark-sort",
        metavar="COL", type=parse_sort, default="min",
        help="Column to sort on. Can be one of: 'min', 'max', 'mean', 'stddev', "
             "'name', 'fullname'. Default: %(default)r"
    )
    addoption(
        "--benchmark-group-by",
        metavar="LABEL", default="group",
        help="How to group tests. Can be one of: 'group', 'name', 'fullname', 'func', 'fullfunc', "
             "'param' or 'param:NAME', where NAME is the name passed to @pytest.parametrize."
             " Default: %(default)r"
    )
    addoption(
        "--benchmark-columns",
        metavar="LABELS", type=parse_columns,
        default="min, max, mean, stddev, median, iqr, outliers, rounds, iterations",
        help='Comma-separated list of columns to show in the result table. Default: %(default)r'
    )
    prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--benchmark-histogram",
        action='append', metavar="FILENAME-PREFIX", nargs="?", default=[], const=prefix,
        help="Plot graphs of min/max/avg/stddev over time in FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX contains"
             " slashes ('/') then directories will be created. Default: %r" % prefix
    )


def add_global_options(addoption):
    addoption(
        "--benchmark-storage",
        metavar="STORAGE-PATH", default="./.benchmarks",
        help="Specify a different path to store the runs (when --benchmark-save or --benchmark-autosave are used). "
             "Default: %(default)r",
    )
    addoption(
        "--benchmark-verbose",
        action="store_true", default=False,
        help="Dump diagnostic and progress information."
    )

def pytest_addoption(parser):
    group = parser.getgroup("benchmark")
    group.addoption(
        "--benchmark-min-time",
        metavar="SECONDS", type=parse_seconds, default="0.000005",
        help="Minimum time per round in seconds. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-max-time",
        metavar="SECONDS", type=parse_seconds, default="1.0",
        help="Maximum run time per test - it will be repeated until this total time is reached. It may be "
             "exceeded if the test function is very slow or --benchmark-min-rounds is large (it takes precedence). "
             "Default: %(default)r"
    )
    group.addoption(
        "--benchmark-min-rounds",
        metavar="NUM", type=parse_rounds, default=5,
        help="Minimum rounds, even if total time would exceed `--max-time`. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-timer",
        metavar="FUNC", type=parse_timer, default=str(NameWrapper(default_timer)),
        help="Timer to use when measuring time. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-calibration-precision",
        metavar="NUM", type=int, default=10,
        help="Precision to use when calibrating number of iterations. Precision of 10 will make the timer look 10 times"
             " more accurate, at a cost of less precise measure of deviations. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-warmup",
        metavar="KIND", nargs="?", default=parse_warmup("auto"), type=parse_warmup,
        help="Activates warmup. Will run the test function a number of times in the calibration phase. "
             "See `--benchmark-warmup-iterations`. Note: Even the warmup phase obeys --benchmark-max-time. "
             "Available KIND: 'auto', 'off', 'on'. Default: 'auto' (automatically activate on PyPy)."
    )
    group.addoption(
        "--benchmark-warmup-iterations",
        metavar="NUM", type=int, default=100000,
        help="Max number of iterations to run in the warmup phase. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-disable-gc",
        action="store_true", default=False,
        help="Disable GC during benchmarks."
    )
    group.addoption(
        "--benchmark-skip",
        action="store_true", default=False,
        help="Skip running any tests that contain benchmarks."
    )
    group.addoption(
        "--benchmark-disable",
        action="store_true", default=False,
        help="Disable benchmarks. Benchmarked functions are only run once and no stats are reported. Use this if you "
             "want to run the test but not do any benchmarking."
    )
    group.addoption(
        "--benchmark-only",
        action="store_true", default=False,
        help="Only run benchmarks."
    )
    group.addoption(
        "--benchmark-save",
        metavar="NAME", type=parse_save,
        help="Save the current run into 'STORAGE-PATH/counter_NAME.json'."
    )
    tag = get_tag()
    group.addoption(
        "--benchmark-autosave",
        action='store_const', const=tag,
        help="Autosave the current run into 'STORAGE-PATH/counter_%s.json'." % tag,
    )
    group.addoption(
        "--benchmark-save-data",
        action="store_true",
        help="Use this to make --benchmark-save and --benchmark-autosave include all the timing data,"
             " not just the stats.",
    )
    group.addoption(
        "--benchmark-json",
        metavar="PATH", type=argparse.FileType('wb'),
        help="Dump a JSON report into PATH. "
             "Note that this will include the complete data (all the timings, not just the stats)."
    )
    group.addoption(
        "--benchmark-compare",
        metavar="NUM", nargs="?", default=[], const=True,
        help="Compare the current run against run NUM or the latest saved run if unspecified."
    )
    group.addoption(
        "--benchmark-compare-fail",
        metavar="EXPR", nargs="+", type=parse_compare_fail,
        help="Fail test if performance regresses according to given EXPR"
             " (e.g. min:5%% or mean:0.001 for number of seconds). Can be used multiple times."
    )
    add_global_options(group.addoption)
    add_display_options(group.addoption)

def pytest_addhooks(pluginmanager):
    from . import hookspec

    method = getattr(pluginmanager, "add_hookspecs", None)
    if method is None:
        method = pluginmanager.addhooks
    method(hookspec)


def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    if compared_benchmark["machine_info"] != machine_info:
        benchmarksession.logger.warn(
            "BENCHMARK-C6",
            "Benchmark machine_info is different. Current: %s VS saved: %s." % (
                format_dict(machine_info),
                format_dict(compared_benchmark["machine_info"]),
            ),
            fslocation=benchmarksession.storage.location
        )

if hasattr(pytest, 'hookimpl'):
    _hookwrapper = pytest.hookimpl(hookwrapper=True)
else:
    _hookwrapper = pytest.mark.hookwrapper

@_hookwrapper
def pytest_runtest_call(item):
    bs = item.config._benchmarksession
    fixture = hasattr(item, "funcargs") and item.funcargs.get("benchmark")
    if isinstance(fixture, BenchmarkFixture):
        if bs.skip:
            pytest.skip("Skipping benchmark (--benchmark-skip active).")
        else:
            yield
    else:
        if bs.only:
            pytest.skip("Skipping non-benchmark (--benchmark-only active).")
        else:
            yield

def pytest_benchmark_group_stats(config, benchmarks, group_by):
    groups = defaultdict(list)
    for bench in benchmarks:
        if group_by == "group":
            groups[bench["group"]].append(bench)

        elif group_by == "name":
            groups[bench["canonical_name"]].append(bench)

        elif group_by == "func":
            groups[bench["canonical_name"].split("[")[0]].append(bench)

        elif group_by == "fullname":
            groups[bench["canonical_fullname"]].append(bench)

        elif group_by == "fullfunc":
            groups[bench["canonical_fullname"].split("[")[0]].append(bench)

        elif group_by == "param":
            groups[bench["param"]].append(bench)

        elif group_by.startswith("param:"):
            param_name = group_by[len("param:"):]
            param_value = bench["params"][param_name]
            groups[param_value].append(bench)

        else:
            raise NotImplementedError("Unsupported grouping %r." % group_by)
    #
    for grouped_benchmarks in groups.values():
        grouped_benchmarks.sort(key=operator.itemgetter("fullname" if "full" in group_by else "name"))
    return sorted(groups.items(), key=lambda pair: pair[0] or "")


@_hookwrapper
def pytest_sessionfinish(session, exitstatus):
    session.config._benchmarksession.finish()
    yield


def pytest_terminal_summary(terminalreporter):
    try:
        terminalreporter.config._benchmarksession.display(terminalreporter)
    except PerformanceRegression:
        raise
    except Exception:
        terminalreporter.config._benchmarksession.logger.error("\n%s" % traceback.format_exc())
        raise

def pytest_benchmark_generate_machine_info():
    python_implementation = platform.python_implementation()
    python_implementation_version = platform.python_version()
    if python_implementation == 'PyPy':
        python_implementation_version = '%d.%d.%d' % sys.pypy_version_info[:3]
        if sys.pypy_version_info.releaselevel != 'final':
            python_implementation_version += '-%s%d' % sys.pypy_version_info[3:]
    return {
        "node": platform.node(),
        "processor": platform.processor(),
        "machine": platform.machine(),
        "python_compiler": platform.python_compiler(),
        "python_implementation": python_implementation,
        "python_implementation_version": python_implementation_version,
        "python_version": platform.python_version(),
        "python_build": platform.python_build(),
        "release": platform.release(),
        "system": platform.system()
    }


def pytest_benchmark_generate_commit_info(config):
    return get_commit_info()


def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_info, commit_info):
    benchmarks_json = []
    output_json = {
        "machine_info": machine_info,
        "commit_info": commit_info,
        "benchmarks": benchmarks_json,
        "datetime": datetime.utcnow().isoformat(),
        "version": __version__,
    }
    for bench in benchmarks:
        if not bench.has_error:
            benchmarks_json.append(bench.as_dict(include_data=include_data))
    return output_json

@pytest.fixture(scope="function")
def benchmark(request):
    bs = request.config._benchmarksession

    if bs.skip:
        pytest.skip("Benchmarks are skipped (--benchmark-skip was used).")
    else:
        node = request.node
        marker = node.get_marker("benchmark")
        options = marker.kwargs if marker else {}
        if "timer" in options:
            options["timer"] = NameWrapper(options["timer"])
        fixture = BenchmarkFixture(
            node,
            add_stats=bs.benchmarks.append,
            logger=bs.logger,
            warner=request.node.warn,
            disable=bs.disable,
            **dict(bs.options, **options)
        )
        request.addfinalizer(fixture._cleanup)
        return fixture


@pytest.fixture(scope="function")
def benchmark_weave(benchmark):
    return benchmark.weave

def pytest_runtest_setup(item):
    marker = item.get_marker("benchmark")
    if marker:
        if marker.args:
            raise ValueError("benchmark mark can't have positional arguments.")
        for name in marker.kwargs:
            if name not in (
                    "max_time", "min_rounds", "min_time", "timer", "group", "disable_gc", "warmup",
                    "warmup_iterations", "calibration_precision"):
                raise ValueError("benchmark mark can't have %r keyword argument." % name)


@pytest.mark.trylast  # force the other plugins to initialise, fixes issue with capture not being properly initialised
def pytest_configure(config):
    config.addinivalue_line("markers", "benchmark: mark a test with custom benchmark settings.")
    config._benchmarksession = BenchmarkSession(config)
    config.pluginmanager.register(config._benchmarksession, "pytest-benchmark")
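
For context, the benchmark fixture registered above is the surface test code interacts with. A minimal usage sketch follows; the test module, test names and the timed function are made up, and only the fixture call and the marker keywords mirror the plugin code above.

# test_example.py (hypothetical test module)
import time

import pytest


def something_slow():
    time.sleep(0.0001)


def test_something_slow(benchmark):
    # The fixture is callable: it calibrates, runs the rounds and records the
    # stats that the hooks above group, save and compare.
    benchmark(something_slow)


@pytest.mark.benchmark(group="sleeps", min_rounds=10, disable_gc=True)
def test_with_custom_settings(benchmark):
    # Marker keywords map to the names validated in pytest_runtest_setup().
    benchmark(something_slow)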