from __future__ import division
from __future__ import print_function

import argparse
import operator
import platform
import sys
import traceback
from collections import defaultdict
from datetime import datetime

import pytest

from . import __version__
from .fixture import BenchmarkFixture
from .report_backend import ElasticReportBackend, FileReportBackend
from .session import BenchmarkSession
from .session import PerformanceRegression
from .timers import default_timer
from .utils import NameWrapper, parse_name_format
from .utils import format_dict
from .utils import get_project_name
from .utils import get_commit_info
from .utils import get_current_time
from .utils import get_tag
from .utils import parse_columns
from .utils import parse_compare_fail
from .utils import parse_rounds
from .utils import parse_save
from .utils import parse_seconds
from .utils import parse_sort
from .utils import parse_timer
from .utils import parse_warmup


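# Adds a "benchmark:" line with the plugin version and the session-wide defaults
# to the header pytest prints at the start of a run.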
def pytest_report_header(config):
    bs = config._benchmarksession

    return ("benchmark: {version} (defaults:"
            " timer={timer}"
            " disable_gc={0[disable_gc]}"
            " min_rounds={0[min_rounds]}"
            " min_time={0[min_time]}"
            " max_time={0[max_time]}"
            " calibration_precision={0[calibration_precision]}"
            " warmup={0[warmup]}"
            " warmup_iterations={0[warmup_iterations]}"
            ")").format(
        bs.options,
        version=__version__,
        timer=bs.options.get("timer"),
    )


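# Registers the display-related options (sorting, grouping, columns, name format).
# The prefix argument allows these options to be registered under a different name
# prefix (presumably for reuse outside the pytest plugin).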
def add_display_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}sort".format(prefix),
        metavar="COL", type=parse_sort, default="min",
        help="Column to sort on. Can be one of: 'min', 'max', 'mean', 'stddev', "
             "'name', 'fullname'. Default: %(default)r"
    )
    addoption(
        "--{0}group-by".format(prefix),
        metavar="LABEL", default="group",
        help="How to group tests. Can be one of: 'group', 'name', 'fullname', 'func', 'fullfunc', "
             "'param' or 'param:NAME', where NAME is the name passed to @pytest.mark.parametrize."
             " Default: %(default)r"
    )
    addoption(
        "--{0}columns".format(prefix),
        metavar="LABELS", type=parse_columns,
        default="min, max, mean, stddev, median, iqr, outliers, rounds, iterations",
        help="Comma-separated list of columns to show in the result table. Default: %(default)r"
    )
    addoption(
        "--{0}name".format(prefix),
        metavar="FORMAT", type=parse_name_format,
        default="normal",
        help="How to format names in results. Can be one of 'short', 'normal', 'long'. Default: %(default)r"
    )


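# Registers the histogram option; the default filename prefix embeds the current
# time, so repeated runs get distinct output files.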
def add_histogram_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}histogram".format(prefix),
        action="append", metavar="FILENAME-PREFIX", nargs="?", default=[], const=filename_prefix,
        help="Plot graphs of min/max/avg/stddev over time in FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )


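# Registers the CSV report option. Note that pytest_addoption below does not call
# this helper, so it is presumably wired up by a separate command-line interface.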
def add_csv_options(addoption, prefix="benchmark-"):
    filename_prefix = "benchmark_%s" % get_current_time()
    addoption(
        "--{0}csv".format(prefix),
        action="append", metavar="FILENAME", nargs="?", default=[], const=filename_prefix,
        help="Save a csv report. If FILENAME contains"
             " slashes ('/') then directories will be created. Default: %r" % filename_prefix
    )


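# Registers the storage location and verbosity options shared by the save/compare machinery.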
def add_global_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}storage".format(prefix),
        metavar="STORAGE-PATH", default="./.benchmarks",
        help="Specify a different path to store the runs (when --benchmark-save or --benchmark-autosave are used). "
             "Default: %(default)r",
    )
    addoption(
        "--{0}verbose".format(prefix),
        action="store_true", default=False,
        help="Dump diagnostic and progress information."
    )


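# Registers the Elasticsearch reporting options (enable flag, host, index and doctype).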
def add_elasticsearch_options(addoption, prefix="benchmark-"):
    addoption(
        "--{0}elasticsearch".format(prefix),
        action="store_true", default=False,
        help="Save data to Elasticsearch instead of JSON files."
    )

    addoption(
        "--{0}elasticsearch-host".format(prefix),
        metavar="URL", default="localhost:9200",
        help="Address of the Elasticsearch host."
    )
    addoption(
        "--{0}elasticsearch-index".format(prefix),
        metavar="INDEX", default="benchmark",
        help="Elasticsearch index to save data in."
    )
    addoption(
        "--{0}elasticsearch-doctype".format(prefix),
        metavar="DOCTYPE", default="benchmark",
        help="Elasticsearch doctype of the inserted data."
    )


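# The main pytest hook for registering command line options. Core measurement
# options are added directly; display, storage, histogram and Elasticsearch
# options are added through the helpers above.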
def pytest_addoption(parser):
    group = parser.getgroup("benchmark")
    group.addoption(
        "--benchmark-min-time",
        metavar="SECONDS", type=parse_seconds, default="0.000005",
        help="Minimum time per round in seconds. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-max-time",
        metavar="SECONDS", type=parse_seconds, default="1.0",
        help="Maximum run time per test - it will be repeated until this total time is reached. It may be "
             "exceeded if the test function is very slow or --benchmark-min-rounds is large (it takes precedence). "
             "Default: %(default)r"
    )
    group.addoption(
        "--benchmark-min-rounds",
        metavar="NUM", type=parse_rounds, default=5,
        help="Minimum rounds, even if total time would exceed `--max-time`. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-timer",
        metavar="FUNC", type=parse_timer, default=str(NameWrapper(default_timer)),
        help="Timer to use when measuring time. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-calibration-precision",
        metavar="NUM", type=int, default=10,
        help="Precision to use when calibrating the number of iterations. A precision of 10 will make the timer look"
             " 10 times more accurate, at the cost of a less precise measure of deviations. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-warmup",
        metavar="KIND", nargs="?", default=parse_warmup("auto"), type=parse_warmup,
        help="Activates warmup. Will run the test function up to the specified number of times in the calibration "
             "phase. See `--benchmark-warmup-iterations`. Note: even the warmup phase obeys --benchmark-max-time. "
             "Available KIND: 'auto', 'off', 'on'. Default: 'auto' (automatically activate on PyPy)."
    )
    group.addoption(
        "--benchmark-warmup-iterations",
        metavar="NUM", type=int, default=100000,
        help="Max number of iterations to run in the warmup phase. Default: %(default)r"
    )
    group.addoption(
        "--benchmark-disable-gc",
        action="store_true", default=False,
        help="Disable GC during benchmarks."
    )
    group.addoption(
        "--benchmark-skip",
        action="store_true", default=False,
        help="Skip running any tests that contain benchmarks."
    )
    group.addoption(
        "--benchmark-disable",
        action="store_true", default=False,
        help="Disable benchmarks. Benchmarked functions are only run once and no stats are reported. Use this if you "
             "want to run the test but don't do any benchmarking."
    )
    group.addoption(
        "--benchmark-enable",
        action="store_true", default=False,
        help="Forcibly enable benchmarks. Use this option to override --benchmark-disable (in case you have it in "
             "pytest configuration)."
    )
    group.addoption(
        "--benchmark-only",
        action="store_true", default=False,
        help="Only run benchmarks."
    )
    group.addoption(
        "--benchmark-save",
        metavar="NAME", type=parse_save,
        help="Save the current run into 'STORAGE-PATH/counter_NAME.json'."
    )
    tag = get_tag()
    group.addoption(
        "--benchmark-autosave",
        action='store_const', const=tag,
        help="Autosave the current run into 'STORAGE-PATH/counter_%s.json'." % tag,
    )
    group.addoption(
        "--benchmark-save-data",
        action="store_true",
        help="Use this to make --benchmark-save and --benchmark-autosave include all the timing data,"
             " not just the stats.",
    )
    group.addoption(
        "--benchmark-json",
        metavar="PATH", type=argparse.FileType('wb'),
        help="Dump a JSON report into PATH. "
             "Note that this will include the complete data (all the timings, not just the stats)."
    )
    group.addoption(
        "--benchmark-compare",
        metavar="NUM", nargs="?", default=[], const=True,
        help="Compare the current run against run NUM or the latest saved run if unspecified."
    )
    group.addoption(
        "--benchmark-compare-fail",
        metavar="EXPR", nargs="+", type=parse_compare_fail,
        help="Fail test if performance regresses according to given EXPR"
             " (eg: min:5%% or mean:0.001 for number of seconds). Can be used multiple times."
    )
    group.addoption(
        "--benchmark-project",
        default=get_project_name(),
        help="Name of the current project."
    )
    add_global_options(group.addoption)
    add_display_options(group.addoption)
    add_histogram_options(group.addoption)
    add_elasticsearch_options(group.addoption)


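# Registers this plugin's extra hooks (the pytest_benchmark_* hookspecs) with
# pytest, falling back to the older addhooks API when add_hookspecs is missing.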
def pytest_addhooks(pluginmanager):
    from . import hookspec

    method = getattr(pluginmanager, "add_hookspecs", None)
    if method is None:
        method = pluginmanager.addhooks
    method(hookspec)


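# Default implementation of the machine-info comparison hook: warn when the saved
# run was produced on a machine that looks different from the current one.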
def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    machine_info = format_dict(machine_info)
    compared_machine_info = format_dict(compared_benchmark["machine_info"])

    if compared_machine_info != machine_info:
        benchmarksession.logger.warn(
            "BENCHMARK-C6",
            "Benchmark machine_info is different. Current: %s VS saved: %s." % (
                machine_info,
                compared_machine_info,
            ),
            fslocation=benchmarksession.storage.location
        )


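# pytest.hookimpl(hookwrapper=True) replaced pytest.mark.hookwrapper in newer
# pytest releases; pick whichever spelling this pytest version supports.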
if hasattr(pytest, 'hookimpl'):
    _hookwrapper = pytest.hookimpl(hookwrapper=True)
else:
    _hookwrapper = pytest.mark.hookwrapper


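# Wraps every test call so that --benchmark-skip and --benchmark-only can skip
# benchmark and non-benchmark tests respectively.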
@_hookwrapper
def pytest_runtest_call(item):
    bs = item.config._benchmarksession
    fixture = hasattr(item, "funcargs") and item.funcargs.get("benchmark")
    if isinstance(fixture, BenchmarkFixture):
        if bs.skip:
            pytest.skip("Skipping benchmark (--benchmark-skip active).")
        else:
            yield
    else:
        if bs.only:
            pytest.skip("Skipping non-benchmark (--benchmark-only active).")
        else:
            yield


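# Default implementation of the grouping hook: buckets benchmarks according to
# --benchmark-group-by (a comma-separated list of groupings) and returns the
# groups sorted by their key.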
def pytest_benchmark_group_stats(config, benchmarks, group_by):
    groups = defaultdict(list)
    for bench in benchmarks:
        key = ()
        for grouping in group_by.split(','):
            if grouping == "group":
                key += bench["group"],
            elif grouping == "name":
                key += bench["name"],
            elif grouping == "func":
                key += bench["name"].split("[")[0],
            elif grouping == "fullname":
                key += bench["fullname"],
            elif grouping == "fullfunc":
                key += bench["fullname"].split("[")[0],
            elif grouping == "param":
                key += bench["param"],
            elif grouping.startswith("param:"):
                param_name = grouping[len("param:"):]
                key += '%s=%s' % (param_name, bench["params"][param_name]),
            else:
                raise NotImplementedError("Unsupported grouping %r." % group_by)
        groups[' '.join(str(p) for p in key if p) or None].append(bench)

    for grouped_benchmarks in groups.values():
        grouped_benchmarks.sort(key=operator.itemgetter("fullname" if "full" in group_by else "name"))
    return sorted(groups.items(), key=lambda pair: pair[0] or "")


@_hookwrapper
def pytest_sessionfinish(session, exitstatus):
    session.config._benchmarksession.finish()
    yield


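# Renders the benchmark tables at the end of the run. Unexpected errors are logged
# with a traceback before being re-raised so they are not silently swallowed.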
def pytest_terminal_summary(terminalreporter):
    try:
        terminalreporter.config._benchmarksession.display(terminalreporter)
    except PerformanceRegression:
        raise
    except Exception:
        terminalreporter.config._benchmarksession.logger.error("\n%s" % traceback.format_exc())
        raise


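# Collects machine details that are saved alongside the benchmark data. On PyPy the
# implementation version is reported separately from the Python language version.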
def pytest_benchmark_generate_machine_info():
    python_implementation = platform.python_implementation()
    python_implementation_version = platform.python_version()
    if python_implementation == 'PyPy':
        python_implementation_version = '%d.%d.%d' % sys.pypy_version_info[:3]
        if sys.pypy_version_info.releaselevel != 'final':
            python_implementation_version += '-%s%d' % sys.pypy_version_info[3:]
    return {
        "node": platform.node(),
        "processor": platform.processor(),
        "machine": platform.machine(),
        "python_compiler": platform.python_compiler(),
        "python_implementation": python_implementation,
        "python_implementation_version": python_implementation_version,
        "python_version": platform.python_version(),
        "python_build": platform.python_build(),
        "release": platform.release(),
        "system": platform.system()
    }


def pytest_benchmark_generate_commit_info(config):
    return get_commit_info()


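# Builds the JSON payload for saved runs: machine/commit info plus the per-benchmark
# stats (and the raw timings when include_data is true). Errored benchmarks are excluded.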
def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_info, commit_info):
    benchmarks_json = []
    output_json = {
        "machine_info": machine_info,
        "commit_info": commit_info,
        "benchmarks": benchmarks_json,
        "datetime": datetime.utcnow().isoformat(),
        "version": __version__,
    }
    for bench in benchmarks:
        if not bench.has_error:
            benchmarks_json.append(bench.as_dict(include_data=include_data))
    return output_json


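# The benchmark fixture itself: builds a BenchmarkFixture configured from the session
# defaults merged with any @pytest.mark.benchmark keyword arguments on the test.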
@pytest.fixture(scope="function")
def benchmark(request):
    bs = request.config._benchmarksession

    if bs.skip:
        pytest.skip("Benchmarks are skipped (--benchmark-skip was used).")
    else:
        node = request.node
        marker = node.get_marker("benchmark")
        options = marker.kwargs if marker else {}
        if "timer" in options:
            options["timer"] = NameWrapper(options["timer"])
        fixture = BenchmarkFixture(
            node,
            add_stats=bs.benchmarks.append,
            logger=bs.logger,
            warner=request.node.warn,
            disabled=bs.disabled,
            **dict(bs.options, **options)
        )
        request.addfinalizer(fixture._cleanup)
        return fixture


@pytest.fixture(scope="function")
def benchmark_weave(benchmark):
    return benchmark.weave


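# Validates @pytest.mark.benchmark usage: no positional arguments, and only the
# recognised keyword arguments are accepted.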
def pytest_runtest_setup(item):
    marker = item.get_marker("benchmark")
    if marker:
        if marker.args:
            raise ValueError("benchmark mark can't have positional arguments.")
        for name in marker.kwargs:
            if name not in (
                    "max_time", "min_rounds", "min_time", "timer", "group", "disable_gc", "warmup",
                    "warmup_iterations", "calibration_precision"):
                raise ValueError("benchmark mark can't have %r keyword argument." % name)


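# Chooses the report backend for the session: Elasticsearch when
# --benchmark-elasticsearch is given, plain files otherwise.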
def get_report_backend(config):
    if config.getoption("benchmark_elasticsearch"):
        return ElasticReportBackend(config)
    else:
        return FileReportBackend(config)


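# Creates the BenchmarkSession, registers the benchmark marker description, and
# registers the session as a plugin so its hooks participate in the run.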
@pytest.mark.trylast  # force the other plugins to initialise, fixes issue with capture not being properly initialised
def pytest_configure(config):
    config.addinivalue_line("markers", "benchmark: mark a test with custom benchmark settings.")
    config._benchmarksession = BenchmarkSession(config, get_report_backend(config))
    config.pluginmanager.register(config._benchmarksession, "pytest-benchmark")