Completed: Pull Request — master (#58), created by unknown, 01:18

BenchmarkSession (rating: A)

Complexity
    Total Complexity: 27

Size/Duplication
    Total Lines: 127
    Duplicated Lines: 0 %

Importance
    Changes: 15
    Bugs: 0
    Features: 0
Metric                         Value
c (changes)                    15
b (bugs)                       0
f (features)                   0
dl (duplicated lines)          0
loc (lines of code)            127
rs                             10
wmc (weighted method count)    27

6 Methods

Rating  Name                    Duplication  Size  Complexity
B       check_regressions()     0            9     5
A       finish()                0            9     2
A       display()               0            14    2
C       __init__()              0            55    9
C       prepare_benchmarks()    0            22    8
A       machine_info()          0            8     1

The per-method complexities sum to the class total of 27 (9 + 8 + 5 + 2 + 2 + 1), which matches the wmc value above.
from __future__ import division
from __future__ import print_function

import pytest

from .fixture import statistics
from .fixture import statistics_error
from .logger import Logger
from .table import TableResults
from .utils import NAME_FORMATTERS
from .utils import SecondsDecimal
from .utils import cached_property
from .utils import first_or_value
from .utils import get_machine_id
from .utils import load_timer
from .utils import short_filename

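# Raised by check_regressions() when at least one --benchmark-compare-fail
# condition is met.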
class PerformanceRegression(pytest.UsageError):
    pass

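# Collects benchmark results for a test run and handles saving them, comparing
# against previously saved runs, and rendering the results table.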
class BenchmarkSession(object):
    compared_mapping = None
    groups = None

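    # Reads all --benchmark-* options and decides whether benchmarking is
    # disabled (xdist active, running as an xdist slave, or the `statistics`
    # module unavailable).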
    def __init__(self, config, report_backend):
        self.verbose = config.getoption("benchmark_verbose")
        self.logger = Logger(self.verbose, config)
        self.config = config
        self.performance_regressions = []
        self.benchmarks = []
        self.machine_id = get_machine_id()
        self.report_backend = report_backend

        self.options = dict(
            min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
            min_rounds=config.getoption("benchmark_min_rounds"),
            max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
            timer=load_timer(config.getoption("benchmark_timer")),
            calibration_precision=config.getoption("benchmark_calibration_precision"),
            disable_gc=config.getoption("benchmark_disable_gc"),
            warmup=config.getoption("benchmark_warmup"),
            warmup_iterations=config.getoption("benchmark_warmup_iterations"),
        )
        self.skip = config.getoption("benchmark_skip")
        self.disabled = config.getoption("benchmark_disable") and not config.getoption("benchmark_enable")

        if config.getoption("dist", "no") != "no" and not self.skip:
            self.logger.warn(
                "BENCHMARK-U2",
                "Benchmarks are automatically disabled because xdist plugin is active. "
                "Benchmarks cannot be performed reliably in a parallelized environment.",
                fslocation="::"
            )
            self.disabled = True
        if hasattr(config, "slaveinput"):
            self.disabled = True
        if not statistics:
            self.logger.warn(
                "BENCHMARK-U3",
                "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
                statistics_error,
                fslocation="::"
            )
            self.disabled = True

        self.only = config.getoption("benchmark_only")
        self.sort = config.getoption("benchmark_sort")
        self.columns = config.getoption("benchmark_columns")
        if self.skip and self.only:
            raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
        if self.disabled and self.only:
            raise pytest.UsageError(
                "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
                "automatically activated if xdist is on or you're missing the statistics dependency.")
        self.group_by = config.getoption("benchmark_group_by")
        self.compare_fail = config.getoption("benchmark_compare_fail")
        self.name_format = NAME_FORMATTERS[config.getoption("benchmark_name")]

        self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)

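    # Machine metadata used to key saved results; generated and optionally
    # amended through plugin hooks, then cached for the rest of the run.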
    @cached_property
    def machine_info(self):
        obj = self.config.hook.pytest_benchmark_generate_machine_info(config=self.config)
        self.config.hook.pytest_benchmark_update_machine_info(
            config=self.config,
            machine_info=obj
        )
        return obj

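    # Yields flattened benchmark dicts: one per matching entry in the loaded
    # comparison data (with the saved stats merged in) plus one for the current
    # run, recording any --benchmark-compare-fail regressions along the way.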
    def prepare_benchmarks(self):
        for bench in self.benchmarks:
            if bench:
                compared = False
                for path, compared_mapping in self.compared_mapping.items():
                    if bench.fullname in compared_mapping:
                        compared = compared_mapping[bench.fullname]
                        source = short_filename(path, self.machine_id)
                        flat_bench = bench.as_dict(include_data=False, stats=False)
                        flat_bench.update(compared["stats"])
                        flat_bench["path"] = str(path)
                        flat_bench["source"] = source
                        if self.compare_fail:
                            for check in self.compare_fail:
                                fail = check.fails(bench, flat_bench)
                                if fail:
                                    self.performance_regressions.append((self.name_format(flat_bench), fail))
                        yield flat_bench
                flat_bench = bench.as_dict(include_data=False, flat=True)
                flat_bench["path"] = None
                flat_bench["source"] = compared and "NOW"
                yield flat_bench

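    # Called at the end of the test run: saves results through the report
    # backend, loads comparison data, and groups the prepared benchmarks via
    # the pytest_benchmark_group_stats hook.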
    def finish(self):
        self.report_backend.handle_saving(self.benchmarks, self.machine_info)
        self.compared_mapping = self.report_backend.handle_loading(self.machine_info)
        prepared_benchmarks = list(self.prepare_benchmarks())
        if prepared_benchmarks:
            self.groups = self.config.hook.pytest_benchmark_group_stats(
                config=self.config,
                benchmarks=prepared_benchmarks,
                group_by=self.group_by
            )

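    # Renders the grouped results to the terminal reporter `tr`, then runs the
    # regression checks.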
    def display(self, tr):
        if not self.groups:
            return

        tr.ensure_newline()
        results_table = TableResults(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
            name_format=self.name_format,
            logger=self.logger
        )
        results_table.display(tr, self.groups)
        self.check_regressions()

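    # Fails the run if --benchmark-compare-fail was given without usable
    # comparison data, and raises PerformanceRegression if any regressions were
    # recorded in prepare_benchmarks().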
    def check_regressions(self):
        if self.compare_fail and not self.compared_mapping:
            raise pytest.UsageError("--benchmark-compare-fail requires valid --benchmark-compare.")

        if self.performance_regressions:
            self.logger.error("Performance has regressed:\n%s" % "\n".join(
                "\t%s - %s" % line for line in self.performance_regressions
            ))
            raise PerformanceRegression("Performance has regressed.")
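
For context on how the methods above are driven, the sketch below shows typical wiring from a pytest plugin module in the same package. It is a minimal illustration under assumptions: `pytest_configure` and `pytest_terminal_summary` are standard pytest hooks, but `_NullReportBackend` and the `config._benchmarksession` attribute name are hypothetical and only mirror the two backend methods this class actually calls (`handle_saving` and `handle_loading`).

# Illustrative sketch only; not part of this pull request.
from .session import BenchmarkSession


class _NullReportBackend(object):
    # Hypothetical stand-in exposing only the two methods finish() uses.
    def handle_saving(self, benchmarks, machine_info):
        pass

    def handle_loading(self, machine_info):
        return {}


def pytest_configure(config):
    # One BenchmarkSession per test run; all --benchmark-* options are read here.
    config._benchmarksession = BenchmarkSession(config, report_backend=_NullReportBackend())


def pytest_terminal_summary(terminalreporter):
    session = terminalreporter.config._benchmarksession
    session.finish()                    # save results, load comparison data, group stats
    session.display(terminalreporter)   # print the tables; may raise PerformanceRegression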
150