Completed: Push — master (0887a7...b18166) by Ionel Cristian (59s)

src.pytest_benchmark.BenchmarkFixture (rating: D)

Complexity

Total Complexity: 58

Size/Duplication

Total Lines: 250
Duplicated Lines: 0%

Metric                         Value
dl  (duplicated lines)         0
loc (lines of code)            250
rs                             4.8387
wmc (weighted method count)    58

16 Methods

Rating  Name                 Duplication  Size  Complexity
A       _get_precision()     0            6     2
B       __init__()           0            26    2
B       _raw()               0            24    5
D       _calibrate_timer()   0            42    8
A       aspect()             0            5     2
A       enabled()            0            3     1
A       _cleanup()           0            7     3
B       runner()             0            22    5
F       _raw_pedantic()      0            53    20
A       weave()              0            13    4
A       __call__()           0            11    3
B       _make_runner()       0            25    6
A       make_arguments()     0            8     4
A       pedantic()           0            12    3
A       wrapper()            0            2     1
A       _make_stats()        0            12    1

How to fix: Complexity

Complex Class

Complex classes like src.pytest_benchmark.BenchmarkFixture often do a lot of different things. To break such a class down, we need to identify a cohesive component within the class. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.

Once you have determined which fields belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often the faster option. One possible extraction for this class is sketched below.
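
As an illustration only: the timer-calibration state (`_precisions`, `_calibration_precision`, `_min_time`) and the methods `_get_precision()` and `_calibrate_timer()` form one cohesive component in the listing below. A minimal Extract Class sketch, where the `TimerCalibrator` name and its API are hypothetical:

from pytest_benchmark.timers import compute_timer_precision


class TimerCalibrator(object):
    # Hypothetical extracted component, not part of pytest-benchmark.
    _precisions = {}

    def __init__(self, timer, calibration_precision, min_time):
        self._timer = timer
        self._calibration_precision = calibration_precision
        self._min_time = min_time

    def precision(self):
        # Same per-timer caching as BenchmarkFixture._get_precision() below.
        return self._precisions.setdefault(self._timer, compute_timer_precision(self._timer))

    def target_round_time(self):
        # Same arithmetic as the start of BenchmarkFixture._calibrate_timer() below.
        return max(self._min_time, self.precision() * self._calibration_precision)

BenchmarkFixture would then build one TimerCalibrator in __init__() and delegate to it, moving the calibration logic (and its share of the class's WMC of 58) out of the fixture class.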

from __future__ import division
from __future__ import print_function

import gc
import sys
import time
import traceback
from math import ceil

from .compat import INT
from .compat import XRANGE
from .timers import compute_timer_precision
from .utils import format_time

try:
    import statistics
except (ImportError, SyntaxError):
    statistics_error = traceback.format_exc()
    statistics = None
else:
    statistics_error = None
    from .stats import BenchmarkStats


class FixtureAlreadyUsed(Exception):
    pass

class BenchmarkFixture(object):
    _precisions = {}

    @classmethod
    def _get_precision(cls, timer):
        if timer in cls._precisions:
            return cls._precisions[timer]
        else:
            return cls._precisions.setdefault(timer, compute_timer_precision(timer))

    def __init__(self, node, disable_gc, timer, min_rounds, min_time, max_time, warmup, warmup_iterations,
                 calibration_precision, add_stats, logger, warner, disabled, group=None):
        self.name = node.name
        self.fullname = node._nodeid
        self.disabled = disabled
        if hasattr(node, 'callspec'):
            self.param = node.callspec.id
            self.params = node.callspec.params
        else:
            self.param = None
            self.params = None
        self.group = group
        self.has_error = False

        self._disable_gc = disable_gc
        self._timer = timer.target
        self._min_rounds = min_rounds
        self._max_time = float(max_time)
        self._min_time = float(min_time)
        self._add_stats = add_stats
        self._calibration_precision = calibration_precision
        self._warmup = warmup and warmup_iterations
        self._logger = logger
        self._warner = warner
        self._cleanup_callbacks = []
        self._mode = None

    @property
    def enabled(self):
        return not self.disabled

    def _make_runner(self, function_to_benchmark, args, kwargs):
        def runner(loops_range, timer=self._timer):
            gc_enabled = gc.isenabled()
            if self._disable_gc:
                gc.disable()
            tracer = sys.gettrace()
            sys.settrace(None)
            try:
                if loops_range:
                    start = timer()
                    for _ in loops_range:
                        function_to_benchmark(*args, **kwargs)
                    end = timer()
                    return end - start
                else:
                    start = timer()
                    result = function_to_benchmark(*args, **kwargs)
                    end = timer()
                    return end - start, result
            finally:
                sys.settrace(tracer)
                if gc_enabled:
                    gc.enable()

        return runner

    def _make_stats(self, iterations):
        bench_stats = BenchmarkStats(self, iterations=iterations, options={
            "disable_gc": self._disable_gc,
            "timer": self._timer,
            "min_rounds": self._min_rounds,
            "max_time": self._max_time,
            "min_time": self._min_time,
            "warmup": self._warmup,
        })
        self._add_stats(bench_stats)
        self.stats = bench_stats.stats
        return bench_stats

    def __call__(self, function_to_benchmark, *args, **kwargs):
        if self._mode:
            self.has_error = True
            raise FixtureAlreadyUsed(
                "Fixture can only be used once. Previously it was used in %s mode." % self._mode)
        try:
            self._mode = 'benchmark(...)'
            return self._raw(function_to_benchmark, *args, **kwargs)
        except Exception:
            self.has_error = True
            raise

    def pedantic(self, target, args=(), kwargs=None, setup=None, rounds=1, warmup_rounds=0, iterations=1):
        if self._mode:
            self.has_error = True
            raise FixtureAlreadyUsed(
                "Fixture can only be used once. Previously it was used in %s mode." % self._mode)
        try:
            self._mode = 'benchmark.pedantic(...)'
            return self._raw_pedantic(target, args=args, kwargs=kwargs, setup=setup, rounds=rounds,
                                      warmup_rounds=warmup_rounds, iterations=iterations)
        except Exception:
            self.has_error = True
            raise

    def _raw(self, function_to_benchmark, *args, **kwargs):
        if not self.disabled:
            runner = self._make_runner(function_to_benchmark, args, kwargs)

            duration, iterations, loops_range = self._calibrate_timer(runner)

            # Choose how many times we must repeat the test
            rounds = int(ceil(self._max_time / duration))
            rounds = max(rounds, self._min_rounds)
            rounds = min(rounds, sys.maxsize)

            stats = self._make_stats(iterations)

            self._logger.debug("  Running %s rounds x %s iterations ..." % (rounds, iterations), yellow=True, bold=True)
            run_start = time.time()
            if self._warmup:
                warmup_rounds = min(rounds, max(1, int(self._warmup / iterations)))
                self._logger.debug("  Warmup %s rounds x %s iterations ..." % (warmup_rounds, iterations))
                for _ in XRANGE(warmup_rounds):
                    runner(loops_range)
            for _ in XRANGE(rounds):
                stats.update(runner(loops_range))
            self._logger.debug("  Ran for %ss." % format_time(time.time() - run_start), yellow=True, bold=True)
        return function_to_benchmark(*args, **kwargs)

    def _raw_pedantic(self, target, args=(), kwargs=None, setup=None, rounds=1, warmup_rounds=0, iterations=1):
        if kwargs is None:
            kwargs = {}

        has_args = bool(args or kwargs)

        if not isinstance(iterations, INT) or iterations < 1:
            raise ValueError("Must have positive int for `iterations`.")

        if not isinstance(rounds, INT) or rounds < 1:
            raise ValueError("Must have positive int for `rounds`.")

        if not isinstance(warmup_rounds, INT) or warmup_rounds < 0:
            raise ValueError("Must have non-negative int for `warmup_rounds`.")

        if iterations > 1 and setup:
            raise ValueError("Can't use more than 1 `iterations` with a `setup` function.")

        def make_arguments(args=args, kwargs=kwargs):
            if setup:
                maybe_args = setup()
                if maybe_args:
                    if has_args:
                        raise TypeError("Can't use `args` or `kwargs` if `setup` returns the arguments.")
                    args, kwargs = maybe_args
            return args, kwargs

        if self.disabled:
            args, kwargs = make_arguments()
            return target(*args, **kwargs)

        stats = self._make_stats(iterations)
        loops_range = XRANGE(iterations) if iterations > 1 else None
        for _ in XRANGE(warmup_rounds):
            args, kwargs = make_arguments()

            runner = self._make_runner(target, args, kwargs)
            runner(loops_range)

        for _ in XRANGE(rounds):
            args, kwargs = make_arguments()

            runner = self._make_runner(target, args, kwargs)
            if loops_range:
                duration = runner(loops_range)
            else:
                duration, result = runner(loops_range)
            stats.update(duration)

        if loops_range:
            args, kwargs = make_arguments()
            result = target(*args, **kwargs)
        return result

    def weave(self, target, **kwargs):
        try:
            import aspectlib
        except ImportError as exc:
            raise ImportError(exc.args, "Please install aspectlib or pytest-benchmark[aspect]")

        def aspect(function):
            def wrapper(*args, **kwargs):
                return self(function, *args, **kwargs)

            return wrapper

        self._cleanup_callbacks.append(aspectlib.weave(target, aspect, **kwargs).rollback)

    patch = weave

    def _cleanup(self):
        while self._cleanup_callbacks:
            callback = self._cleanup_callbacks.pop()
            callback()
        if not self._mode:
            self._logger.warn("BENCHMARK-U1", "Benchmark fixture was not used at all in this test!",
                              warner=self._warner, suspend=True)

    def _calibrate_timer(self, runner):
        timer_precision = self._get_precision(self._timer)
        min_time = max(self._min_time, timer_precision * self._calibration_precision)
        min_time_estimate = min_time * 5 / self._calibration_precision
        self._logger.debug("")
        self._logger.debug("  Timer precision: %ss" % format_time(timer_precision), yellow=True, bold=True)
        self._logger.debug("  Calibrating to target round %ss; will estimate when reaching %ss." % (
            format_time(min_time), format_time(min_time_estimate)), yellow=True, bold=True)

        loops = 1
        while True:
            loops_range = XRANGE(loops)
            duration = runner(loops_range)
            if self._warmup:
                warmup_start = time.time()
                warmup_iterations = 0
                warmup_rounds = 0
                while time.time() - warmup_start < self._max_time and warmup_iterations < self._warmup:
                    duration = min(duration, runner(loops_range))
                    warmup_rounds += 1
                    warmup_iterations += loops
                self._logger.debug("    Warmup: %ss (%s x %s iterations)." % (
                    format_time(time.time() - warmup_start),
                    warmup_rounds, loops
                ))

            self._logger.debug("    Measured %s iterations: %ss." % (loops, format_time(duration)), yellow=True)
            if duration >= min_time:
                break

            if duration >= min_time_estimate:
                # coarse estimation of the number of loops
                loops = int(ceil(min_time * loops / duration))
                self._logger.debug("    Estimating %s iterations." % loops, green=True)
                if loops == 1:
                    # If we got a single loop then bail early - nothing to calibrate if the
                    # test function is 100 times slower than the timer resolution.
                    loops_range = XRANGE(loops)
                    break
            else:
                loops *= 10
        return duration, loops, loops_range
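
For context on the two mutually exclusive modes guarded by `_mode` above, typical pytest-benchmark usage looks like the following sketch (the test functions are illustrative, not part of the source):

def test_default_mode(benchmark):
    # benchmark(...) goes through __call__/_raw: rounds and iterations
    # are calibrated automatically, and the benchmarked function's
    # result is returned.
    result = benchmark(sorted, [5, 3, 1])
    assert result == [1, 3, 5]

def test_pedantic_mode(benchmark):
    # benchmark.pedantic(...) goes through _raw_pedantic: the caller
    # pins rounds/iterations explicitly. Calling both modes on the same
    # fixture instance raises FixtureAlreadyUsed.
    benchmark.pedantic(sorted, args=([5, 3, 1],), rounds=10, iterations=100)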