Pull Request — master (#59)
Namespace.getoption()  (rating: A)

Complexity:  2 conditions
Size:        5 total lines
Duplication: 0 duplicated lines (0 %)
Importance:  1 change, 0 bugs, 0 features

Metric  Value
cc      2
c       1
b       0
f       0
dl      0
loc     5
rs      9.4285
import json
import logging
import os
import sys
from io import BytesIO
from io import StringIO

import py
import pytest
from freezegun import freeze_time
from pathlib import Path

from pytest_benchmark import plugin
from pytest_benchmark.plugin import BenchmarkSession
from pytest_benchmark.plugin import pytest_benchmark_compare_machine_info
from pytest_benchmark.plugin import pytest_benchmark_generate_json
from pytest_benchmark.plugin import pytest_benchmark_group_stats
from pytest_benchmark.session import PerformanceRegression
from pytest_benchmark.storage import Storage
from pytest_benchmark.utils import NAME_FORMATTERS
from pytest_benchmark.utils import DifferenceRegressionCheck
from pytest_benchmark.utils import PercentageRegressionCheck
from pytest_benchmark.utils import get_machine_id


pytest_plugins = "pytester"


THIS = py.path.local(__file__)
STORAGE = THIS.dirpath(THIS.purebasename)

SAVE_DATA = json.load(STORAGE.listdir('0030_*.json')[0].open())
JSON_DATA = json.load(STORAGE.listdir('0030_*.json')[0].open())
SAVE_DATA["machine_info"] = JSON_DATA["machine_info"] = {'foo': 'bar'}
SAVE_DATA["commit_info"] = JSON_DATA["commit_info"] = {'foo': 'bar'}


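# Minimal attribute bag used throughout these tests: it stands in for pytest's
# config/hook objects and for individual benchmark records. getoption() mimics
# the Config.getoption(name, default) lookup that the session code expects.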
class Namespace(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __getitem__(self, item):
        return self.__dict__[item]

    def getoption(self, item, default=None):
        try:
            return self[item]
        except KeyError:
            return default


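# A BytesIO whose contents stay readable after close(), so tests can inspect
# what the session wrote to the "file" it was handed.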
class LooseFileLike(BytesIO):
    def close(self):
        value = self.getvalue()
        super(LooseFileLike, self).close()
        self.getvalue = lambda: value


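# A BenchmarkSession wired up by hand instead of through pytest's plugin
# machinery: storage points at the JSON fixtures in the directory next to this
# file, the hooks are stubbed with plain lambdas, and self.benchmarks is
# preloaded from the last matching fixture file.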
class MockSession(BenchmarkSession):
    def __init__(self, name_format):
        self.histogram = True
        self.storage = Storage(str(STORAGE), default_machine_id=get_machine_id(), logger=None)
        self.benchmarks = []
        self.performance_regressions = []
        self.sort = u"min"
        self.compare = '0001'
        self.logger = logging.getLogger(__name__)
        self.machine_id = "FoobarOS"
        self.machine_info = {'foo': 'bar'}
        self.name_format = NAME_FORMATTERS[name_format]
        self.save = self.autosave = self.json = False
        self.options = {
            'min_rounds': 123,
            'min_time': 234,
            'max_time': 345,
            'use_cprofile': False,
        }
        self.cprofile_sort_by = 'cumtime'
        self.compare_fail = []
        self.config = Namespace(hook=Namespace(
            pytest_benchmark_group_stats=pytest_benchmark_group_stats,
            pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_machine_info=lambda **kwargs: None,
            pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
            pytest_benchmark_generate_json=pytest_benchmark_generate_json,
            pytest_benchmark_update_json=lambda **kwargs: None,
            pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_commit_info=lambda **kwargs: None,
        ))
        self.group_by = 'group'
        self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr',
                        'outliers', 'rounds', 'iterations']
        for bench_file in reversed(self.storage.query("[0-9][0-9][0-9][0-9]_*")):
            with bench_file.open('rU') as fh:
                data = json.load(fh)
            self.benchmarks.extend(
                Namespace(
                    as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench,
                                   cprofile_sort_by='cumtime', cprofile_all_columns=False:
                        dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                    name=bench['name'],
                    fullname=bench['fullname'],
                    group=bench['group'],
                    options=bench['options'],
                    has_error=False,
                    params=None,
                    **bench['stats']
                )
                for bench in data['benchmarks']
            )
            break


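# Python 2/3 compatibility shims: text_type resolves to unicode on Python 2 and
# str on Python 3, and force_text()/force_bytes() normalize values accordingly.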
try:
    text_type = unicode
except NameError:
    text_type = str


def force_text(text):
    if isinstance(text, text_type):
        return text
    else:
        return text.decode('utf-8')


def force_bytes(text):
    if isinstance(text, text_type):
        return text.encode('utf-8')
    else:
        return text


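# Fixtures: every test runs once per name formatter ('short', 'normal', 'long'),
# each time with a freshly constructed MockSession.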
@pytest.fixture(params=['short', 'normal', 'long'])
def name_format(request):
    return request.param


@pytest.fixture
def sess(request, name_format):
    return MockSession(name_format)


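# Replaces the session logger with lambdas that capture warn/info/error output
# in a StringIO, so assertions can be made against the rendered text.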
def make_logger(sess):
    output = StringIO()
    sess.logger = Namespace(
        warn=lambda code, text, **opts: output.write(u"%s: %s %s\n" % (code, force_text(text), opts)),
        info=lambda text, **opts: output.write(force_text(text) + u'\n'),
        error=lambda text: output.write(force_text(text) + u'\n'),
    )
    return output


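# Smoke test: exercises a full finish()/display() pass (including histogram
# output under docs/sample) without asserting on the rendered text.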
def test_rendering(sess):
    output = make_logger(sess)
    sess.histogram = os.path.join('docs', 'sample')
    sess.compare = '*/*'
    sess.sort = 'name'
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))


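# Comparing against the 0001 fixture should trip both compare-fail checks:
# display() and check_regressions() must raise PerformanceRegression, and the
# recorded and logged messages must match for each name format.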
def test_regression_checks(sess, name_format):
    output = make_logger(sess)
    sess.handle_loading()
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0001)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('xfast_parametrized[0] (0001)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
    }[name_format]
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0001) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\txfast_parametrized[0] (0001) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
"""
    }[name_format]


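# Same checks against the 0002 fixture, whose saved stddev of 0.0 makes the
# percentage change come out as inf; skipped on pre-2.7 Pythons (see the linked
# CPython issue).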
@pytest.mark.skipif(sys.version_info[:2] < (2, 7),
                    reason="Something weird going on, see: https://bugs.python.org/issue4482")
def test_regression_checks_inf(sess, name_format):
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0002)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('xfast_parametrized[0] (0002)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > "
             '0.000001000')
        ]
    }[name_format]
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0002) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\txfast_parametrized[0] (0002) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
"""
    }[name_format]


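# Compares the preloaded benchmarks against the 0001 fixture and fnmatch-checks
# the machine_info warning plus the rendered comparison table.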
def test_compare_1(sess, LineMatcher):
    output = make_logger(sess)
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted'
        '-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns)               *      Min                 *Max                Mean              StdDev              Median                IQR            Outliers(*)  Rounds  Iterations',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0001*)     217.3145 (1.0)      11*447.3891 (1.0)      262.2408 (1.00)     214.0442 (1.0)      220.1664 (1.00)     38.2154 (2.03)         90;1878    9987         418',
        '*xfast_parametrized[[]0[]] (NOW) *     217.9511 (1.00)     13*290.0380 (1.16)     261.2051 (1.0)      263.9842 (1.23)     220.1638 (1.0)      18.8080 (1.0)         160;1726    9710         431',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])


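# The same comparison rendered against the 0002 fixture; note the inf StdDev
# ratio in the expected table.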
def test_compare_2(sess, LineMatcher):
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        section=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns)            *         Min                 *Max                Mean              StdDev              Median                IQR            Outliers(*)  Rounds  Iterations',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0002*)     216.9028 (1.0)       7*739.2997 (1.0)      254.0585 (1.0)        0.0000 (1.0)      219.8103 (1.0)      27.3309 (1.45)        235;1688   11009         410',
        '*xfast_parametrized[[]0[]] (NOW) *     217.9511 (1.00)     13*290.0380 (1.72)     261.2051 (1.03)     263.9842 (inf)      220.1638 (1.00)     18.8080 (1.0)         160;1726    9710         431',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])


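# The saving tests below freeze the clock and pin the plugin version so the
# generated payloads compare equal to the prebuilt SAVE_DATA/JSON_DATA fixtures.
# Here only sess.json is set: nothing lands in storage and the payload streamed
# to the file-like object equals JSON_DATA.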
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_json(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = False
    sess.autosave = False
    sess.json = LooseFileLike()
    sess.save_data = False
    sess.handle_saving()
    assert tmpdir.listdir() == []
    assert json.loads(sess.json.getvalue().decode()) == JSON_DATA


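# Saving with an explicit name ('foobar') plus autosave produces exactly one
# JSON file in storage, with contents equal to SAVE_DATA.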
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_name(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = 'foobar'
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == SAVE_DATA


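# Saving without an explicit name (save=True) behaves the same: one JSON file
# whose contents equal SAVE_DATA.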
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_no_name(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == SAVE_DATA


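# Benchmarks flagged with has_error are excluded from the saved payload, leaving
# an empty 'benchmarks' list alongside the frozen metadata.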
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_error(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    for bench in sess.benchmarks:
        bench.has_error = True
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == {
        'benchmarks': [],
        'commit_info': {'foo': 'bar'},
        'datetime': '2015-08-15T00:04:18.687119',
        'machine_info': {'foo': 'bar'},
        'version': '2.5.0'
    }


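# Autosave alone (no explicit save target) also writes a single JSON file
# matching SAVE_DATA.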
@freeze_time("2015-08-15T00:04:18.687119")
def test_autosave(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = False
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == SAVE_DATA