Completed push to master ( 9d6e1e...1ea530 ) by Ionel Cristian, 01:04

test_list()   B

Complexity     Conditions: 3
Size           Total Lines: 36
Duplication    Lines: 0, Ratio: 0 %
Importance     Changes: 1 (Bugs: 0, Features: 1)

Raw metrics: cc 3, dl 0, loc 36, rs 8.8571, c 1, b 0, f 1
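
The raw cc and loc values line up with the human-readable Conditions and Total
Lines figures above. As a minimal sketch, assuming the analyzer is radon (the
report does not name it) and assuming the listing below is saved as test_cli.py
(a hypothetical filename), the per-function complexity and the file's raw line
counts can be reproduced like this:

    # Hypothetical reproduction of the cc/loc metrics with radon.
    from radon.complexity import cc_visit
    from radon.raw import analyze

    source = open('test_cli.py').read()  # assumed filename for this listing
    for block in cc_visit(source):
        # block.complexity is the per-function cyclomatic complexity (cc)
        print(block.name, block.complexity)
    print('loc:', analyze(source).loc)  # raw line count (loc) for the file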
import py
import pytest

pytest_plugins = 'pytester',

THIS = py.path.local(__file__)
STORAGE = THIS.dirpath('test_storage')

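# Top-level --help: the complete usage text, options, and command summary.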
def test_help(testdir):
    result = testdir.run('py.test-benchmark', '--help')
    result.stdout.fnmatch_lines([
        "usage: py.test-benchmark [-h [COMMAND]] [--storage URI] [--verbose]",
        "                         {help,list,compare} ...",
        "",
        "pytest_benchmark's management commands.",
        "",
        "optional arguments:",
        "  -h [COMMAND], --help [COMMAND]",
        "                        Display help and exit.",
        "  --storage URI, -s URI",
        "                        Specify a path to store the runs as uri in form",
        "                        file://path or elasticsearch+http[s]://host1,host2/[in",
        "                        dex/doctype?project_name=Project] (when --benchmark-",
        "                        save or --benchmark-autosave are used). For backwards",
        "                        compatibility unexpected values are converted to",
        "                        file://<value>. Default: 'file://./.benchmarks'.",
        "  --verbose, -v         Dump diagnostic and progress information.",
        "",
        "commands:",
        "  {help,list,compare}",
        "    help                Display help and exit.",
        "    list                List saved runs.",
        "    compare             Compare saved runs.",
    ])
    assert result.ret == 0

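# The bare "help" command documents the "help" subcommand itself.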
def test_help_command(testdir):
    result = testdir.run('py.test-benchmark', 'help')
    result.stdout.fnmatch_lines([
        'usage: py.test-benchmark help [-h] [command]',
        '',
        'Display help and exit.',
        '',
        'positional arguments:',
        '  command',
        '',
        'optional arguments:',
        '  -h, --help  show this help message and exit',
    ])

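# "list --help" and "help list" are two spellings of the same request and must
# produce identical usage text.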
@pytest.mark.parametrize('args', ['list --help', 'help list'])
def test_help_list(testdir, args):
    result = testdir.run('py.test-benchmark', *args.split())
    result.stdout.fnmatch_lines([
        "usage: py.test-benchmark list [-h]",
        "",
        "List saved runs.",
        "",
        "optional arguments:",
        "  -h, --help  show this help message and exit",
    ])
    assert result.ret == 0

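# Same equivalence check for "compare", whose help text is the most detailed.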
@pytest.mark.parametrize('args', ['compare --help', 'help compare'])
def test_help_compare(testdir, args):
    result = testdir.run('py.test-benchmark', *args.split())
    result.stdout.fnmatch_lines([
        "usage: py.test-benchmark compare [-h] [--sort COL] [--group-by LABEL]",
        "                                 [--columns LABELS] [--name FORMAT]",
        "                                 [--histogram [FILENAME-PREFIX]]",
        "                                 [--csv [FILENAME]]",
        "                                 [glob_or_file [glob_or_file ...]]",
        "",
        "Compare saved runs.",
        "",
        "positional arguments:",
        "  glob_or_file          Glob or exact path for json files. If not specified",
        "                        all runs are loaded.",
        "",
        "optional arguments:",
        "  -h, --help            show this help message and exit",
        "  --sort COL            Column to sort on. Can be one of: 'min', 'max',",
        "                        'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
        "  --group-by LABEL      How to group tests. Can be one of: 'group', 'name',",
        "                        'fullname', 'func', 'fullfunc', 'param' or",
        "                        'param:NAME', where NAME is the name passed to",
        "                        @pytest.parametrize. Default: 'group'",
        "  --columns LABELS      Comma-separated list of columns to show in the result",
        "                        table. Default: 'min, max, mean, stddev, median, iqr,",
        "                        outliers, rounds, iterations'",
        "  --name FORMAT         How to format names in results. Can be one of 'short',",
        "                        'normal', 'long'. Default: 'normal'",
        "  --histogram [FILENAME-PREFIX]",
        "                        Plot graphs of min/max/avg/stddev over time in",
        "                        FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        "                        contains slashes ('/') then directories will be",
        "                        created. Default: 'benchmark_*'",
        "  --csv [FILENAME]      Save a csv report. If FILENAME contains slashes ('/')",
        "                        then directories will be created. Default:",
        "                        'benchmark_*'",
        "",
        "examples:",
        "",
        "    pytest-benchmark compare 'Linux-CPython-3.5-64bit/*'",
        "",
        "        Loads all benchmarks ran with that interpreter. Note the special quoting that disables your shell's glob",
        "        expansion.",
        "",
        "    pytest-benchmark compare 0001",
        "",
        "        Loads first run from all the interpreters.",
        "",
        "    pytest-benchmark compare /foo/bar/0001_abc.json /lorem/ipsum/0001_sir_dolor.json",
        "",
        "        Loads runs from exactly those files.",
    ])
    assert result.ret == 0

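# The bundled test_storage directory holds 30 saved runs, listed in order.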
def test_list(testdir):
    result = testdir.run('py.test-benchmark', '--storage', STORAGE, 'list')
    assert result.stderr.lines == []
    result.stdout.fnmatch_lines([
        '*0001_*.json',
        '*0002_*.json',
        '*0003_*.json',
        '*0004_*.json',
        '*0005_*.json',
        '*0006_*.json',
        '*0007_*.json',
        '*0008_*.json',
        '*0009_*.json',
        '*0010_*.json',
        '*0011_*.json',
        '*0012_*.json',
        '*0013_*.json',
        '*0014_*.json',
        '*0015_*.json',
        '*0016_*.json',
        '*0017_*.json',
        '*0018_*.json',
        '*0019_*.json',
        '*0020_*.json',
        '*0021_*.json',
        '*0022_*.json',
        '*0023_*.json',
        '*0024_*.json',
        '*0025_*.json',
        '*0026_*.json',
        '*0027_*.json',
        '*0028_*.json',
        '*0029_*.json',
        '*0030_*.json',
    ])
    assert result.ret == 0

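# End-to-end compare of runs 0001-0003: a csv report is written to disk and a
# stats table is printed on stdout. Note that `[[]0[]]` escapes the literal
# brackets of the parametrized test id, since fnmatch_lines treats [...] as a
# character set.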
@pytest.mark.parametrize('name', ['short', 'long', 'normal'])
def test_compare(testdir, name):
    result = testdir.run('py.test-benchmark', '--storage', STORAGE, 'compare', '0001', '0002', '0003',
                         '--sort', 'min',
                         '--columns', 'min,max',
                         '--name', name,
                         '--histogram', 'foobar',
                         '--csv', 'foobar')
    result.stderr.fnmatch_lines([
        'Generated csv: *foobar.csv'
    ])
    assert testdir.tmpdir.join('foobar.csv').readlines(cr=0) == [
        "name,min,max",
        "tests/test_normal.py::test_xfast_parametrized[0],2.1562856786391314e-07,1.0318615857292624e-05",
        "tests/test_normal.py::test_xfast_parametrized[0],2.1690275610947028e-07,7.739299681128525e-06",
        "tests/test_normal.py::test_xfast_parametrized[0],2.1731454219544334e-07,1.1447389118979421e-05",
        ""
    ]
    result.stdout.fnmatch_lines([
        'Computing stats ...',
        '---*--- benchmark: 3 tests ---*---',
        'Name (time in ns) *                   Min                    Max          ',
        '---*---',
        '*xfast_parametrized[[]0[]] (0003*)     215.6286 (1.0)      10,318.6159 (1.33)   ',
        '*xfast_parametrized[[]0[]] (0002*)     216.9028 (1.01)      7,739.2997 (1.0)    ',
        '*xfast_parametrized[[]0[]] (0001*)     217.3145 (1.01)     11,447.3891 (1.48)   ',
        '---*---',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])
    assert result.ret == 0