src.pytest_benchmark.pytest_benchmark_group_stats()   A

Metric                  Value
Complexity (cc)         1 condition
Duplication (dl)        0 lines (0 %)
Size (loc)              19 lines
rs                      9.4285

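# Hook specifications for pytest-benchmark. The bodies below are intentionally
# empty placeholders ("pass"): the plugin supplies the default behaviour, and a
# project may override any hook from its conftest.py, as the docstrings show.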
def pytest_benchmark_generate_machine_info(config):
    """
    To completely replace the generated machine_info, do something like this:

    .. sourcecode:: python

        def pytest_benchmark_generate_machine_info(config):
            return {'user': getpass.getuser()}
    """
    pass


def pytest_benchmark_update_machine_info(config, info):
    """
    If benchmarks are compared and machine_info is different, then warnings will be shown.

    To add the current user to the machine_info, override the hook in your conftest.py like this:

    .. sourcecode:: python

        def pytest_benchmark_update_machine_info(config, info):
            info['user'] = getpass.getuser()
    """
    pass


def pytest_benchmark_generate_commit_info(config):
    """
    To completely replace the generated commit_info, do something like this:

    .. sourcecode:: python

        def pytest_benchmark_generate_commit_info(config):
            return {'id': subprocess.check_output(['svnversion']).strip()}
    """
    pass


def pytest_benchmark_update_commit_info(config, info):
    """
    To add something into the commit_info, like the commit message, do something like this:

    .. sourcecode:: python

        def pytest_benchmark_update_commit_info(config, info):
            info['message'] = subprocess.check_output(['git', 'log', '-1', '--pretty=%B']).strip()
    """
    pass


def pytest_benchmark_group_stats(config, benchmarks, group_by):
    """
    You may perform grouping customization here, in case the builtin grouping doesn't suit you.

    Example:

    .. sourcecode:: python

        @pytest.mark.hookwrapper
        def pytest_benchmark_group_stats(config, benchmarks, group_by):
            outcome = yield
            if group_by == "special":  # when you use --benchmark-group-by=special
                result = defaultdict(list)
                for bench in benchmarks:
                    # `bench.special` doesn't exist, replace with whatever you need
                    result[bench.special].append(bench)
                outcome.force_result(result.items())
    """
    pass


def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_info, commit_info):
    """
    You should read pytest-benchmark's code if you really need to wholly customize the json.

    .. warning::

        Improperly customizing this may cause breakage if ``--benchmark-compare`` or ``--benchmark-histogram`` are used.

    By default, ``pytest_benchmark_generate_json`` strips benchmarks that have errors from the output. To prevent this,
    implement the hook like this:

    .. sourcecode:: python

        @pytest.mark.hookwrapper
        def pytest_benchmark_generate_json(config, benchmarks, include_data, machine_info, commit_info):
            for bench in benchmarks:
                bench.has_error = False
            yield
    """
    pass


def pytest_benchmark_update_json(config, benchmarks, output_json):
    """
    Use this to add custom fields in the output JSON.

    Example:

    .. sourcecode:: python

        def pytest_benchmark_update_json(config, benchmarks, output_json):
            output_json['foo'] = 'bar'
    """
    pass


def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    """
    You may want to use this hook to implement custom checks or abort execution.
    The ``pytest-benchmark`` builtin hook does this:

    .. sourcecode:: python

        def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
            if compared_benchmark["machine_info"] != machine_info:
                benchmarksession.logger.warn(
                    "Benchmark machine_info is different. Current: %s VS saved: %s." % (
                        format_dict(machine_info),
                        format_dict(compared_benchmark["machine_info"]),
                    )
                )
    """
    pass


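# The four hooks below are "first result" hooks: the hook call stops at the
# first implementation that returns a non-None value, which is what lets a
# conftest.py completely replace the generated machine_info, commit_info,
# JSON, or grouping.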
pytest_benchmark_generate_commit_info.firstresult = True
pytest_benchmark_generate_json.firstresult = True
pytest_benchmark_generate_machine_info.firstresult = True
pytest_benchmark_group_stats.firstresult = True
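
As a usage sketch (not part of the file above), a project-level conftest.py could override a few of these hooks. The hook names and signatures come from the specifications listed here; the specific fields added ('user', 'branch') and the choice to abort on a machine mismatch are illustrative assumptions only:

# conftest.py -- illustrative sketch, not part of the module above
import getpass
import subprocess

import pytest


def pytest_benchmark_update_machine_info(config, info):
    # Record who ran the benchmarks alongside the generated machine data.
    info['user'] = getpass.getuser()


def pytest_benchmark_update_commit_info(config, info):
    # Attach the current git branch to the stored commit information.
    info['branch'] = subprocess.check_output(
        ['git', 'rev-parse', '--abbrev-ref', 'HEAD']
    ).decode().strip()


def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    # Example of a custom check: abort the run instead of merely warning
    # when the saved benchmark was recorded on a different machine.
    if compared_benchmark["machine_info"] != machine_info:
        pytest.exit("Refusing to compare benchmarks recorded on a different machine.")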