Completed: Pull Request against master (#58), created by unknown, 01:10

BaseReportBackend.handle_saving() (grade: B)

Complexity: Conditions 5
Size: Total Lines 35
Duplication: Lines 0, Ratio 0 %
Importance: Changes 2, Bugs 0, Features 0
Metric   Value
cc       5        (cyclomatic complexity; the 5 conditions above)
c        2        (changes)
b        0        (bugs)
f        0        (features)
dl       0        (duplicated lines)
loc      35       (total lines)
rs       8.0894
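
Numbers like cc and loc can be reproduced locally. Below is a minimal sketch using the radon library; this is an assumption, since the report does not say which analyzer produced these values, and the file path is hypothetical.

# Sketch only: radon is assumed to be the kind of analyzer behind
# cc/loc-style numbers; the file path below is hypothetical.
from radon.complexity import cc_visit
from radon.raw import analyze

with open("storage.py") as fh:  # hypothetical path to the reviewed file
    source = fh.read()

# Cyclomatic complexity per function/method (the report's "cc"/"Conditions")
for block in cc_visit(source):
    print(block.name, block.complexity)

# Raw line counts (the report's "loc"/"Total Lines")
print("loc:", analyze(source).loc)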
import abc

from ..utils import get_machine_id
from ..logger import Logger
from ..utils import safe_dumps


class BaseReportBackend:
    # NOTE: Python 2 syntax; under Python 3 this line has no effect and
    # `class BaseReportBackend(metaclass=abc.ABCMeta)` would be needed instead.
    __metaclass__ = abc.ABCMeta

    def __init__(self, config):
        self.verbose = config.getoption("benchmark_verbose")
        self.logger = Logger(self.verbose, config)
        self.config = config
        self.machine_id = get_machine_id()
        # Saving/loading behavior, all driven by command-line options.
        self.save = config.getoption("benchmark_save")
        self.autosave = config.getoption("benchmark_autosave")
        self.save_data = config.getoption("benchmark_save_data")
        self.json = config.getoption("benchmark_json")
        self.compare = config.getoption("benchmark_compare")
        self.storage = None  # set by concrete backends

    def _save_json(self, output_json):
        # `self.json` is an already-open file object here.
        with self.json as fh:
            fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
        self.logger.info("Wrote benchmark data in: %s" % self.json, purple=True)

    @abc.abstractmethod
    def _save(self, output_json, save):
        pass

    def handle_saving(self, benchmarks, machine_info):
        # `and` binds tighter than `or`: save when there are benchmarks and
        # a save name was requested, or whenever autosave is enabled.
        save = benchmarks and self.save or self.autosave
        if save or self.json:
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

        if self.json:
            # The JSON report always includes the raw timing data.
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=benchmarks,
                include_data=True,
                machine_info=machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=benchmarks,
                output_json=output_json,
            )
            self._save_json(output_json)

        if save:
            # Saved runs include raw data only if --benchmark-save-data was given.
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=benchmarks,
                include_data=self.save_data,
                machine_info=machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=benchmarks,
                output_json=output_json,
            )
            self._save(output_json, save)

    @abc.abstractmethod
    def _load(self, id_prefix=None):
        pass

    def handle_loading(self, machine_info):
        compared_mapping = {}
        if self.compare:
            if self.compare is True:
                # Bare --benchmark-compare: compare against the latest saved run.
                compared_benchmarks = list(self._load())[-1:]
            else:
                # Otherwise treat the option value as an id prefix to match.
                compared_benchmarks = list(self._load(self.compare))

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                    code = "BENCHMARK-C2"
                else:
                    msg += " match %r." % self.compare
                    code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=machine_info,
                    compared_benchmark=compared_benchmark,
                )
                # Index each loaded run's benchmarks by their full test name.
                compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmarks from: %s" % path)
        return compared_mapping
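
The two abstract methods are the entire backend contract: handle_saving and handle_loading own the hook orchestration and comparison logic, and delegate only the actual storage to _save and _load. A minimal sketch of a concrete backend follows; the class names, directory layout, and file-naming scheme are illustrative assumptions, not part of the reviewed code.

# Hypothetical backend; everything below is a sketch of the contract only.
import json
from pathlib import Path


class _DirectoryStorage:
    # Minimal storage shim: the base class stringifies `self.storage` in
    # messages and reads `self.storage.location` when warning.
    def __init__(self, path):
        self.path = Path(path)
        self.location = str(self.path)

    def __str__(self):
        return self.location


class FileReportBackend(BaseReportBackend):
    def __init__(self, config):
        super().__init__(config)
        self.storage = _DirectoryStorage(".benchmarks")  # assumed location

    def _save(self, output_json, save):
        # `save` is the requested run name, or True when autosaving.
        self.storage.path.mkdir(exist_ok=True)
        target = self.storage.path / ("%s_%s.json" % (self.machine_id, save))
        target.write_text(json.dumps(output_json, indent=4))
        self.logger.info("Saved benchmark data in: %s" % target)

    def _load(self, id_prefix=None):
        # Yield (path, data) pairs, matching what handle_loading iterates.
        for path in sorted(self.storage.path.glob("%s*.json" % (id_prefix or ""))):
            yield path, json.loads(path.read_text())

With that in place, handle_saving and handle_loading work unchanged: the template methods decide when and what to persist, and the subclass only decides where the bytes go.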