Completed
Pull Request — master (#58)
by
unknown
01:47
created

ElasticReportBackend.__init__()   A

Complexity

Conditions 1

Size

Total Lines 11

Duplication

Lines 0
Ratio 0 %

Importance

Changes 3
Bugs 0 Features 0
Metric Value
cc 1
c 3
b 0
f 0
dl 0
loc 11
rs 9.4285
from .base_report_backend import BaseReportBackend
from ..elasticsearch_storage import ElasticsearchStorage


class ElasticReportBackend(BaseReportBackend):
    """Report backend that persists pytest-benchmark results to Elasticsearch.

    Connection settings are read from the pytest config options
    (``benchmark_elasticsearch_*``); actual document storage is delegated to
    ``ElasticsearchStorage``.
    """

    def __init__(self, config):
        """Collect elasticsearch options and build the storage client.

        :param config: pytest ``Config`` object providing ``getoption``.
        """
        self.elasticsearch_hosts = config.getoption("benchmark_elasticsearch_hosts")
        self.elasticsearch_index = config.getoption("benchmark_elasticsearch_index")
        self.elasticsearch_doctype = config.getoption("benchmark_elasticsearch_doctype")
        self.project_name = config.getoption("benchmark_project")
        # The base class initializes self.logger and self.machine_id, which the
        # storage client below depends on, so it must run before storage setup.
        super(ElasticReportBackend, self).__init__(config)
        self.storage = ElasticsearchStorage(
            self.elasticsearch_hosts,
            self.elasticsearch_index,
            self.elasticsearch_doctype,
            self.logger,
            default_machine_id=self.machine_id,
        )

    def handle_saving(self, benchmarks, machine_info):
        """Save benchmark records to elasticsearch when saving is enabled.

        Each benchmark record is flattened with the top-level report info
        (commit info, machine info, ...) and stored as its own document, keyed
        by the save id and the benchmark's fullname.

        :param benchmarks: list of benchmark result records to store.
        :param machine_info: machine description dict included in the report.
        """
        # BUGFIX: the original expression `benchmarks and self.save or
        # self.autosave` parses as `(benchmarks and self.save) or self.autosave`
        # due to `and`/`or` precedence, so with no benchmarks but autosave
        # enabled it still "saved" — generating a report from nothing and
        # logging a misleading success message. Only save when there is data.
        save = (self.save or self.autosave) if benchmarks else None
        if not save:
            return

        commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
        self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

        output_json = self.config.hook.pytest_benchmark_generate_json(
            config=self.config,
            benchmarks=benchmarks,
            include_data=self.save_data,
            machine_info=machine_info,
            commit_info=commit_info,
        )
        self.config.hook.pytest_benchmark_update_json(
            config=self.config,
            benchmarks=benchmarks,
            output_json=output_json,
        )
        output_benchmarks = output_json.pop("benchmarks")
        for bench in output_benchmarks:
            # Add top level info from output_json dict to each record so every
            # stored document is self-contained.
            bench.update(output_json)
            # NOTE(review): when triggered by autosave, `save` may be a
            # non-string (e.g. True), which still formats into the id — confirm
            # this id scheme is intended.
            doc_id = "%s_%s" % (save, bench["fullname"])
            self.storage.save(bench, doc_id)
        self.logger.info("Saved benchmark data to %s to index %s as doctype %s" %
                         (
                             self.elasticsearch_hosts,
                             self.elasticsearch_index,
                             self.elasticsearch_doctype
                         )
                         )

    def handle_loading(self, machine_info):
        """Load previously stored benchmarks for comparison.

        Only the most recent stored record for the configured project is
        considered (note the ``[-1:]`` slice). Returns a mapping of
        ``commit_time -> {fullname: benchmark}`` (empty when comparison is
        disabled or nothing was found).

        :param machine_info: current machine description, passed to the
            machine-info comparison hook.
        """
        compared_mapping = {}
        if self.compare:
            # Keep only the latest (commit_time, record) pair from storage.
            compared_benchmarks = list(self.storage.load(self.project_name))[-1:]

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark records in project %s in elastic %s." % (self.project_name, self.storage.location)
                code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

            for commit_time, compared_benchmark in compared_benchmarks:
                # Let plugins veto/adjust comparison across differing machines.
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=machine_info,
                    compared_benchmark=compared_benchmark,
                )
                compared_mapping[commit_time] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmarks from: %s" % commit_time)

        return compared_mapping
74