NiaPy.runner (rating: A)

Complexity
    Total Complexity: 22

Size/Duplication
    Total Lines: 203
    Duplicated Lines: 0%

Importance
    Changes: 0
Metric   Value
------   -----
eloc     77
dl       0
loc      203
rs       10
c        0
b        0
f        0
wmc      22

9 Methods

Rating   Name                                     Duplication   Size   Complexity
------   --------------------------------------   -----------   ----   ----------
A        Runner.__init__()                        0             18     1
A        Runner.__export_to_json()                0             12     1
A        Runner._export_to_xls()                  0             12     1
A        Runner.__generate_export_name()          0             13     1
A        Runner.benchmark_factory()               0             11     1
D        Runner.run()                             0             58     13
A        Runner.__create_export_dir()             0             5      2
A        Runner.__export_to_xlsx()                0             12     1
A        Runner.__export_to_dataframe_pickle()    0             12     1
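
Runner.run() is the only method rated D: its complexity of 13 comes from the nested algorithm/benchmark/repetition loops plus the four-way if/elif export chain at the end of the method (see the source listing below). As a hypothetical refactoring sketch, not part of the analyzed file, a dispatch table would collapse the export branches into a single lookup. The method name _dispatch_export is invented here, and the snippet assumes it is placed inside the Runner class so the name-mangled private exporters resolve:

    def _dispatch_export(self, export):
        # Map export keywords to the bound exporter methods defined in Runner.
        exporters = {
            "dataframe": self.__export_to_dataframe_pickle,
            "json": self.__export_to_json,
            "xls": self._export_to_xls,
            "xlsx": self.__export_to_xlsx,
        }
        if export not in exporters:
            raise TypeError("Passed export type {} is not supported!".format(export))
        exporters[export]()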
# encoding=utf8

"""Implementation of Runner utility class."""

from __future__ import print_function

import datetime
import os
import logging

import pandas as pd

from NiaPy.task import StoppingTask, OptimizationType
from NiaPy.algorithms import AlgorithmUtility

logging.basicConfig()
logger = logging.getLogger('NiaPy.runner.Runner')
logger.setLevel('INFO')

__all__ = ["Runner"]


class Runner:
    r"""Runner utility feature.

    Feature which enables running multiple algorithms with multiple benchmarks.
    It also supports exporting results in various formats (e.g. Pandas DataFrame, JSON, Excel).

    Attributes:
            D (int): Dimension of problem.
            NP (int): Population size.
            nFES (int): Number of function evaluations.
            nRuns (int): Number of repetitions.
            useAlgorithms (Union[List[str], List[Algorithm]]): List of algorithms to run.
            useBenchmarks (Union[List[str], List[Benchmark]]): List of benchmarks to run.

    Returns:
            results (Dict[str, Dict]): Returns the results.

    """

    def __init__(self, D=10, nFES=1000000, nRuns=1, useAlgorithms='ArtificialBeeColonyAlgorithm', useBenchmarks='Ackley', **kwargs):
        r"""Initialize Runner.

        Args:
                D (int): Dimension of problem.
                nFES (int): Number of function evaluations.
                nRuns (int): Number of repetitions.
                useAlgorithms (List[Algorithm]): List of algorithms to run.
                useBenchmarks (List[Benchmark]): List of benchmarks to run.

        """
        self.D = D
        self.nFES = nFES
        self.nRuns = nRuns
        self.useAlgorithms = useAlgorithms
        self.useBenchmarks = useBenchmarks
        self.results = {}

    def benchmark_factory(self, name):
        r"""Create optimization task.

        Args:
                name (str): Benchmark name.

        Returns:
                Task: Optimization task to use.

        """
        return StoppingTask(D=self.D, nFES=self.nFES, optType=OptimizationType.MINIMIZATION, benchmark=name)

    @classmethod
    def __create_export_dir(cls):
        r"""Create export directory if not already created."""
        if not os.path.exists("export"):
            os.makedirs("export")

    @classmethod
    def __generate_export_name(cls, extension):
        r"""Generate export file name.

        Args:
                extension (str): File format.

        Returns:
                str: Path of the generated export file.

        """
        Runner.__create_export_dir()
        return "export/" + str(datetime.datetime.now()).replace(":", ".") + "." + extension

    def __export_to_dataframe_pickle(self):
        r"""Export the results as a pandas DataFrame pickle.

        See Also:
                * :func:`NiaPy.Runner.__create_export_dir`
                * :func:`NiaPy.Runner.__generate_export_name`

        """
        dataframe = pd.DataFrame.from_dict(self.results)
        dataframe.to_pickle(self.__generate_export_name("pkl"))
        logger.info("Export to Pandas DataFrame pickle (pkl) completed!")

    def __export_to_json(self):
        r"""Export the results to a JSON file.

        See Also:
                * :func:`NiaPy.Runner.__create_export_dir`
                * :func:`NiaPy.Runner.__generate_export_name`

        """
        dataframe = pd.DataFrame.from_dict(self.results)
        dataframe.to_json(self.__generate_export_name("json"))
        logger.info("Export to JSON file completed!")

    def _export_to_xls(self):
        r"""Export the results to an XLS file.

        See Also:
                * :func:`NiaPy.Runner.__create_export_dir`
                * :func:`NiaPy.Runner.__generate_export_name`

        """
        dataframe = pd.DataFrame.from_dict(self.results)
        dataframe.to_excel(self.__generate_export_name("xls"))
        logger.info("Export to XLS completed!")

    def __export_to_xlsx(self):
        r"""Export the results to an XLSX file.

        See Also:
                * :func:`NiaPy.Runner.__create_export_dir`
                * :func:`NiaPy.Runner.__generate_export_name`

        """
        dataframe = pd.DataFrame.from_dict(self.results)
        dataframe.to_excel(self.__generate_export_name("xlsx"))
        logger.info("Export to XLSX file completed!")

    def run(self, export="dataframe", verbose=False):
        """Execute runner.

        Args:
                export (str): Export type (e.g. dataframe, json, xls, xlsx) (default: "dataframe").
                verbose (bool): Switch for verbose logging (default: False).

        Raises:
                TypeError: Raised if the export type is not supported.

        Returns:
                dict: Dictionary of results.

        See Also:
                * :func:`NiaPy.Runner.useAlgorithms`
                * :func:`NiaPy.Runner.useBenchmarks`
                * :func:`NiaPy.Runner.benchmark_factory`

        """
        for alg in self.useAlgorithms:
            if not isinstance(alg, str):
                alg_name = str(type(alg).__name__)
            else:
                alg_name = alg

            self.results[alg_name] = {}

            if verbose:
                logger.info("Running %s...", alg_name)

            for bench in self.useBenchmarks:
                if not isinstance(bench, str):
                    bench_name = str(type(bench).__name__)
                else:
                    bench_name = bench

                if verbose:
                    logger.info("Running %s algorithm on %s benchmark...", alg_name, bench_name)

                self.results[alg_name][bench_name] = []
                for _ in range(self.nRuns):
                    algorithm = AlgorithmUtility().get_algorithm(alg)
                    benchmark_stopping_task = self.benchmark_factory(bench)
                    self.results[alg_name][bench_name].append(algorithm.run(benchmark_stopping_task))
            if verbose:
                logger.info("---------------------------------------------------")
        if export == "dataframe":
            self.__export_to_dataframe_pickle()
        elif export == "json":
            self.__export_to_json()
        elif export == "xls":
            self._export_to_xls()
        elif export == "xlsx":
            self.__export_to_xlsx()
        else:
            raise TypeError("Passed export type {} is not supported!".format(export))
        return self.results
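
For context, a minimal usage sketch of the class analyzed above. The algorithm and benchmark names are illustrative strings resolved by AlgorithmUtility and benchmark_factory; both lists also accept instances, per the Union types in the class docstring.

    from NiaPy.runner import Runner

    runner = Runner(
        D=10,                  # problem dimension
        nFES=10000,            # function evaluations per run
        nRuns=3,               # repetitions per algorithm/benchmark pair
        useAlgorithms=["ArtificialBeeColonyAlgorithm", "DifferentialEvolution"],
        useBenchmarks=["Ackley", "Sphere"],
    )
    # run() returns the results dict and also writes export/<timestamp>.json
    results = runner.run(export="json", verbose=True)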