Passed
Pull Request — master (#206)
by Grega
01:39 queued 41s
created

NiaPy.runner   A

Complexity

Total Complexity 39

Size/Duplication

Total Lines 269
Duplicated Lines 0 %

Importance

Changes 0
Metric Value
eloc 143
dl 0
loc 269
rs 9.28
c 0
b 0
f 0
wmc 39

9 Methods

Rating   Name   Duplication   Size   Complexity  
F Runner.__export_to_latex() 0 69 14
A Runner.__init__() 0 18 1
A Runner.__export_to_json() 0 12 2
A Runner.__generate_export_name() 0 12 1
A Runner.benchmark_factory() 0 13 1
A Runner.__export_to_log() 0 4 1
D Runner.run() 0 57 13
A Runner.__create_export_dir() 0 5 2
A Runner.__export_to_xlsx() 0 25 4
"""Implementation of Runner utility class."""

import datetime
import json
import os
import logging

import xlsxwriter
from numpy import (
    amin,
    median,
    amax,
    mean,
    std
)

from NiaPy.algorithms import AlgorithmUtility

logging.basicConfig()
logger = logging.getLogger('NiaPy.runner.Runner')
logger.setLevel('INFO')

__all__ = ["Runner"]


class Runner:
    r"""Runner utility feature.

    Feature which enables running multiple algorithms with multiple benchmarks.
    It also supports exporting results in various formats (e.g. LaTeX, Excel, JSON).

    Attributes:
            D (int): Dimension of problem
            NP (int): Population size
            nFES (int): Number of function evaluations
            nRuns (int): Number of repetitions
            useAlgorithms (list of Algorithm): List of algorithms to run
            useBenchmarks (list of Benchmarks): List of benchmarks to run

    Returns:
            results (Dict[str, Dict]): Returns the results.

    """

    def __init__(self, D=10, nFES=1000000, nRuns=1, useAlgorithms='ArtificialBeeColonyAlgorithm', useBenchmarks='Ackley', **kwargs):
        r"""Initialize Runner.

        Args:
                D (int): Dimension of problem
                nFES (int): Number of function evaluations
                nRuns (int): Number of repetitions
                useAlgorithms (list of Algorithm): List of algorithms to run
                useBenchmarks (list of Benchmarks): List of benchmarks to run

        """

        self.D = D
        self.nFES = nFES
        self.nRuns = nRuns
        self.useAlgorithms = useAlgorithms
        self.useBenchmarks = useBenchmarks
        self.results = {}

    def benchmark_factory(self, name):
        r"""Create optimization task.

        Args:
                name (str): Benchmark name.

        Returns:
                Task: Optimization task to use.

        """

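        # Deferred (method-level) import; presumably done to avoid a circular
        # import between NiaPy.runner and NiaPy.task (assumption, not verified).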
        from NiaPy.task import StoppingTask, OptimizationType
        return StoppingTask(D=self.D, nFES=self.nFES, optType=OptimizationType.MINIMIZATION, benchmark=name)

    @classmethod
    def __create_export_dir(cls):
        r"""Create export directory if not already created."""
        if not os.path.exists("export"):
            os.makedirs("export")

    @classmethod
    def __generate_export_name(cls, extension):
        r"""Generate export file name.

        Args:
                extension (str): File format.

        Returns:
                str: Timestamped export file name.

        """

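        # Example of a generated name: "export/2019-04-26 14.32.10.123456.json".
        # Colons from the timestamp are replaced because they are not valid in
        # Windows file names.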
        return "export/" + str(datetime.datetime.now()).replace(":", ".") + "." + extension
97
98
    def __export_to_log(self):
99
        r"""Print the results to terminal."""
100
101
        print(self.results)
102
103
    def __export_to_json(self):
104
        r"""Export the results in the JSON form.
105
106
        See Also:
107
                * :func:`NiaPy.Runner.__createExportDir`
108
109
        """
110
111
        self.__create_export_dir()
112
        with open(self.__generate_export_name("json"), "w") as outFile:
113
            json.dump(self.results, outFile)
114
            logger.info("Export to JSON completed!")
115
116
    def __export_to_xlsx(self):
117
        r"""Export the results in the xlsx form.
118
119
        See Also:
120
                :func:`NiaPy.Runner.__generateExportName`
121
122
        """
123
124
        self.__create_export_dir()
125
        workbook = xlsxwriter.Workbook(self.__generate_export_name("xlsx"))
126
        worksheet = workbook.add_worksheet()
127
        row, col, nRuns = 0, 0, 0
128
129
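        # Intended layout: one block of columns per algorithm, starting with the
        # algorithm name, followed by one column per benchmark holding that
        # benchmark's per-run results.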
        for alg in self.results:
            _, col = worksheet.write(row, col, alg), col + 1
            for bench in self.results[alg]:
                worksheet.write(row, col, bench)
                nRuns = len(self.results[alg][bench])
                for i in range(len(self.results[alg][bench])):
                    _, row = worksheet.write(row, col, self.results[alg][bench][i]), row + 1
                row, col = row - len(self.results[alg][bench]), col + 1
            row, col = row + 1 + nRuns, col - 1 + len(self.results[alg])

        workbook.close()
        logger.info("Export to XLSX completed!")

    def __export_to_latex(self):
        r"""Export the results in the form of a LaTeX table.

        See Also:
                :func:`NiaPy.Runner.__create_export_dir`
                :func:`NiaPy.Runner.__generate_export_name`

        """

        self.__create_export_dir()

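        # The export writes a complete LaTeX document containing a single table:
        # a header row with the benchmark names, followed by one block of metric
        # rows (Best, Median, Worst, Mean, Std.) per algorithm.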
        metrics = ["Best", "Median", "Worst", "Mean", "Std."]

        def only_upper(s):
            return "".join(c for c in s if c.isupper())

        with open(self.__generate_export_name("tex"), "a") as outFile:
            outFile.write("\\documentclass{article}\n")
            outFile.write("\\usepackage[utf8]{inputenc}\n")
            outFile.write("\\usepackage{siunitx}\n")
            outFile.write("\\sisetup{\n")
            outFile.write("round-mode=places,round-precision=3}\n")
            outFile.write("\\begin{document}\n")
            outFile.write("\\begin{table}[h]\n")
            outFile.write("\\centering\n")
            begin_tabular = "\\begin{tabular}{cc"
            firstLine = "   &"
            for alg in self.results:
                for _i in range(len(self.results[alg])):
                    begin_tabular += "S"
                for benchmark in self.results[alg].keys():
                    firstLine += "  &   \\multicolumn{1}{c}{\\textbf{" + benchmark + "}}"
                firstLine += " \\\\"
                break
            begin_tabular += "}\n"
            outFile.write(begin_tabular)
            outFile.write("\\hline\n")
            outFile.write(firstLine + "\n")
            outFile.write("\\hline\n")
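            # One row per metric for each algorithm; the abbreviated algorithm
            # name (upper-case letters only, via only_upper) labels the middle
            # ("Worst") row of the block.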
            for alg in self.results:
                for metric in metrics:
                    line = ""
                    if metric != "Worst":
                        line += "   &   " + metric
                    else:
                        shortAlg = ""
                        if alg.endswith("Algorithm"):
                            shortAlg = only_upper(alg[:-9])
                        else:
                            shortAlg = only_upper(alg)
                        line += "\\textbf{" + shortAlg + "} &   " + metric
                    for benchmark in self.results[alg]:
                        if metric == "Best":
                            line += "   &   " + str(amin(self.results[alg][benchmark]))
                        elif metric == "Median":
                            line += "   &   " + str(median(self.results[alg][benchmark]))
                        elif metric == "Worst":
                            line += "   &   " + str(amax(self.results[alg][benchmark]))
                        elif metric == "Mean":
                            line += "   &   " + str(mean(self.results[alg][benchmark]))
                        else:
                            line += "   &   " + str(std(self.results[alg][benchmark]))
                    line += "   \\\\"
                    outFile.write(line + "\n")
                outFile.write("\\hline\n")
            outFile.write("\\end{tabular}\n")
            outFile.write("\\end{table}\n")
            outFile.write("\\end{document}")

        logger.info("Export to Latex completed!")

    def run(self, export="log", verbose=False):
        """Execute runner.

        Arguments:
                export (str): Takes export type (e.g. log, json, xlsx, latex) (default: "log")
                verbose (bool): Switch for verbose logging (default: {False})

        Raises:
                TypeError: Raises TypeError if export type is not supported

        Returns:
                dict: Returns dictionary of results

        See Also:
                * :func:`NiaPy.Runner.useAlgorithms`
                * :func:`NiaPy.Runner.useBenchmarks`
                * :func:`NiaPy.Runner.benchmark_factory`

        """

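        # Algorithms and benchmarks may be given either as instances or as
        # class-name strings; results are keyed by the class name in both cases.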
        for alg in self.useAlgorithms:
            if not isinstance(alg, "".__class__):
                alg_name = str(type(alg).__name__)
            else:
                alg_name = alg

            self.results[alg_name] = {}

            if verbose:
                logger.info("Running %s...", alg_name)

            for bench in self.useBenchmarks:
                if not isinstance(bench, "".__class__):
                    bench_name = str(type(bench).__name__)
                else:
                    bench_name = bench

                if verbose:
                    logger.info("Running %s algorithm on %s benchmark...", alg_name, bench_name)

                benchmark_stopping_task = self.benchmark_factory(bench)
                self.results[alg_name][bench_name] = []
                for _ in range(self.nRuns):
                    algorithm = AlgorithmUtility().get_algorithm(alg)
                    self.results[alg_name][bench_name].append(algorithm.run(benchmark_stopping_task))
            if verbose:
                logger.info("---------------------------------------------------")

        if export == "log":
            self.__export_to_log()
        elif export == "json":
            self.__export_to_json()
        elif export == "xlsx":
            self.__export_to_xlsx()
        elif export == "latex":
            self.__export_to_latex()
        else:
            raise TypeError("Passed export type is not supported!")

        return self.results
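
Example usage (a minimal sketch, not part of the reviewed file): it assumes Runner is importable from the NiaPy package root and that the algorithm and benchmark names below are registered in this NiaPy version.

from NiaPy import Runner

runner = Runner(
    D=10,
    nFES=10000,
    nRuns=3,
    useAlgorithms=["DifferentialEvolution", "ArtificialBeeColonyAlgorithm"],
    useBenchmarks=["Ackley", "Sphere"])
results = runner.run(export="json", verbose=True)  # export also accepts "log", "xlsx" or "latex"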