Passed
Pull Request — master (#206)
by Grega, created 01:29

NiaPy.runner.Runner.__init__()   Rating: A

Complexity: Conditions 1
Size: Total Lines 21, Code Lines 7
Duplication: Lines 0, Ratio 0 %
Importance: Changes 0
Metric   Value
cc       1
eloc     7
nop      8
dl       0
loc      21
rs       10
c        0
b        0
f        0

How to fix: Many Parameters

Methods with many parameters are not only hard to understand; their parameters also often become inconsistent when you need more or different data.

There are several approaches to avoid long parameter lists. One common approach is to introduce a parameter (configuration) object that bundles the related settings, as sketched below.
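The following is a minimal, hypothetical sketch of that idea applied to a constructor shaped like Runner.__init__(); the RunnerConfig class and its defaults are illustrative and not part of NiaPy:

from dataclasses import dataclass, field

@dataclass
class RunnerConfig:
    """Hypothetical parameter object bundling the settings Runner.__init__() currently takes."""
    D: int = 10                  # dimension of the problem
    nFES: int = 1000000          # number of function evaluations
    nGEN: int = 100000           # number of generations
    nRuns: int = 1               # number of repetitions
    useAlgorithms: list = field(default_factory=lambda: ['ArtificialBeeColonyAlgorithm'])
    useBenchmarks: list = field(default_factory=lambda: ['Ackley'])

class Runner:
    def __init__(self, config=None):
        # A single parameter object replaces the eight separate constructor arguments.
        self.config = config if config is not None else RunnerConfig()

Callers then build one RunnerConfig and pass it around, which keeps the constructor signature stable when new settings are added later.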

"""Implementation of Runner utility class."""

import datetime
import json
import os
import logging

import xlsxwriter
from numpy import (
    amin,
    median,
    amax,
    mean,
    std
)


from NiaPy.algorithms import AlgorithmUtility

logging.basicConfig()
logger = logging.getLogger('NiaPy.runner.Runner')
logger.setLevel('INFO')

__all__ = ["Runner"]


class Runner:
    r"""Runner utility feature.

    Feature which enables running multiple algorithms with multiple benchmarks.
    It also supports exporting results in various formats (e.g. LaTeX, Excel, JSON).

    Attributes:
            D (int): Dimension of problem
            NP (int): Population size
            nFES (int): Number of function evaluations
            nRuns (int): Number of repetitions
            useAlgorithms (list of Algorithm): List of algorithms to run
            useBenchmarks (list of Benchmarks): List of benchmarks to run

    Returns:
            results (Dict[str, Dict]): Returns the results.

    """

    def __init__(self, D=10, nFES=1000000, nGEN=100000, nRuns=1, useAlgorithms='ArtificialBeeColonyAlgorithm', useBenchmarks='Ackley', **kwargs):
        r"""Initialize Runner.

        **__init__(self, D, nFES, nGEN, nRuns, useAlgorithms, useBenchmarks, ...)**

        Arguments:
                D (int): Dimension of problem
                nFES (int): Number of function evaluations
                nGEN (int): Number of generations
                nRuns (int): Number of repetitions
                useAlgorithms (list of Algorithm): List of algorithms to run
                useBenchmarks (list of Benchmarks): List of benchmarks to run

        """

        self.D = D
        self.nFES = nFES
        self.nRuns = nRuns
        self.useAlgorithms = useAlgorithms
        self.useBenchmarks = useBenchmarks
        self.results = {}

    def benchmark_factory(self, name):
        r"""Create optimization task.

        Args:
                name (str): Benchmark name.

        Returns:
                Task: Optimization task to use.

        """

        from NiaPy.task import StoppingTask, OptimizationType
        return StoppingTask(D=self.D, nFES=self.nFES, optType=OptimizationType.MINIMIZATION, benchmark=name)

    @classmethod
    def __create_export_dir(cls):
        r"""Create export directory if not already created."""
        if not os.path.exists("export"):
            os.makedirs("export")

    @classmethod
    def __generate_export_name(cls, extension):
        r"""Generate export file name.

        Args:
                extension (str): File format.

        Returns:
                str: Export file name (a path with a timestamp and the given extension).

        """

        return "export/" + str(datetime.datetime.now()).replace(":", ".") + "." + extension

    def __export_to_log(self):
        r"""Print the results to terminal."""

        print(self.results)

    def __export_to_json(self):
        r"""Export the results in the JSON form.

        See Also:
                * :func:`NiaPy.Runner.__createExportDir`

        """

        self.__create_export_dir()
        with open(self.__generate_export_name("json"), "w") as outFile:
            json.dump(self.results, outFile)
            logger.info("Export to JSON completed!")

    def __export_to_xlsx(self):
        r"""Export the results in the xlsx form.

        See Also:
                :func:`NiaPy.Runner.__generateExportName`

        """

        self.__create_export_dir()
        workbook = xlsxwriter.Workbook(self.__generate_export_name("xlsx"))
        worksheet = workbook.add_worksheet()
        row, col, nRuns = 0, 0, 0

        for alg in self.results:
            _, col = worksheet.write(row, col, alg), col + 1
            for bench in self.results[alg]:
                worksheet.write(row, col, bench)
                nRuns = len(self.results[alg][bench])
                for i in range(len(self.results[alg][bench])):
                    _, row = worksheet.write(row, col, self.results[alg][bench][i]), row + 1
                row, col = row - len(self.results[alg][bench]), col + 1
            row, col = row + 1 + nRuns, col - 1 + len(self.results[alg])

        workbook.close()
        logger.info("Export to XLSX completed!")

    def __export_to_latex(self):
        r"""Export the results in the form of a LaTeX table.

        See Also:
                :func:`NiaPy.Runner.__createExportDir`
                :func:`NiaPy.Runner.__generateExportName`

        """

        self.__create_export_dir()

        metrics = ["Best", "Median", "Worst", "Mean", "Std."]

        def only_upper(s):
            return "".join(c for c in s if c.isupper())

        with open(self.__generate_export_name("tex"), "a") as outFile:
            outFile.write("\\documentclass{article}\n")
            outFile.write("\\usepackage[utf8]{inputenc}\n")
            outFile.write("\\usepackage{siunitx}\n")
            outFile.write("\\sisetup{\n")
            outFile.write("round-mode=places,round-precision=3}\n")
            outFile.write("\\begin{document}\n")
            outFile.write("\\begin{table}[h]\n")
            outFile.write("\\centering\n")
            begin_tabular = "\\begin{tabular}{cc"
            for alg in self.results:
                for _i in range(len(self.results[alg])):
                    begin_tabular += "S"
                firstLine = "   &"
                for benchmark in self.results[alg].keys():
                    firstLine += "  &   \\multicolumn{1}{c}{\\textbf{" + benchmark + "}}"
                firstLine += " \\\\"
                break
            begin_tabular += "}\n"
            outFile.write(begin_tabular)
            outFile.write("\\hline\n")
            outFile.write(firstLine + "\n")
Issue introduced by this change: The variable firstLine does not seem to be defined for all execution paths.
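            # Editor's note (added here, not part of the original source): as flagged above,
            # firstLine is bound only inside the "for alg in self.results" loop, so the
            # preceding write() call would raise NameError when self.results is empty;
            # initializing firstLine = "" before that loop would be one way to guard this path.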
            outFile.write("\\hline\n")
            for alg in self.results:
                for metric in metrics:
                    line = ""
                    if metric != "Worst":
                        line += "   &   " + metric
                    else:
                        shortAlg = ""
                        if alg.endswith("Algorithm"):
                            shortAlg = only_upper(alg[:-9])
                        else:
                            shortAlg = only_upper(alg)
                        line += "\\textbf{" + shortAlg + "} &   " + metric
                        for benchmark in self.results[alg]:
                            if metric == "Best":
                                line += "   &   " + str(amin(self.results[alg][benchmark]))
                            elif metric == "Median":
                                line += "   &   " + str(median(self.results[alg][benchmark]))
                            elif metric == "Worst":
                                line += "   &   " + str(amax(self.results[alg][benchmark]))
                            elif metric == "Mean":
                                line += "   &   " + str(mean(self.results[alg][benchmark]))
                            else:
                                line += "   &   " + str(std(self.results[alg][benchmark]))
                        line += "   \\\\"
                        outFile.write(line + "\n")
                    outFile.write("\\hline\n")
                outFile.write("\\end{tabular}\n")
                outFile.write("\\end{table}\n")
                outFile.write("\\end{document}")
        logger.info("Export to Latex completed!")

    def run(self, export="log", verbose=False):
        """Execute runner.

        Arguments:
                export (str): Takes export type (e.g. log, json, xlsx, latex) (default: "log")
                verbose (bool): Switch for verbose logging (default: {False})

        Raises:
                TypeError: Raises TypeError if export type is not supported

        Returns:
                dict: Returns dictionary of results

        See Also:
                * :func:`NiaPy.Runner.useAlgorithms`
                * :func:`NiaPy.Runner.useBenchmarks`
                * :func:`NiaPy.Runner.__algorithmFactory`

        """

        for alg in self.useAlgorithms:
            if not isinstance(alg, "".__class__):
                alg_name = str(type(alg).__name__)
            else:
                alg_name = alg

            self.results[alg_name] = {}

            if verbose:
                logger.info("Running %s...", alg_name)

            for bench in self.useBenchmarks:
                if not isinstance(bench, "".__class__):
                    bench_name = str(type(bench).__name__)
                else:
                    bench_name = bench

                if verbose:
                    logger.info("Running %s algorithm on %s benchmark...", alg_name, bench_name)

                benchmark_stopping_task = self.benchmark_factory(bench)
                self.results[alg_name][bench_name] = []
                for _ in range(self.nRuns):
                    algorithm = AlgorithmUtility().get_algorithm(alg)
                    self.results[alg_name][bench_name].append(algorithm.run(benchmark_stopping_task))
            if verbose:
                logger.info("---------------------------------------------------")
        if export == "log":
            self.__export_to_log()
        elif export == "json":
            self.__export_to_json()
        elif export == "xlsx":
            self.__export_to_xlsx()
        elif export == "latex":
            self.__export_to_latex()
        else:
            raise TypeError("Passed export type is not supported!")
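
For context, a minimal usage sketch of the Runner class listed above (assuming the NiaPy package layout from this PR; the argument values are illustrative, and the algorithm and benchmark names are the defaults from the constructor):

from NiaPy.runner import Runner

runner = Runner(
    D=10,
    nFES=10000,
    nRuns=3,
    useAlgorithms=['ArtificialBeeColonyAlgorithm'],
    useBenchmarks=['Ackley'],
)
runner.run(export="json", verbose=True)
# Per-algorithm, per-benchmark results are collected in runner.results and,
# for export="json", written to a timestamped file under the export/ directory.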