Passed: Pull Request on master (#206), created by Grega, 01:38

NiaPy (rating: A)

Complexity
    Total Complexity: 39

Size/Duplication
    Total Lines: 271
    Duplicated Lines: 0 %

Importance
    Changes: 0

Metric  Value
eloc    145
dl      0
loc     271
rs      9.28
c       0
b       0
f       0
wmc     39

9 Methods

Rating  Name                           Duplication  Size  Complexity
A       Runner.__init__()              0            21    1
A       Runner.__exportToJson()        0            12    2
A       Runner.__exportToXls()         0            25    4
F       Runner.__exportToLatex()       0            69    14
A       Runner.__exportToLog()         0            4     1
D       Runner.run()                   0            57    13
A       Runner.benchmarkFactory()      0            13    1
A       Runner.__generateExportName()  0            12    1
A       Runner.__createExportDir()     0            5     2
# encoding=utf8

"""Python micro framework for building nature-inspired algorithms."""

from __future__ import print_function

import os
import logging
import json
import datetime
import xlsxwriter
from numpy import amin, amax, median, mean, std

from NiaPy import util, algorithms, benchmarks, task
from NiaPy.algorithms import AlgorithmUtility

__all__ = ["algorithms", "benchmarks", "util", "task"]
__project__ = "NiaPy"
__version__ = "2.0.0rc4"

VERSION = "{0} v{1}".format(__project__, __version__)

logging.basicConfig()
logger = logging.getLogger("NiaPy")
logger.setLevel("INFO")

class Runner:
    r"""Runner utility feature.

    Feature which enables running multiple algorithms with multiple benchmarks.
    It also supports exporting results in various formats (e.g. LaTeX, Excel, JSON).

    Attributes:
            D (int): Dimension of problem
            NP (int): Population size
            nFES (int): Number of function evaluations
            nRuns (int): Number of repetitions
            useAlgorithms (list of Algorithm): List of algorithms to run
            useBenchmarks (list of Benchmarks): List of benchmarks to run
            results (dict): Dictionary containing the results

    """
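
    # Example (illustrative sketch): run two algorithms on two benchmarks and
    # print the collected results to the log. The parameter values are arbitrary;
    # the algorithm and benchmark names are ones shipped with NiaPy.
    #
    #     runner = Runner(D=10, nFES=1000, nRuns=3,
    #                     useAlgorithms=["ArtificialBeeColonyAlgorithm", "GreyWolfOptimizer"],
    #                     useBenchmarks=["Ackley", "Sphere"])
    #     runner.run(export="log", verbose=True)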

    def __init__(self, D=10, nFES=100, nRuns=1, useAlgorithms="ArtificialBeeColonyAlgorithm", useBenchmarks="Ackley"):
        r"""Initialize Runner.

        **__init__(self, D, nFES, nRuns, useAlgorithms, useBenchmarks)**

        Arguments:
                D (int): Dimension of problem
                nFES (int): Number of function evaluations
                nRuns (int): Number of repetitions
                useAlgorithms (list of Algorithm): List of algorithms to run
                useBenchmarks (list of Benchmarks): List of benchmarks to run

        """

        self.D = D
        self.nFES = nFES
        self.nRuns = nRuns
        self.useAlgorithms = useAlgorithms
        self.useBenchmarks = useBenchmarks
        self.results = {}

    def benchmarkFactory(self, name):
        r"""Create optimization task.

        Args:
                name (str): Benchmark name.

        Returns:
                Task: Optimization task to use.

        """

        from NiaPy.task import StoppingTask, OptimizationType
        return StoppingTask(D=self.D, nFES=self.nFES, optType=OptimizationType.MINIMIZATION, benchmark=name)

    @classmethod
    def __createExportDir(cls):
        r"""Create the export directory if it does not already exist."""
        if not os.path.exists("export"):
            os.makedirs("export")

    @classmethod
    def __generateExportName(cls, extension):
        r"""Generate export file name.

        Args:
                extension (str): File format.

        Returns:
                str: Relative path of the export file.

        """

        return "export/" + str(datetime.datetime.now()).replace(":", ".") + "." + extension

    def __exportToLog(self):
        r"""Print the results to the terminal."""

        print(self.results)

    def __exportToJson(self):
        r"""Export the results in JSON form.

        See Also:
                * :func:`NiaPy.Runner.__createExportDir`

        """

        self.__createExportDir()
        with open(self.__generateExportName("json"), "w") as outFile:
            json.dump(self.results, outFile)
            logger.info("Export to JSON completed!")

    def __exportToXls(self):
        r"""Export the results to an XLSX workbook.

        See Also:
                * :func:`NiaPy.Runner.__createExportDir`
                * :func:`NiaPy.Runner.__generateExportName`

        """

        self.__createExportDir()
        workbook = xlsxwriter.Workbook(self.__generateExportName("xlsx"))
        worksheet = workbook.add_worksheet()
        row, col, nRuns = 0, 0, 0

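        # Descriptive note: row/col act as a write cursor into the sheet. For every
        # algorithm the loop below writes the algorithm name, then one column per
        # benchmark holding that benchmark's per-run results; row is rewound after
        # each column and both indices are moved past the block before the next
        # algorithm is written.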
        for alg in self.results:
            _, col = worksheet.write(row, col, alg), col + 1
            for bench in self.results[alg]:
                worksheet.write(row, col, bench)
                nRuns = len(self.results[alg][bench])
                for i in range(len(self.results[alg][bench])):
                    _, row = worksheet.write(row, col, self.results[alg][bench][i]), row + 1
                row, col = row - len(self.results[alg][bench]), col + 1
            row, col = row + 1 + nRuns, col - 1 + len(self.results[alg])

        workbook.close()
        logger.info("Export to XLSX completed!")

    def __exportToLatex(self):
        r"""Export the results in the form of a LaTeX table.

        See Also:
                * :func:`NiaPy.Runner.__createExportDir`
                * :func:`NiaPy.Runner.__generateExportName`

        """

        self.__createExportDir()

        metrics = ["Best", "Median", "Worst", "Mean", "Std."]

        def only_upper(s):
            return "".join(c for c in s if c.isupper())

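        # Descriptive note: the block below writes a standalone LaTeX document with
        # a single table. The preamble configures siunitx to round to three decimal
        # places, the tabular gets one S column per benchmark, and the header row
        # lists the benchmark names taken from the first algorithm's results.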
        with open(self.__generateExportName("tex"), "a") as outFile:
            outFile.write("\\documentclass{article}\n")
            outFile.write("\\usepackage[utf8]{inputenc}\n")
            outFile.write("\\usepackage{siunitx}\n")
            outFile.write("\\sisetup{\n")
            outFile.write("round-mode=places,round-precision=3}\n")
            outFile.write("\\begin{document}\n")
            outFile.write("\\begin{table}[h]\n")
            outFile.write("\\centering\n")
            begin_tabular = "\\begin{tabular}{cc"
            for alg in self.results:
                for _i in range(len(self.results[alg])):
                    begin_tabular += "S"
                firstLine = "   &"
                for benchmark in self.results[alg].keys():
                    firstLine += "  &   \\multicolumn{1}{c}{\\textbf{" + benchmark + "}}"
                firstLine += " \\\\"
                break
            begin_tabular += "}\n"
            outFile.write(begin_tabular)
            outFile.write("\\hline\n")
            outFile.write(firstLine + "\n")
            # Analysis issue flagged on the line above: the variable firstLine does
            # not seem to be defined for all execution paths.
            outFile.write("\\hline\n")
            for alg in self.results:
                for metric in metrics:
                    line = ""
                    if metric != "Worst":
                        line += "   &   " + metric
                    else:
                        shortAlg = ""
                        if alg.endswith("Algorithm"):
                            shortAlg = only_upper(alg[:-9])
                        else:
                            shortAlg = only_upper(alg)
                        line += "\\textbf{" + shortAlg + "} &   " + metric
                        for benchmark in self.results[alg]:
                            if metric == "Best":
                                line += "   &   " + str(amin(self.results[alg][benchmark]))
                            elif metric == "Median":
                                line += "   &   " + str(median(self.results[alg][benchmark]))
                            elif metric == "Worst":
                                line += "   &   " + str(amax(self.results[alg][benchmark]))
                            elif metric == "Mean":
                                line += "   &   " + str(mean(self.results[alg][benchmark]))
                            else:
                                line += "   &   " + str(std(self.results[alg][benchmark]))
                        line += "   \\\\"
                        outFile.write(line + "\n")
                    outFile.write("\\hline\n")
                outFile.write("\\end{tabular}\n")
                outFile.write("\\end{table}\n")
                outFile.write("\\end{document}")
        logger.info("Export to LaTeX completed!")

    def run(self, export="log", verbose=False):
        """Execute runner.

        Arguments:
                export (str): Export type (e.g. log, json, xlsx, latex) (default: "log")
                verbose (bool): Switch for verbose logging (default: {False})

        Raises:
                TypeError: Raises TypeError if export type is not supported

        Returns:
                dict: Returns dictionary of results

        See Also:
                * :func:`NiaPy.Runner.useAlgorithms`
                * :func:`NiaPy.Runner.useBenchmarks`
                * :func:`NiaPy.Runner.benchmarkFactory`

        """

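        # Descriptive note: entries in useAlgorithms/useBenchmarks may be given
        # either as instances or as class-name strings; both are reduced to a name
        # used as a key, so results take the shape
        # results[algorithm_name][benchmark_name] -> list of the values returned
        # by Algorithm.run() over nRuns runs.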
        for alg in self.useAlgorithms:
            alg_name = ""
            if not isinstance(alg, "".__class__):
                alg_name = str(type(alg).__name__)
            else:
                alg_name = alg

            self.results[alg_name] = {}
            if verbose:
                logger.info("Running %s...", alg_name)

            for bench in self.useBenchmarks:
                bench_name = ""
                if not isinstance(bench, "".__class__):
                    bench_name = str(type(bench).__name__)
                else:
                    bench_name = bench
                if verbose:
                    logger.info("Running %s algorithm on %s benchmark...", alg_name, bench_name)

                benchmark_stopping_task = self.benchmarkFactory(bench)
                self.results[alg_name][bench_name] = []
                for _ in range(self.nRuns):
                    algorithm = AlgorithmUtility().get_algorithm(alg)
                    self.results[alg_name][bench_name].append(algorithm.run(benchmark_stopping_task))
            if verbose:
                logger.info("---------------------------------------------------")

        if export == "log":
            self.__exportToLog()
        elif export == "json":
            self.__exportToJson()
        elif export == "xlsx":
            self.__exportToXls()
        elif export == "latex":
            self.__exportToLatex()
        else:
            raise TypeError("Passed export type is not supported!")

        return self.results
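
For reference, a minimal usage sketch showing how the Best/Median/Worst/Mean/Std statistics written by __exportToLatex could be computed directly from the collected results. The parameter values are arbitrary, and the loop simply mirrors the NumPy calls the file already applies to results[alg][benchmark]:

from NiaPy import Runner
from numpy import amin, amax, median, mean, std

runner = Runner(D=10, nFES=1000, nRuns=3,
                useAlgorithms=["ArtificialBeeColonyAlgorithm"],
                useBenchmarks=["Ackley", "Sphere"])
runner.run(export="log")

for alg_name, per_benchmark in runner.results.items():
    for bench_name, runs in per_benchmark.items():
        # Each entry in runs is whatever Algorithm.run() returned for one run.
        print(alg_name, bench_name,
              "best:", amin(runs), "median:", median(runs), "worst:", amax(runs),
              "mean:", mean(runs), "std:", std(runs))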