Completed
Push — master ( d428f0...278eed )
by Grega
10s
created

Runner.__exportToJson()   A

Complexity

Conditions 2

Size

Total Lines 5

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
cc 2
c 0
b 0
f 0
dl 0
loc 5
rs 9.4285
1
from __future__ import print_function  # for backward compatibility purpose
2
3
import os
4
import logging
5
import json
6
import datetime
7
import xlsxwriter
8
import numpy as np
9
from NiaPy import algorithms, benchmarks
10
11
__all__ = ['algorithms', 'benchmarks']
__project__ = 'NiaPy'
__version__ = '0.0.0'

# Human-readable banner, e.g. "NiaPy v0.0.0".
VERSION = '%s v%s' % (__project__, __version__)

# Package-wide logger; INFO level so Runner's progress messages are visible.
logging.basicConfig()
logger = logging.getLogger('NiaPy')
logger.setLevel(logging.INFO)
21
22
class Runner(object):
    """Run selected algorithms against selected benchmarks and export results.

    Results are accumulated in ``self.results`` as a nested mapping::

        {algorithm_name: {benchmark_name: [run_result, ...]}}

    Supported export targets: log (stdout), JSON, XLSX and LaTeX.
    """

    # pylint: disable=too-many-instance-attributes, too-many-locals
    def __init__(self, D, NP, nFES, nRuns, useAlgorithms, useBenchmarks,
                 A=0.5, r=0.5, Qmin=0.0, Qmax=2.0, F=0.5, CR=0.9, alpha=0.5,
                 betamin=0.2, gamma=1.0, p=0.5):
        """Store problem size, run counts and per-algorithm parameters.

        Arguments:
            D: problem dimension.
            NP: population size.
            nFES: number of function evaluations per run.
            nRuns: independent runs per algorithm/benchmark pair.
            useAlgorithms: iterable of algorithm names to run.
            useBenchmarks: iterable of benchmark names (str) or instances.
            A, r, Qmin, Qmax: Bat / HybridBat algorithm parameters.
            F, CR: differential-evolution parameters (also used by HybridBat).
            alpha, betamin, gamma: firefly-algorithm parameters.
            p: flower-pollination switch probability.
        """
        self.D = D
        self.NP = NP
        self.nFES = nFES
        self.nRuns = nRuns
        self.useAlgorithms = useAlgorithms
        self.useBenchmarks = useBenchmarks
        self.A = A
        self.r = r
        self.Qmin = Qmin
        self.Qmax = Qmax
        self.F = F
        self.CR = CR
        self.alpha = alpha
        self.betamin = betamin
        self.gamma = gamma
        self.p = p
        self.results = {}

    def __algorithmFactory(self, name, benchmark):
        """Instantiate the named algorithm configured for *benchmark*.

        Raises:
            TypeError: when *name* is not a recognized algorithm name.
        """
        bench = benchmarks.utility.Utility().get_benchmark(benchmark)

        if name == 'BatAlgorithm':
            return algorithms.basic.BatAlgorithm(
                self.D, self.NP, self.nFES, self.A, self.r, self.Qmin, self.Qmax, bench)
        if name == 'DifferentialEvolutionAlgorithm':
            return algorithms.basic.DifferentialEvolutionAlgorithm(
                self.D, self.NP, self.nFES, self.F, self.CR, bench)
        if name == 'FireflyAlgorithm':
            return algorithms.basic.FireflyAlgorithm(
                self.D, self.NP, self.nFES, self.alpha, self.betamin, self.gamma, bench)
        if name == 'FlowerPollinationAlgorithm':
            return algorithms.basic.FlowerPollinationAlgorithm(
                self.D, self.NP, self.nFES, self.p, bench)
        if name == 'GreyWolfOptimizer':
            return algorithms.basic.GreyWolfOptimizer(
                self.D, self.NP, self.nFES, bench)
        if name == 'ArtificialBeeColonyAlgorithm':
            return algorithms.basic.ArtificialBeeColonyAlgorithm(
                self.D, self.NP, self.nFES, bench)
        if name == 'HybridBatAlgorithm':
            return algorithms.modified.HybridBatAlgorithm(
                self.D, self.NP, self.nFES, self.A, self.r, self.F, self.CR,
                self.Qmin, self.Qmax, bench)
        # BUG FIX: the original message blamed the benchmark, but this branch
        # is reached when the *algorithm* name is unknown.
        raise TypeError('Passed algorithm is not defined!')

    @classmethod
    def __createExportDir(cls):
        """Create the ``export`` directory if it does not exist yet."""
        if not os.path.exists('export'):
            os.makedirs('export')

    @classmethod
    def __generateExportName(cls, extension):
        """Return a timestamped file name inside the export directory.

        NOTE(review): ``str(datetime.datetime.now())`` contains ``:``, which
        is not a legal file-name character on Windows — confirm the target
        platforms before relying on this naming scheme.
        """
        return 'export/' + str(datetime.datetime.now()) + '.' + extension

    def __exportToLog(self):
        """Dump the raw results dictionary to stdout."""
        print(self.results)

    def __exportToJson(self):
        """Write results as JSON into a timestamped file under ``export/``."""
        self.__createExportDir()
        with open(self.__generateExportName('json'), 'w') as outFile:
            json.dump(self.results, outFile)
            logger.info('Export to JSON completed!')

    def __exportToXls(self):
        """Write results into an XLSX workbook.

        Layout: each algorithm's name in the first column, then one column per
        benchmark holding a header cell followed by one row per run.
        """
        # BUG FIX: was missing, unlike the JSON export — Workbook() would fail
        # if the export directory did not exist yet.
        self.__createExportDir()
        workbook = xlsxwriter.Workbook(self.__generateExportName('xlsx'))
        try:
            worksheet = workbook.add_worksheet()

            row = 0
            col = 0
            nRuns = 0

            for alg in self.results:
                worksheet.write(row, col, alg)
                col += 1

                for bench in self.results[alg]:
                    worksheet.write(row, col, bench)

                    nRuns = len(self.results[alg][bench])

                    for i in range(nRuns):
                        row += 1
                        worksheet.write(row, col, self.results[alg][bench][i])

                    row -= nRuns  # jump back up to the header row
                    col += 1

                row += 1 + nRuns  # jump down to row after previous results
                col -= 1 + len(self.results[alg])
        finally:
            # BUG FIX: the workbook was never closed, so xlsxwriter never
            # actually wrote the file to disk.
            workbook.close()

        logger.info('Export to XLSX completed!')

    def __exportToLatex(self):
        """Write results as a standalone LaTeX document with one table.

        Rows per algorithm: Best, Median, Worst, Mean, Std.; one column per
        benchmark. The header is built from the first algorithm's benchmarks —
        all algorithms are assumed to share the same benchmark set.
        """
        metrics = ['Best', 'Median', 'Worst', 'Mean', 'Std.']

        def only_upper(s):
            # e.g. 'BatAlgorithm' -> 'BA'; used to abbreviate algorithm names.
            return "".join(c for c in s if c.isupper())

        # BUG FIX: was missing, unlike the JSON export.
        self.__createExportDir()
        # BUG FIX: open with 'w' instead of 'a' — the file name is freshly
        # generated, and appending could corrupt output on a name collision.
        with open(self.__generateExportName('tex'), 'w') as outFile:
            outFile.write('\\documentclass{article}\n')
            outFile.write('\\usepackage[utf8]{inputenc}\n')
            outFile.write('\\usepackage{siunitx}\n')
            outFile.write('\\sisetup{\n')
            outFile.write('round-mode=places,round-precision=3}\n')
            outFile.write('\\begin{document}\n')
            outFile.write('\\begin{table}[h]\n')
            outFile.write('\\centering\n')

            begin_tabular = '\\begin{tabular}{cc'
            # BUG FIX: initialize so an empty results dict no longer raises
            # NameError when the header line is written below.
            firstLine = ''

            for alg in self.results:
                for _i in range(len(self.results[alg])):
                    begin_tabular += 'S'

                firstLine = '   &'
                for benchmark in self.results[alg].keys():
                    firstLine += '  &   \\multicolumn{1}{c}{\\textbf{' + \
                        benchmark + '}}'
                firstLine += ' \\\\'
                break  # header comes from the first algorithm only

            begin_tabular += '}\n'
            outFile.write(begin_tabular)
            outFile.write('\\hline\n')
            outFile.write(firstLine + '\n')
            outFile.write('\\hline\n')

            for alg in self.results:
                for metric in metrics:
                    if metric != 'Worst':
                        line = '   &   ' + metric
                    else:
                        # The 'Worst' row also carries the abbreviated
                        # algorithm name in the first column.
                        if alg.endswith('Algorithm'):
                            shortAlg = only_upper(alg[:-9])
                        else:
                            shortAlg = only_upper(alg)
                        line = '\\textbf{' + shortAlg + '} &   ' + metric

                    for benchmark in self.results[alg]:
                        values = self.results[alg][benchmark]
                        if metric == 'Best':
                            line += '   &   ' + str(np.amin(values))
                        elif metric == 'Median':
                            line += '   &   ' + str(np.median(values))
                        elif metric == 'Worst':
                            line += '   &   ' + str(np.amax(values))
                        elif metric == 'Mean':
                            line += '   &   ' + str(np.mean(values))
                        else:  # 'Std.'
                            line += '   &   ' + str(np.std(values))

                    line += '   \\\\'
                    outFile.write(line + '\n')

                outFile.write('\\hline\n')
            outFile.write('\\end{tabular}\n')
            outFile.write('\\end{table}\n')
            outFile.write('\\end{document}')

        logger.info('Export to Latex completed!')

    def run(self, export='log', verbose=False):
        """Run every selected algorithm on every selected benchmark.

        Arguments:
            export: one of 'log', 'json', 'xlsx', 'latex'.
            verbose: when True, log per-algorithm/benchmark progress.

        Returns:
            The nested results dict ``{algorithm: {benchmark: [run, ...]}}``.

        Raises:
            TypeError: when *export* is not a supported target.
        """
        for alg in self.useAlgorithms:
            self.results[alg] = {}
            if verbose:
                logger.info('Running %s...', alg)
            for bench in self.useBenchmarks:
                # Benchmarks may be passed as a name (str) or as an instance;
                # instances are keyed by their class name.
                if not isinstance(bench, str):
                    benchName = str(type(bench).__name__)
                else:
                    benchName = bench

                if verbose:
                    logger.info('Running %s algorithm on %s benchmark...', alg, benchName)

                self.results[alg][benchName] = []

                for _i in range(self.nRuns):
                    algorithm = self.__algorithmFactory(alg, bench)
                    self.results[alg][benchName].append(algorithm.run())

            if verbose:
                logger.info('---------------------------------------------------')

        if export == 'log':
            self.__exportToLog()
        elif export == 'json':
            self.__exportToJson()
        elif export == 'xlsx':
            self.__exportToXls()
        elif export == 'latex':
            self.__exportToLatex()
        else:
            raise TypeError('Passed export type is not supported!')

        return self.results