Passed
Pull Request — master (#39) by Grega
created · 01:24

Runner — Rating: B
Complexity
    Total Complexity    44

Size / Duplication
    Total Lines         195
    Duplicated Lines    0 %

Importance
    Changes 16 · Bugs 5 · Features 4

Metric    Value
c         16
b         5
f         4
dl        0
loc       195
rs        8.3396
wmc       44

9 Methods

Rating   Name                      Duplication   Size   Complexity
A        __generateExportName()    0             3      1
D        __algorithmFactory()      0             28     8
F        run()                     0             38     12
B        __exportToXls()           0             28     4
A        __init__()                0             20     1
A        __exportToJson()          0             5      2
A        __createExportDir()       0             4      2
F        __exportToLatex()         0             57     13
A        __exportToLog()           0             2      1

How to fix: Complexity

Complex Class

Complex classes like Runner often do a lot of different things. To break such a class down, you need to identify a cohesive component within the class. A common way to find such a component is to look for fields and methods that share the same prefixes or suffixes.

Once you have determined which fields and methods belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often the faster option. A sketch of this approach for Runner follows below.
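In Runner, the __export* methods share a prefix and depend only on self.results, so they are a natural candidate for Extract Class. A minimal, hypothetical sketch of that refactoring follows; the ResultsExporter name and its interface are illustrative and are not part of NiaPy:

import os
import json
import datetime


class ResultsExporter(object):
    """Hypothetical class taking over the __export* responsibilities of Runner."""

    def __init__(self, results):
        self.results = results  # the same dict Runner builds in run()

    def to_log(self):
        print(self.results)

    def to_json(self):
        if not os.path.exists('export'):
            os.makedirs('export')
        name = 'export/' + str(datetime.datetime.now()) + '.json'
        with open(name, 'w') as outFile:
            json.dump(self.results, outFile)


# Runner.run() could then delegate instead of exporting itself, e.g.:
#     ResultsExporter(self.results).to_json()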

from __future__ import print_function  # for backward compatibility purposes

import os
import logging
import json
import datetime
import xlsxwriter
import numpy as np
from tabulate import tabulate
from NiaPy import algorithms, benchmarks

__all__ = ['algorithms', 'benchmarks']
__project__ = 'NiaPy'
__version__ = '0.0.0'

VERSION = "{0} v{1}".format(__project__, __version__)

logging.basicConfig()
logger = logging.getLogger('NiaPy')
logger.setLevel('INFO')


class Runner(object):
    # pylint: disable=too-many-instance-attributes, too-many-locals
    def __init__(self, D, NP, nFES, nRuns, useAlgorithms, useBenchmarks,
                 A=0.5, r=0.5, Qmin=0.0, Qmax=2.0, F=0.5, CR=0.9, alpha=0.5,
                 betamin=0.2, gamma=1.0, p=0.5):
        self.D = D
        self.NP = NP
        self.nFES = nFES
        self.nRuns = nRuns
        self.useAlgorithms = useAlgorithms
        self.useBenchmarks = useBenchmarks
        self.A = A
        self.r = r
        self.Qmin = Qmin
        self.Qmax = Qmax
        self.F = F
        self.CR = CR
        self.alpha = alpha
        self.betamin = betamin
        self.gamma = gamma
        self.p = p
        self.results = {}

    def __algorithmFactory(self, name, benchmark):
        """Instantiate the algorithm selected by name, configured for the given benchmark."""
        bench = benchmarks.utility.Utility().get_benchmark(benchmark)
        algorithm = None

        if name == 'BatAlgorithm':
            algorithm = algorithms.basic.BatAlgorithm(
                self.D, self.NP, self.nFES, self.A, self.r, self.Qmin, self.Qmax, bench)
        elif name == 'DifferentialEvolutionAlgorithm':
            algorithm = algorithms.basic.DifferentialEvolutionAlgorithm(
                self.D, self.NP, self.nFES, self.F, self.CR, bench)
        elif name == 'FireflyAlgorithm':
            algorithm = algorithms.basic.FireflyAlgorithm(
                self.D, self.NP, self.nFES, self.alpha, self.betamin, self.gamma, bench)
        elif name == 'FlowerPollinationAlgorithm':
            algorithm = algorithms.basic.FlowerPollinationAlgorithm(
                self.D, self.NP, self.nFES, self.p, bench)
        elif name == 'GreyWolfOptimizer':
            algorithm = algorithms.basic.GreyWolfOptimizer(
                self.D, self.NP, self.nFES, bench)
        elif name == 'ArtificialBeeColonyAlgorithm':
            algorithm = algorithms.basic.ArtificialBeeColonyAlgorithm(
                self.D, self.NP, self.nFES, bench)
        elif name == 'HybridBatAlgorithm':
            algorithm = algorithms.modified.HybridBatAlgorithm(
                self.D, self.NP, self.nFES, self.A, self.r, self.F, self.CR, self.Qmin, self.Qmax, bench)
        else:
            raise TypeError('Passed algorithm is not defined!')

        return algorithm

    @classmethod
    def __createExportDir(cls):
        if not os.path.exists('export'):
            os.makedirs('export')

    @classmethod
    def __generateExportName(cls, extension):
        return 'export/' + str(datetime.datetime.now()) + '.' + extension

    def __exportToLog(self):
        print(self.results)

    def __exportToJson(self):
        self.__createExportDir()
        with open(self.__generateExportName('json'), 'w') as outFile:
            json.dump(self.results, outFile)
            logger.info('Export to JSON completed!')

    def __exportToXls(self):
        self.__createExportDir()  # make sure the export directory exists
        workbook = xlsxwriter.Workbook(self.__generateExportName('xlsx'))
        worksheet = workbook.add_worksheet()

        row = 0
        col = 0
        nRuns = 0

        for alg in self.results:
            worksheet.write(row, col, alg)
            col += 1

            for bench in self.results[alg]:
                worksheet.write(row, col, bench)

                nRuns = len(self.results[alg][bench])

                for i in range(len(self.results[alg][bench])):
                    row += 1
                    worksheet.write(row, col, self.results[alg][bench][i])

                row -= len(self.results[alg][bench])  # jump back up
                col += 1

            row += 1 + nRuns  # jump down to the row after the previous results
            col -= 1 + len(self.results[alg])

        workbook.close()  # the workbook is only written to disk on close
        logger.info('Export to XLSX completed!')

    def __exportToLatex(self):
        metrics = ['Best', 'Median', 'Worst', 'Mean', 'Std.']

        self.__createExportDir()  # make sure the export directory exists
        with open(self.__generateExportName('tex'), 'a') as outFile:
            outFile.write('\\begin{table}[h]\n')
            outFile.write('\\centering\n')

            begin_tabular = '\\begin{tabular}{c|c'

            for alg in self.results:
                for _i in range(len(self.results[alg])):
                    begin_tabular += '|c'

                firstLine = '   &'

                for benchmark in self.results[alg].keys():
                    firstLine += '  &   ' + benchmark

                firstLine += ' \\\\'

                break

            begin_tabular += '}\n'
            outFile.write(begin_tabular)
            outFile.write('\\hline\n')
            outFile.write(firstLine + '\n')
            outFile.write('\\hline\n')

            for alg in self.results:
                for metric in metrics:
                    line = ''

                    if metric != 'Worst':
                        line += '   &   ' + metric
                    else:
                        line += alg + ' &   ' + metric

                    for benchmark in self.results[alg]:
                        if metric == 'Best':
                            line += '   &   ' + str(np.amin(self.results[alg][benchmark]))
                        elif metric == 'Median':
                            line += '   &   ' + str(np.median(self.results[alg][benchmark]))
                        elif metric == 'Worst':
                            line += '   &   ' + str(np.amax(self.results[alg][benchmark]))
                        elif metric == 'Mean':
                            line += '   &   ' + str(np.mean(self.results[alg][benchmark]))
                        else:
                            line += '   &   ' + str(np.std(self.results[alg][benchmark]))

                    line += '   \\\\'
                    outFile.write(line + '\n')

                outFile.write('\\hline\n')
            outFile.write('\\end{tabular}\n')
            outFile.write('\\end{table}\n')

        logger.info('Export to LaTeX completed!')

    def run(self, export='log', verbose=False):
        for alg in self.useAlgorithms:
            self.results[alg] = {}
            if verbose:
                logger.info('Running %s...', alg)
            for bench in self.useBenchmarks:
                benchName = ''
                # a benchmark may be passed as a class instance or as a name string
                if not isinstance(bench, str):
                    # use the class name as the benchmark name
                    benchName = str(type(bench).__name__)
                else:
                    benchName = bench

                if verbose:
                    logger.info('Running %s algorithm on %s benchmark...', alg, benchName)

                self.results[alg][benchName] = []

                for _i in range(self.nRuns):
                    algorithm = self.__algorithmFactory(alg, bench)
                    self.results[alg][benchName].append(algorithm.run())

            if verbose:
                logger.info('---------------------------------------------------')

        if export == 'log':
            self.__exportToLog()
        elif export == 'json':
            self.__exportToJson()
        elif export == 'xlsx':
            self.__exportToXls()
        elif export == 'latex':
            self.__exportToLatex()
        else:
            raise TypeError('Passed export type is not supported!')

        return self.results
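For context, a minimal usage sketch of the class above. This is a hedged illustration: the import path and the 'griewank' benchmark name are assumptions (substitute whatever benchmark names benchmarks.utility.Utility actually registers), while the algorithm names are taken from __algorithmFactory.

from NiaPy import Runner  # assuming Runner is exposed at package level

runner = Runner(
    D=10,      # problem dimensionality
    NP=40,     # population size
    nFES=10000,  # function evaluations per run
    nRuns=3,   # repetitions per algorithm/benchmark pair
    useAlgorithms=['DifferentialEvolutionAlgorithm', 'GreyWolfOptimizer'],
    useBenchmarks=['griewank'])  # hypothetical benchmark name

results = runner.run(export='json', verbose=True)  # export also accepts 'log', 'xlsx', 'latex'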