Passed
Push — master ( a4a5a4...600197 )
by Simon
created 02:08

test_performance   A

Complexity

Total Complexity 12

Size/Duplication

Total Lines 363
Duplicated Lines 0 %

Importance

Changes 0
Metric   Value
wmc      12
eloc     269
dl       0
loc      363
rs       10
c        0
b        0
f        0

12 Functions

Rating   Name   Duplication   Size   Complexity  
A test_RandomSearchOptimizer() 0 26 1
A test_RandomAnnealingOptimizer() 0 26 1
A test_TabuOptimizer() 0 26 1
A test_ParallelTemperingOptimizer() 0 26 1
A test_RandomRestartHillClimbingOptimizer() 0 26 1
A test_BayesianOptimizer() 0 26 1
A test_EvolutionStrategyOptimizer() 0 26 1
A test_SimulatedAnnealingOptimizer() 0 26 1
A test_StochasticTunnelingOptimizer() 0 26 1
A test_ParticleSwarmOptimizer() 0 26 1
A test_StochasticHillClimbingOptimizer() 0 26 1
A test_HillClimbingOptimizer() 0 26 1
# Author: Simon Blanke
# Email: [email protected]
# License: MIT License

from sklearn.datasets import load_iris

data = load_iris()
X = data.data
y = data.target

n_iter_0 = 0
n_iter_1 = 33
random_state = 0
cv = 2
n_jobs = 2

search_config = {
    "sklearn.tree.DecisionTreeClassifier": {
        "criterion": ["gini", "entropy"],
        "max_depth": range(1, 21),
        "min_samples_split": range(2, 21),
        "min_samples_leaf": range(1, 21),
    }
}

warm_start = {"sklearn.tree.DecisionTreeClassifier": {"max_depth": [1]}}


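# Each test below fits the same optimizer class twice on the iris data: opt0
# with n_iter_0 = 0 iterations and opt1 with n_iter_1 = 33 iterations, and
# then asserts that the longer search reaches a strictly higher best score.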
def test_HillClimbingOptimizer():
    from hyperactive import HillClimbingOptimizer

    opt0 = HillClimbingOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=1,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = HillClimbingOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=1,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_StochasticHillClimbingOptimizer():
    from hyperactive import StochasticHillClimbingOptimizer

    opt0 = StochasticHillClimbingOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = StochasticHillClimbingOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_TabuOptimizer():
    from hyperactive import TabuOptimizer

    opt0 = TabuOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = TabuOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_RandomSearchOptimizer():
    from hyperactive import RandomSearchOptimizer

    opt0 = RandomSearchOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = RandomSearchOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_RandomRestartHillClimbingOptimizer():
    from hyperactive import RandomRestartHillClimbingOptimizer

    opt0 = RandomRestartHillClimbingOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = RandomRestartHillClimbingOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_RandomAnnealingOptimizer():
    from hyperactive import RandomAnnealingOptimizer

    opt0 = RandomAnnealingOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = RandomAnnealingOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_SimulatedAnnealingOptimizer():
    from hyperactive import SimulatedAnnealingOptimizer

    opt0 = SimulatedAnnealingOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = SimulatedAnnealingOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_StochasticTunnelingOptimizer():
    from hyperactive import StochasticTunnelingOptimizer

    opt0 = StochasticTunnelingOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = StochasticTunnelingOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_ParallelTemperingOptimizer():
    from hyperactive import ParallelTemperingOptimizer

    opt0 = ParallelTemperingOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = ParallelTemperingOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_ParticleSwarmOptimizer():
    from hyperactive import ParticleSwarmOptimizer

    opt0 = ParticleSwarmOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = ParticleSwarmOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_EvolutionStrategyOptimizer():
    from hyperactive import EvolutionStrategyOptimizer

    opt0 = EvolutionStrategyOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = EvolutionStrategyOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best


def test_BayesianOptimizer():
    from hyperactive import BayesianOptimizer

    opt0 = BayesianOptimizer(
        search_config,
        n_iter_0,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=1,
        warm_start=warm_start,
    )
    opt0.fit(X, y)

    opt1 = BayesianOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
        warm_start=warm_start,
    )
    opt1.fit(X, y)

    assert opt0.score_best < opt1.score_best