1
|
|
|
import numpy as np |
2
|
|
|
from tqdm import tqdm |
3
|
|
|
|
4
|
|
|
from gradient_free_optimizers import ( |
5
|
|
|
HillClimbingOptimizer, |
6
|
|
|
StochasticHillClimbingOptimizer, |
7
|
|
|
TabuOptimizer, |
8
|
|
|
RandomSearchOptimizer, |
9
|
|
|
RandomRestartHillClimbingOptimizer, |
10
|
|
|
RandomAnnealingOptimizer, |
11
|
|
|
SimulatedAnnealingOptimizer, |
12
|
|
|
ParallelTemperingOptimizer, |
13
|
|
|
ParticleSwarmOptimizer, |
14
|
|
|
EvolutionStrategyOptimizer, |
15
|
|
|
BayesianOptimizer, |
16
|
|
|
TreeStructuredParzenEstimators, |
17
|
|
|
DecisionTreeOptimizer, |
18
|
|
|
EnsembleOptimizer, |
19
|
|
|
) |
20
|
|
|
|
21
|
|
|
|
22
|
|
|
def objective_function(pos_new):
    """Concave 1D test objective: negative square of the first coordinate.

    The maximum score (0) is reached at position 0; scores fall off
    quadratically away from it.
    """
    return -pos_new[0] ** 2
25
|
|
|
|
26
|
|
|
|
27
|
|
|
# Number of independent optimizer runs (one per random seed), iterations
# per run, and the acceptance threshold for the mean best score.
n_opts = 33
n_iter = 50
min_score_accept = -500

# One-dimensional integer search space covering [-100, 99] with step 1.
search_space = [np.arange(-100, 100, 1)]

# Start every optimizer run from 2 vertices of the search space.
initialize = {"vertices": 2}
33
|
|
|
|
34
|
|
|
|
35
|
|
View Code Duplication |
def test_HillClimbingOptimizer_convergence():
    """HillClimbingOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = HillClimbingOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
53
|
|
|
|
54
|
|
|
|
55
|
|
View Code Duplication |
def test_StochasticHillClimbingOptimizer_convergence():
    """StochasticHillClimbingOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = StochasticHillClimbingOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
73
|
|
|
|
74
|
|
|
|
75
|
|
View Code Duplication |
def test_TabuOptimizer_convergence():
    """TabuOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = TabuOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
93
|
|
|
|
94
|
|
|
|
95
|
|
View Code Duplication |
def test_RandomSearchOptimizer_convergence():
    """RandomSearchOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = RandomSearchOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
113
|
|
|
|
114
|
|
|
|
115
|
|
View Code Duplication |
def test_RandomRestartHillClimbingOptimizer_convergence():
    """RandomRestartHillClimbingOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = RandomRestartHillClimbingOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
133
|
|
|
|
134
|
|
|
|
135
|
|
View Code Duplication |
def test_RandomAnnealingOptimizer_convergence():
    """RandomAnnealingOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = RandomAnnealingOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
153
|
|
|
|
154
|
|
|
|
155
|
|
View Code Duplication |
def test_SimulatedAnnealingOptimizer_convergence():
    """SimulatedAnnealingOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = SimulatedAnnealingOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
173
|
|
|
|
174
|
|
|
|
175
|
|
View Code Duplication |
def test_ParallelTemperingOptimizer_convergence():
    """ParallelTemperingOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = ParallelTemperingOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
193
|
|
|
|
194
|
|
|
|
195
|
|
View Code Duplication |
def test_ParticleSwarmOptimizer_convergence():
    """ParticleSwarmOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = ParticleSwarmOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
213
|
|
|
|
214
|
|
|
|
215
|
|
View Code Duplication |
def test_EvolutionStrategyOptimizer_convergence():
    """EvolutionStrategyOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = EvolutionStrategyOptimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=n_iter,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
233
|
|
|
|
234
|
|
|
|
235
|
|
View Code Duplication |
def test_BayesianOptimizer_convergence():
    """BayesianOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = BayesianOptimizer(search_space)
        optimizer.search(
            objective_function,
            # SMBO-based optimizers are slow per step; halve the budget.
            n_iter=n_iter // 2,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
253
|
|
|
|
254
|
|
|
|
255
|
|
View Code Duplication |
def test_TreeStructuredParzenEstimators_convergence():
    """TreeStructuredParzenEstimators should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = TreeStructuredParzenEstimators(search_space)
        optimizer.search(
            objective_function,
            # SMBO-based optimizers are slow per step; halve the budget.
            n_iter=n_iter // 2,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
273
|
|
|
|
274
|
|
|
|
275
|
|
View Code Duplication |
def test_DecisionTreeOptimizer_convergence():
    """DecisionTreeOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = DecisionTreeOptimizer(search_space)
        optimizer.search(
            objective_function,
            # SMBO-based optimizers are slow per step; halve the budget.
            n_iter=n_iter // 2,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
293
|
|
|
|
294
|
|
|
|
295
|
|
View Code Duplication |
def test_EnsembleOptimizer_convergence():
    """EnsembleOptimizer should converge on the 1D parabola on average."""
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = EnsembleOptimizer(search_space)
        optimizer.search(
            objective_function,
            # SMBO-based optimizers are slow per step; halve the budget.
            n_iter=n_iter // 2,
            random_state=seed,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    # Mean best score over all seeds must beat the acceptance threshold.
    assert min_score_accept < np.array(best_scores).mean()
313
|
|
|
|