    @@ 291-308 (lines=18) @@
    288      assert min_score_accept < score_mean
    289
    290
    291  def test_DecisionTreeOptimizer_convergence():
    292      scores = []
    293      for rnd_st in tqdm(range(n_opts)):
    294          opt = DecisionTreeOptimizer(search_space)
    295          opt.search(
    296              objective_function,
    297              n_iter=n_iter,
    298              random_state=rnd_st,
    299              memory=False,
    300              print_results=False,
    301              progress_bar=False,
    302              initialize=initialize,
    303          )
    304
    305          scores.append(opt.best_score)
    306
    307      score_mean = np.array(scores).mean()
    308      assert min_score_accept < score_mean
    309
    310
    311  test_DecisionTreeOptimizer_convergence()

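Each of the flagged ranges calls into module-level names that sit outside the duplicated lines: `objective_function`, `search_space`, `n_opts`, `n_iter`, `initialize`, `min_score_accept`, plus `tqdm` and `np`. A minimal sketch of what that shared setup might look like is given below. Apart from `min_score_accept = -100`, which is visible in the context of the `@@ 51-68` hunk, the objective, the search space, the counts, and the `gradient_free_optimizers` import path are assumptions for illustration only.

```python
# Hypothetical shared setup for the convergence tests shown in the hunks.
# Assumption: the optimizer classes come from the gradient_free_optimizers
# package; the objective, search space, and counts below are illustrative.
import numpy as np
from tqdm import tqdm

from gradient_free_optimizers import (
    HillClimbingOptimizer,
    StochasticHillClimbingOptimizer,
    TabuOptimizer,
    RandomSearchOptimizer,
    RandomRestartHillClimbingOptimizer,
    RandomAnnealingOptimizer,
    SimulatedAnnealingOptimizer,
    ParallelTemperingOptimizer,
    ParticleSwarmOptimizer,
    EvolutionStrategyOptimizer,
    BayesianOptimizer,
    TreeStructuredParzenEstimators,
    DecisionTreeOptimizer,
)


def objective_function(para):
    # Concave 1D objective with its maximum (score 0) at x0 == 0.
    return -(para["x0"] ** 2)


search_space = {"x0": np.arange(-100, 101, 1)}

n_opts = 33                    # random seeds per optimizer (assumed)
n_iter = 100                   # search iterations per run (assumed)
initialize = {"vertices": 2}   # initialization strategy (assumed)
min_score_accept = -100        # taken from the context of the @@ 51-68 hunk
```
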
    @@ 271-288 (lines=18) @@
    268      assert min_score_accept < score_mean
    269
    270
    271  def test_TreeStructuredParzenEstimators_convergence():
    272      scores = []
    273      for rnd_st in tqdm(range(n_opts)):
    274          opt = TreeStructuredParzenEstimators(search_space)
    275          opt.search(
    276              objective_function,
    277              n_iter=n_iter,
    278              random_state=rnd_st,
    279              memory=False,
    280              print_results=False,
    281              progress_bar=False,
    282              initialize=initialize,
    283          )
    284
    285          scores.append(opt.best_score)
    286
    287      score_mean = np.array(scores).mean()
    288      assert min_score_accept < score_mean
    289
    290
    291  def test_DecisionTreeOptimizer_convergence():

    @@ 251-268 (lines=18) @@
    248      assert min_score_accept < score_mean
    249
    250
    251  def test_BayesianOptimizer_convergence():
    252      scores = []
    253      for rnd_st in tqdm(range(n_opts)):
    254          opt = BayesianOptimizer(search_space)
    255          opt.search(
    256              objective_function,
    257              n_iter=n_iter,
    258              random_state=rnd_st,
    259              memory=False,
    260              print_results=False,
    261              progress_bar=False,
    262              initialize=initialize,
    263          )
    264
    265          scores.append(opt.best_score)
    266
    267      score_mean = np.array(scores).mean()
    268      assert min_score_accept < score_mean
    269
    270
    271  def test_TreeStructuredParzenEstimators_convergence():

    @@ 231-248 (lines=18) @@
    228      assert min_score_accept < score_mean
    229
    230
    231  def test_EvolutionStrategyOptimizer_convergence():
    232      scores = []
    233      for rnd_st in tqdm(range(n_opts)):
    234          opt = EvolutionStrategyOptimizer(search_space)
    235          opt.search(
    236              objective_function,
    237              n_iter=n_iter,
    238              random_state=rnd_st,
    239              memory=False,
    240              print_results=False,
    241              progress_bar=False,
    242              initialize=initialize,
    243          )
    244
    245          scores.append(opt.best_score)
    246
    247      score_mean = np.array(scores).mean()
    248      assert min_score_accept < score_mean
    249
    250
    251  def test_BayesianOptimizer_convergence():

    @@ 211-228 (lines=18) @@
    208      assert min_score_accept < score_mean
    209
    210
    211  def test_ParticleSwarmOptimizer_convergence():
    212      scores = []
    213      for rnd_st in tqdm(range(n_opts)):
    214          opt = ParticleSwarmOptimizer(search_space)
    215          opt.search(
    216              objective_function,
    217              n_iter=n_iter,
    218              random_state=rnd_st,
    219              memory=False,
    220              print_results=False,
    221              progress_bar=False,
    222              initialize=initialize,
    223          )
    224
    225          scores.append(opt.best_score)
    226
    227      score_mean = np.array(scores).mean()
    228      assert min_score_accept < score_mean
    229
    230
    231  def test_EvolutionStrategyOptimizer_convergence():

    @@ 191-208 (lines=18) @@
    188      assert min_score_accept < score_mean
    189
    190
    191  def test_ParallelTemperingOptimizer_convergence():
    192      scores = []
    193      for rnd_st in tqdm(range(n_opts)):
    194          opt = ParallelTemperingOptimizer(search_space)
    195          opt.search(
    196              objective_function,
    197              n_iter=n_iter,
    198              random_state=rnd_st,
    199              memory=False,
    200              print_results=False,
    201              progress_bar=False,
    202              initialize=initialize,
    203          )
    204
    205          scores.append(opt.best_score)
    206
    207      score_mean = np.array(scores).mean()
    208      assert min_score_accept < score_mean
    209
    210
    211  def test_ParticleSwarmOptimizer_convergence():

    @@ 171-188 (lines=18) @@
    168      assert min_score_accept < score_mean
    169
    170
    171  def test_SimulatedAnnealingOptimizer_convergence():
    172      scores = []
    173      for rnd_st in tqdm(range(n_opts)):
    174          opt = SimulatedAnnealingOptimizer(search_space)
    175          opt.search(
    176              objective_function,
    177              n_iter=n_iter,
    178              random_state=rnd_st,
    179              memory=False,
    180              print_results=False,
    181              progress_bar=False,
    182              initialize=initialize,
    183          )
    184
    185          scores.append(opt.best_score)
    186
    187      score_mean = np.array(scores).mean()
    188      assert min_score_accept < score_mean
    189
    190
    191  def test_ParallelTemperingOptimizer_convergence():

    @@ 151-168 (lines=18) @@
    148      assert min_score_accept < score_mean
    149
    150
    151  def test_RandomAnnealingOptimizer_convergence():
    152      scores = []
    153      for rnd_st in tqdm(range(n_opts)):
    154          opt = RandomAnnealingOptimizer(search_space)
    155          opt.search(
    156              objective_function,
    157              n_iter=n_iter,
    158              random_state=rnd_st,
    159              memory=False,
    160              print_results=False,
    161              progress_bar=False,
    162              initialize=initialize,
    163          )
    164
    165          scores.append(opt.best_score)
    166
    167      score_mean = np.array(scores).mean()
    168      assert min_score_accept < score_mean
    169
    170
    171  def test_SimulatedAnnealingOptimizer_convergence():

    @@ 131-148 (lines=18) @@
    128      assert min_score_accept < score_mean
    129
    130
    131  def test_RandomRestartHillClimbingOptimizer_convergence():
    132      scores = []
    133      for rnd_st in tqdm(range(n_opts)):
    134          opt = RandomRestartHillClimbingOptimizer(search_space)
    135          opt.search(
    136              objective_function,
    137              n_iter=n_iter,
    138              random_state=rnd_st,
    139              memory=False,
    140              print_results=False,
    141              progress_bar=False,
    142              initialize=initialize,
    143          )
    144
    145          scores.append(opt.best_score)
    146
    147      score_mean = np.array(scores).mean()
    148      assert min_score_accept < score_mean
    149
    150
    151  def test_RandomAnnealingOptimizer_convergence():

    @@ 111-128 (lines=18) @@
    108      assert min_score_accept < score_mean
    109
    110
    111  def test_RandomSearchOptimizer_convergence():
    112      scores = []
    113      for rnd_st in tqdm(range(n_opts)):
    114          opt = RandomSearchOptimizer(search_space)
    115          opt.search(
    116              objective_function,
    117              n_iter=n_iter,
    118              random_state=rnd_st,
    119              memory=False,
    120              print_results=False,
    121              progress_bar=False,
    122              initialize=initialize,
    123          )
    124
    125          scores.append(opt.best_score)
    126
    127      score_mean = np.array(scores).mean()
    128      assert min_score_accept < score_mean
    129
    130
    131  def test_RandomRestartHillClimbingOptimizer_convergence():

    @@ 91-108 (lines=18) @@
     88      assert min_score_accept < score_mean
     89
     90
     91  def test_TabuOptimizer_convergence():
     92      scores = []
     93      for rnd_st in tqdm(range(n_opts)):
     94          opt = TabuOptimizer(search_space)
     95          opt.search(
     96              objective_function,
     97              n_iter=n_iter,
     98              random_state=rnd_st,
     99              memory=False,
    100              print_results=False,
    101              progress_bar=False,
    102              initialize=initialize,
    103          )
    104
    105          scores.append(opt.best_score)
    106
    107      score_mean = np.array(scores).mean()
    108      assert min_score_accept < score_mean
    109
    110
    111  def test_RandomSearchOptimizer_convergence():

    @@ 71-88 (lines=18) @@
     68      assert min_score_accept < score_mean
     69
     70
     71  def test_StochasticHillClimbingOptimizer_convergence():
     72      scores = []
     73      for rnd_st in tqdm(range(n_opts)):
     74          opt = StochasticHillClimbingOptimizer(search_space)
     75          opt.search(
     76              objective_function,
     77              n_iter=n_iter,
     78              random_state=rnd_st,
     79              memory=False,
     80              print_results=False,
     81              progress_bar=False,
     82              initialize=initialize,
     83          )
     84
     85          scores.append(opt.best_score)
     86
     87      score_mean = np.array(scores).mean()
     88      assert min_score_accept < score_mean
     89
     90
     91  def test_TabuOptimizer_convergence():

    @@ 51-68 (lines=18) @@
     48  min_score_accept = -100
     49
     50
     51  def test_HillClimbingOptimizer_convergence():
     52      scores = []
     53      for rnd_st in tqdm(range(n_opts)):
     54          opt = HillClimbingOptimizer(search_space)
     55          opt.search(
     56              objective_function,
     57              n_iter=n_iter,
     58              random_state=rnd_st,
     59              memory=False,
     60              print_results=False,
     61              progress_bar=False,
     62              initialize=initialize,
     63          )
     64
     65          scores.append(opt.best_score)
     66
     67      score_mean = np.array(scores).mean()
     68      assert min_score_accept < score_mean
     69
     70
     71  def test_StochasticHillClimbingOptimizer_convergence():

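All thirteen flagged ranges share the same body and differ only in the optimizer class they instantiate, which is why they are reported as clones. One plausible way to remove the duplication is a single parametrized test. The sketch below assumes pytest is the test runner and that the shared module-level names used by the original tests are available; it is a possible refactor, not the project's existing code.

```python
import numpy as np
import pytest
from tqdm import tqdm

# Assumption: the optimizer classes and the shared names (search_space,
# objective_function, n_iter, n_opts, initialize, min_score_accept) are
# defined or imported at module level, as in the duplicated tests above.
OPTIMIZER_CLASSES = [
    HillClimbingOptimizer,
    StochasticHillClimbingOptimizer,
    TabuOptimizer,
    RandomSearchOptimizer,
    RandomRestartHillClimbingOptimizer,
    RandomAnnealingOptimizer,
    SimulatedAnnealingOptimizer,
    ParallelTemperingOptimizer,
    ParticleSwarmOptimizer,
    EvolutionStrategyOptimizer,
    BayesianOptimizer,
    TreeStructuredParzenEstimators,
    DecisionTreeOptimizer,
]


@pytest.mark.parametrize("Optimizer", OPTIMIZER_CLASSES)
def test_optimizer_convergence(Optimizer):
    # Identical body to each duplicated test; the class is injected by pytest.
    scores = []
    for rnd_st in tqdm(range(n_opts)):
        opt = Optimizer(search_space)
        opt.search(
            objective_function,
            n_iter=n_iter,
            random_state=rnd_st,
            memory=False,
            print_results=False,
            progress_bar=False,
            initialize=initialize,
        )

        scores.append(opt.best_score)

    score_mean = np.array(scores).mean()
    assert min_score_accept < score_mean
```

With `parametrize`, each optimizer still appears as its own test case in the pytest report, so per-optimizer failures remain distinguishable while the 18-line body exists only once.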