Total Complexity: 2
Total Lines: 52
Duplicated Lines: 0 %
Changes: 0
import pytest
from tqdm import tqdm
import numpy as np

from gradient_free_optimizers import (
    HillClimbingOptimizer,
    StochasticHillClimbingOptimizer,
    RepulsingHillClimbingOptimizer,
    SimulatedAnnealingOptimizer,
)


# Parametrization: the local-search optimizers under test.
opt_local_l = (
    "Optimizer",
    [
        HillClimbingOptimizer,
        StochasticHillClimbingOptimizer,
        RepulsingHillClimbingOptimizer,
        SimulatedAnnealingOptimizer,
    ],
)


@pytest.mark.parametrize(*opt_local_l)
def test_local_perf(Optimizer):
    # Negated parabola: maximum score of 0 at x1 = 0.
    def objective_function(para):
        score = -para["x1"] * para["x1"]
        return score

    search_space = {"x1": np.arange(-100, 101, 1)}
    initialize = {"vertices": 2}

    n_opts = 33
    n_iter = 100

    # Average the best score over several random seeds to smooth out
    # run-to-run noise before checking convergence.
    scores = []
    for rnd_st in tqdm(range(n_opts)):
        opt = Optimizer(search_space, initialize=initialize, random_state=rnd_st)
        opt.search(
            objective_function,
            n_iter=n_iter,
            memory=False,
            verbosity=False,
        )

        scores.append(opt.best_score)
    score_mean = np.array(scores).mean()

    print("\n score_mean", score_mean)

    # Each local optimizer should, on average, end up close to the optimum.
    assert score_mean > -5
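For reference, a minimal standalone sketch of the same search pattern the test exercises, run outside pytest. It reuses only the objective, search space, and search arguments shown above; the single fixed seed and the choice of HillClimbingOptimizer alone are illustrative, not part of the test.

    import numpy as np
    from gradient_free_optimizers import HillClimbingOptimizer

    # Same negated-parabola objective as in the test: maximum of 0 at x1 = 0.
    def objective_function(para):
        return -para["x1"] * para["x1"]

    search_space = {"x1": np.arange(-100, 101, 1)}

    # Single run with a fixed seed (illustrative values).
    opt = HillClimbingOptimizer(search_space, random_state=0)
    opt.search(objective_function, n_iter=100, memory=False, verbosity=False)

    print("best score:", opt.best_score)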