@@ 7-41 (lines=35) @@ | ||
4 | from ._parametrize import optimizers_local |
|
5 | ||
6 | ||
@pytest.mark.parametrize(*optimizers_local)
def test_convex_convergence_singleOpt(Optimizer):
    """Warm-started search on a convex parabola: the mean best score over
    many random seeds must stay well above the worst-case value."""

    def objective_function(para):
        # Negative parabola — maximum (score 0) is at x1 == 0.
        return -(para["x1"] * para["x1"])

    search_space = {"x1": np.arange(-1000, 1, 1)}
    # Start every run from the far edge of the search space.
    initialize = {"warm_start": [{"x1": -1000}]}

    n_opts = 33
    best_scores = []
    for seed in range(n_opts):
        optimizer = Optimizer(search_space, rand_rest_p=1)
        optimizer.search(
            objective_function,
            n_iter=30,
            random_state=seed,
            memory=False,
            verbosity=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    score_mean = np.array(best_scores).mean()
    print("score_mean", score_mean)

    assert score_mean > -10000
|
42 | ||
43 |
@@ 41-67 (lines=27) @@ | ||
38 | assert score_mean > -25 |
|
39 | ||
40 | ||
@pytest.mark.parametrize(*optimizers_PopBased)
def test_convex_convergence_popBased(Optimizer):
    """Population-based optimizers on a convex parabola: the mean best score
    across many random seeds must exceed -25."""

    def objective_function(para):
        # Negative parabola — maximum (score 0) is at x1 == 0.
        x = para["x1"]
        return -x * x

    search_space = {"x1": np.arange(-100, 101, 1)}
    # Seed the population from search-space vertices and a coarse grid.
    initialize = {"vertices": 2, "grid": 2}

    n_opts = 33
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = Optimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=80,
            random_state=seed,
            memory=False,
            verbosity=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    score_mean = np.array(best_scores).mean()

    assert score_mean > -25
|
68 | ||
69 | ||
70 | @pytest.mark.parametrize(*optimizers_SBOM) |
|
@@ 12-38 (lines=27) @@ | ||
9 | ) |
|
10 | ||
11 | ||
@pytest.mark.parametrize(*optimizers_singleOpt)
def test_convex_convergence_singleOpt(Optimizer):
    """Single-optimizer search on a convex parabola: the mean best score
    across many random seeds must exceed -25."""

    def objective_function(para):
        # Negative parabola — maximum (score 0) is at x1 == 0.
        x = para["x1"]
        return -x * x

    search_space = {"x1": np.arange(-100, 101, 1)}
    # Start each run from a single search-space vertex.
    initialize = {"vertices": 1}

    n_opts = 33
    best_scores = []
    for seed in tqdm(range(n_opts)):
        optimizer = Optimizer(search_space)
        optimizer.search(
            objective_function,
            n_iter=100,
            random_state=seed,
            memory=False,
            verbosity=False,
            initialize=initialize,
        )
        best_scores.append(optimizer.best_score)

    score_mean = np.array(best_scores).mean()

    assert score_mean > -25
|
39 | ||
40 | ||
41 | @pytest.mark.parametrize(*optimizers_PopBased) |