| @@ 34-79 (lines=46) @@ | ||
| 31 | ) |
| 32 | ||
| 33 | ||
| 34 | @pytest.mark.parametrize(*obj_func_l) |
| 35 | @pytest.mark.parametrize(*opt_smbo_l) |
| 36 | def test_smbo_perf_0(Optimizer, objective_function): |
| 37 | search_space = { |
| 38 | "x0": np.arange(-30, 101, 1), |
| 39 | "x1": np.arange(-100, 31, 1), |
| 40 | } |
| 41 | initialize = {"vertices": 4, "random": 3} |
| 42 | ||
| 43 | n_opts = 10 |
| 44 | n_iter = 20 |
| 45 | ||
| 46 | scores = [] |
| 47 | scores_rnd = [] |
| 48 | for rnd_st in tqdm(range(n_opts)): |
| 49 | opt = Optimizer(search_space, initialize=initialize, random_state=rnd_st) |
| 50 | opt.search( |
| 51 | objective_function, |
| 52 | n_iter=n_iter, |
| 53 | memory=False, |
| 54 | verbosity=False, |
| 55 | ) |
| 56 | ||
| 57 | opt_rnd = RandomSearchOptimizer( |
| 58 | search_space, initialize=initialize, random_state=rnd_st |
| 59 | ) |
| 60 | opt_rnd.search( |
| 61 | objective_function, |
| 62 | n_iter=n_iter, |
| 63 | memory=False, |
| 64 | verbosity=False, |
| 65 | ) |
| 66 | ||
| 67 | scores.append(opt.best_score) |
| 68 | scores_rnd.append(opt_rnd.best_score) |
| 69 | ||
| 70 | score_mean = np.array(scores).mean() |
| 71 | score_mean_rnd = np.array(scores_rnd).mean() |
| 72 | ||
| 73 | print("\n score_mean", score_mean) |
| 74 | print("\n score_mean_rnd", score_mean_rnd) |
| 75 | ||
| 76 | score_norm = (score_mean_rnd - score_mean) / (score_mean_rnd + score_mean) |
| 77 | print("\n score_norm", score_norm) |
| 78 | ||
| 79 | assert score_norm > 0.3 |
| 80 | ||
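
The block above and its near-identical twin below differ only in the search space, the `initialize` counts, `n_iter`, and the assertion threshold. As a non-authoritative sketch of how the shared loop could be factored out, the helper below mirrors the duplicated body; the helper name `run_perf_comparison` is hypothetical, and the `gradient_free_optimizers` import path is an assumption based on the API used in the quoted code (`search_space`, `initialize`, `best_score`).

```python
import numpy as np
from tqdm import tqdm

# Assumption: the optimizer classes follow the API used in the quoted tests
# (e.g. gradient_free_optimizers), including RandomSearchOptimizer.
from gradient_free_optimizers import RandomSearchOptimizer


def run_perf_comparison(
    Optimizer, objective_function, search_space, initialize, n_opts, n_iter
):
    """Hypothetical helper mirroring the duplicated loop in both occurrences.

    For each random seed it runs the candidate optimizer and a
    RandomSearchOptimizer on the same search space, then returns the
    normalized gap between their mean best scores.
    """
    scores, scores_rnd = [], []
    for rnd_st in tqdm(range(n_opts)):
        opt = Optimizer(search_space, initialize=initialize, random_state=rnd_st)
        opt.search(objective_function, n_iter=n_iter, memory=False, verbosity=False)

        opt_rnd = RandomSearchOptimizer(
            search_space, initialize=initialize, random_state=rnd_st
        )
        opt_rnd.search(
            objective_function, n_iter=n_iter, memory=False, verbosity=False
        )

        scores.append(opt.best_score)
        scores_rnd.append(opt_rnd.best_score)

    score_mean = np.array(scores).mean()
    score_mean_rnd = np.array(scores_rnd).mean()
    # Same normalized score the duplicated tests assert against.
    return (score_mean_rnd - score_mean) / (score_mean_rnd + score_mean)
```
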
| @@ 34-79 (lines=46) @@ | ||
| 31 | ) |
| 32 | ||
| 33 | ||
| 34 | @pytest.mark.parametrize(*obj_func_l) |
| 35 | @pytest.mark.parametrize(*opt_pop_l) |
| 36 | def test_pop_perf_0(Optimizer, objective_function): |
| 37 | search_space = { |
| 38 | "x0": np.arange(-100, 101, 0.1), |
| 39 | "x1": np.arange(-100, 101, 0.1), |
| 40 | } |
| 41 | initialize = {"vertices": 4, "random": 6} |
| 42 | ||
| 43 | n_opts = 10 |
| 44 | n_iter = 1200 |
| 45 | ||
| 46 | scores = [] |
| 47 | scores_rnd = [] |
| 48 | for rnd_st in tqdm(range(n_opts)): |
| 49 | opt = Optimizer(search_space, initialize=initialize, random_state=rnd_st) |
| 50 | opt.search( |
| 51 | objective_function, |
| 52 | n_iter=n_iter, |
| 53 | memory=False, |
| 54 | verbosity=False, |
| 55 | ) |
| 56 | ||
| 57 | opt_rnd = RandomSearchOptimizer( |
| 58 | search_space, initialize=initialize, random_state=rnd_st |
| 59 | ) |
| 60 | opt_rnd.search( |
| 61 | objective_function, |
| 62 | n_iter=n_iter, |
| 63 | memory=False, |
| 64 | verbosity=False, |
| 65 | ) |
| 66 | ||
| 67 | scores.append(opt.best_score) |
| 68 | scores_rnd.append(opt_rnd.best_score) |
| 69 | ||
| 70 | score_mean = np.array(scores).mean() |
| 71 | score_mean_rnd = np.array(scores_rnd).mean() |
| 72 | ||
| 73 | print("\n score_mean", score_mean) |
| 74 | print("\n score_mean_rnd", score_mean_rnd) |
| 75 | ||
| 76 | score_norm = (score_mean_rnd - score_mean) / (score_mean_rnd + score_mean) |
| 77 | print("\n score_norm", score_norm) |
| 78 | ||
| 79 | assert score_norm > 0.1 |
| 80 | ||
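
With a helper like the one sketched above, each occurrence collapses to a thin wrapper. The parameter values below are copied from the two quoted blocks; `run_perf_comparison` is the hypothetical helper from the earlier sketch, and the parametrization lists `obj_func_l`, `opt_smbo_l`, and `opt_pop_l` are assumed to be the ones defined near the top of the original test modules (not shown in this report).

```python
import numpy as np
import pytest

# Assumptions: run_perf_comparison is the hypothetical helper sketched above;
# obj_func_l, opt_smbo_l and opt_pop_l come from the original test modules.


@pytest.mark.parametrize(*obj_func_l)
@pytest.mark.parametrize(*opt_smbo_l)
def test_smbo_perf_0(Optimizer, objective_function):
    score_norm = run_perf_comparison(
        Optimizer,
        objective_function,
        search_space={
            "x0": np.arange(-30, 101, 1),
            "x1": np.arange(-100, 31, 1),
        },
        initialize={"vertices": 4, "random": 3},
        n_opts=10,
        n_iter=20,
    )
    assert score_norm > 0.3


@pytest.mark.parametrize(*obj_func_l)
@pytest.mark.parametrize(*opt_pop_l)
def test_pop_perf_0(Optimizer, objective_function):
    score_norm = run_perf_comparison(
        Optimizer,
        objective_function,
        search_space={
            "x0": np.arange(-100, 101, 0.1),
            "x1": np.arange(-100, 101, 0.1),
        },
        initialize={"vertices": 4, "random": 6},
        n_opts=10,
        n_iter=1200,
    )
    assert score_norm > 0.1
```
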