@@ 30-75 (lines=46) @@
27 |     return ((num / den) + 0) / 1
28 | 
29 | 
30 | class EnsembleOptimizer(SMBO):
31 |     name = "Ensemble Optimizer"
32 | 
33 |     def __init__(
34 |         self,
35 |         *args,
36 |         estimators=[
37 |             GradientBoostingRegressor(n_estimators=5),
38 |             # DecisionTreeRegressor(),
39 |             # MLPRegressor(),
40 |             GaussianProcessRegressor(),
41 |         ],
42 |         xi=0.01,
43 |         warm_start_smbo=None,
44 |         max_sample_size=10000000,
45 |         sampling={"random": 1000000},
46 |         warnings=100000000,
47 |         **kwargs
48 |     ):
49 |         super().__init__(*args, **kwargs)
50 |         self.estimators = estimators
51 |         self.regr = EnsembleRegressor(estimators)
52 |         self.xi = xi
53 |         self.warm_start_smbo = warm_start_smbo
54 |         self.max_sample_size = max_sample_size
55 |         self.sampling = sampling
56 |         self.warnings = warnings
57 | 
58 |         self.init_warm_start_smbo()
59 | 
60 |     def _expected_improvement(self):
61 |         all_pos_comb = self._all_possible_pos()
62 |         self.pos_comb = self._sampling(all_pos_comb)
63 | 
64 |         acqu_func = ExpectedImprovement(self.regr, self.pos_comb, self.xi)
65 |         return acqu_func.calculate(self.X_sample, self.Y_sample)
66 | 
67 |     def _training(self):
68 |         X_sample = np.array(self.X_sample)
69 |         Y_sample = np.array(self.Y_sample)
70 | 
71 |         if len(Y_sample) == 0:
72 |             return self.move_random()
73 | 
74 |         Y_sample = normalize(Y_sample).reshape(-1, 1)
75 |         self.regr.fit(X_sample, Y_sample)
76 | 
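`_expected_improvement` above samples candidate positions and hands them, together with the surrogate and `xi`, to `ExpectedImprovement(...).calculate(...)`. For orientation, a generic expected-improvement acquisition function looks roughly like the sketch below. This is not the library's implementation: it assumes the surrogate follows scikit-learn's `predict(X, return_std=True)` convention (as `GaussianProcessRegressor` does) and that higher objective scores are better.

```python
import numpy as np
from scipy.stats import norm


def expected_improvement(regr, candidates, Y_sample, xi=0.01):
    """Generic EI sketch (illustrative only): score each candidate position by
    the expected amount it improves on the best score observed so far,
    weighted by the surrogate's predictive uncertainty."""
    mu, sigma = regr.predict(candidates, return_std=True)
    best_y = np.max(Y_sample)  # best observed score (maximization setting)

    imp = mu - best_y - xi  # larger xi pushes toward exploration
    with np.errstate(divide="ignore", invalid="ignore"):
        Z = imp / sigma
        ei = imp * norm.cdf(Z) + sigma * norm.pdf(Z)
    ei[sigma == 0.0] = 0.0  # no predictive uncertainty, no expected improvement
    return ei
```

Because `_training` fits the surrogate on `normalize(Y_sample)`, improvements computed this way live on the normalized score scale; the `xi` defaults (0.01 here, 0.03 in the Bayesian optimizer below) shift the exploration/exploitation trade-off.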
@@ 30-71 (lines=42) @@
27 |     return ((num / den) + 0) / 1
28 | 
29 | 
30 | class BayesianOptimizer(SMBO):
31 |     name = "Bayesian Optimization"
32 |     _name_ = "bayesian_optimization"
33 | 
34 |     def __init__(
35 |         self,
36 |         *args,
37 |         gpr=gaussian_process["gp_nonlinear"],
38 |         xi=0.03,
39 |         warm_start_smbo=None,
40 |         max_sample_size=10000000,
41 |         sampling={"random": 1000000},
42 |         warnings=100000000,
43 |         **kwargs
44 |     ):
45 |         super().__init__(*args, **kwargs)
46 |         self.gpr = gpr
47 |         self.regr = gpr
48 |         self.xi = xi
49 |         self.warm_start_smbo = warm_start_smbo
50 |         self.max_sample_size = max_sample_size
51 |         self.sampling = sampling
52 |         self.warnings = warnings
53 | 
54 |         self.init_warm_start_smbo()
55 | 
56 |     def _expected_improvement(self):
57 |         all_pos_comb = self._all_possible_pos()
58 |         self.pos_comb = self._sampling(all_pos_comb)
59 | 
60 |         acqu_func = ExpectedImprovement(self.regr, self.pos_comb, self.xi)
61 |         return acqu_func.calculate(self.X_sample, self.Y_sample)
62 | 
63 |     def _training(self):
64 |         X_sample = np.array(self.X_sample)
65 |         Y_sample = np.array(self.Y_sample)
66 | 
67 |         if len(Y_sample) == 0:
68 |             return self.move_random()
69 | 
70 |         Y_sample = normalize(Y_sample).reshape(-1, 1)
71 |         self.regr.fit(X_sample, Y_sample)
72 | 
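The two flagged blocks are identical apart from the class names, the default `xi` (0.01 vs. 0.03), and which surrogate the constructor builds (an `EnsembleRegressor` over several estimators vs. a single Gaussian-process regressor). One way to remove the duplication is to hoist the shared constructor bookkeeping, `_expected_improvement`, and `_training` into a common intermediate base. The sketch below is hypothetical, not the library's code: the name `_SurrogateSMBO` and the `build_regressor` hook are invented for illustration, and `SMBO`, `ExpectedImprovement`, `EnsembleRegressor`, and `normalize` are assumed to be the helpers already imported by the surrounding module. It also replaces the mutable default arguments (`estimators=[...]`, `sampling={...}`) with `None`, since mutable defaults are shared across all instances of a class.

```python
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.gaussian_process import GaussianProcessRegressor

# SMBO, ExpectedImprovement, EnsembleRegressor and normalize are assumed to be
# the helpers already used by the duplicated code above.


class _SurrogateSMBO(SMBO):
    """Hypothetical shared base holding everything the two optimizers have in common."""

    def __init__(
        self,
        *args,
        xi=0.01,
        warm_start_smbo=None,
        max_sample_size=10000000,
        sampling=None,  # None instead of a shared mutable dict default
        warnings=100000000,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.regr = self.build_regressor()  # hook supplied by each subclass
        self.xi = xi
        self.warm_start_smbo = warm_start_smbo
        self.max_sample_size = max_sample_size
        self.sampling = sampling if sampling is not None else {"random": 1000000}
        self.warnings = warnings

        self.init_warm_start_smbo()

    def build_regressor(self):
        raise NotImplementedError  # each optimizer defines its surrogate

    def _expected_improvement(self):
        all_pos_comb = self._all_possible_pos()
        self.pos_comb = self._sampling(all_pos_comb)

        acqu_func = ExpectedImprovement(self.regr, self.pos_comb, self.xi)
        return acqu_func.calculate(self.X_sample, self.Y_sample)

    def _training(self):
        X_sample = np.array(self.X_sample)
        Y_sample = np.array(self.Y_sample)

        if len(Y_sample) == 0:
            return self.move_random()

        Y_sample = normalize(Y_sample).reshape(-1, 1)
        self.regr.fit(X_sample, Y_sample)


class EnsembleOptimizer(_SurrogateSMBO):
    name = "Ensemble Optimizer"

    def __init__(self, *args, estimators=None, xi=0.01, **kwargs):
        # self.estimators must exist before super().__init__ calls build_regressor()
        self.estimators = estimators or [
            GradientBoostingRegressor(n_estimators=5),
            GaussianProcessRegressor(),
        ]
        super().__init__(*args, xi=xi, **kwargs)

    def build_regressor(self):
        return EnsembleRegressor(self.estimators)
```

`BayesianOptimizer` would shrink the same way: it keeps `name`, `_name_`, its `gpr=gaussian_process["gp_nonlinear"]` and `xi=0.03` defaults, and a `build_regressor` that simply returns `self.gpr`.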