Passed
Push — master (e37ecf...2614a9) by Simon, created 04:19

EnsembleOptimizer.finish_initialization() (rating: A)

Complexity: Conditions 1
Size: Total Lines 3, Code Lines 3
Duplication: Lines 0, Ratio 0 %
Importance: Changes 0
Metric                           Value
cc   (cyclomatic complexity)     1
eloc (effective lines of code)   3
nop  (number of parameters)      1
dl   (duplicated lines)          0
loc  (lines of code)             3
rs                               10
c                                0
b                                0
f                                0
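The A rating is consistent with the function body listed below: finish_initialization() spans three lines, takes a single parameter (self), and contains no branches, so its cyclomatic complexity is 1.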
# Author: Simon Blanke
# Email: [email protected]
# License: MIT License

import numpy as np
from scipy.stats import norm

from ..smb_opt.smbo import SMBO
from ..smb_opt.surrogate_models import EnsembleRegressor
from ..smb_opt.acquisition_function import ExpectedImprovement


from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.neural_network import MLPRegressor

def normalize(array):
    # Min-max scaling of the sample scores into the range [0, 1].
    num = array - array.min()
    den = array.max() - array.min()

    if den == 0:
        # All scores are equal: fall back to random values so downstream
        # code still receives a usable, non-degenerate array.
        return np.random.random_sample(array.shape)
    else:
        return num / den

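# Illustrative example (not part of the original file): normalize maps
# np.array([1.0, 2.0, 3.0]) to array([0. , 0.5, 1. ]); a constant input
# such as np.array([2.0, 2.0]) triggers the random fallback.
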
class EnsembleOptimizer(SMBO):
    name = "Ensemble Optimizer"

    def __init__(
        self,
        *args,
        estimators=[
            GradientBoostingRegressor(n_estimators=5),
            # DecisionTreeRegressor(),
            # MLPRegressor(),
            GaussianProcessRegressor(),
        ],
        xi=0.01,
        warm_start_smbo=None,
        max_sample_size=10000000,
        sampling={"random": 1000000},
        warnings=100000000,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.estimators = estimators
        # Combine the base estimators into a single surrogate model.
        self.regr = EnsembleRegressor(estimators)
        self.xi = xi
        self.warm_start_smbo = warm_start_smbo
        self.max_sample_size = max_sample_size
        self.sampling = sampling
        self.warnings = warnings

        self.init_warm_start_smbo()

    def finish_initialization(self):
        # Enumerate every position in the search space once, before the
        # sequential model-based optimization loop starts.
        self.all_pos_comb = self._all_possible_pos()
        return super().finish_initialization()

    def _expected_improvement(self):
        # Draw a subset of candidate positions, then score each one with
        # the expected-improvement acquisition function
        # EI(x) = E[max(f(x) - f_best - xi, 0)].
        self.pos_comb = self._sampling(self.all_pos_comb)

        acqu_func = ExpectedImprovement(self.regr, self.pos_comb, self.xi)
        return acqu_func.calculate(self.X_sample, self.Y_sample)

    def _training(self):
        X_sample = np.array(self.X_sample)
        Y_sample = np.array(self.Y_sample)

        if len(Y_sample) == 0:
            # No observations yet: explore with a random move instead of
            # fitting the surrogate.
            return self.move_random()

        # Normalize the observed scores before fitting the ensemble.
        Y_sample = normalize(Y_sample).reshape(-1, 1)
        self.regr.fit(X_sample, Y_sample)
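
For readers outside the codebase, the following self-contained sketch shows the idea behind this optimizer: several regressors act together as one surrogate, and expected improvement picks the next candidate. EnsembleRegressor and ExpectedImprovement are internal to the package, so the helpers below (ensemble_predict, expected_improvement) are illustrative stand-ins that only assume numpy, scipy, and scikit-learn.

import numpy as np
from scipy.stats import norm
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.gaussian_process import GaussianProcessRegressor


def ensemble_predict(estimators, X):
    # The mean of the individual predictions serves as the surrogate's
    # estimate; their spread serves as its uncertainty.
    preds = np.array([est.predict(X) for est in estimators])
    return preds.mean(axis=0), preds.std(axis=0)


def expected_improvement(mu, sigma, y_best, xi=0.01):
    # Closed-form EI for maximization; xi trades exploration against
    # exploitation, matching the xi parameter of the optimizer above.
    sigma = np.clip(sigma, 1e-12, None)
    imp = mu - y_best - xi
    z = imp / sigma
    return imp * norm.cdf(z) + sigma * norm.pdf(z)


# Toy problem: maximize f(x) = -(x - 2)^2 on a 1D grid.
X_sample = np.array([[0.0], [1.0], [4.0]])
y_sample = -(X_sample.ravel() - 2.0) ** 2

estimators = [GradientBoostingRegressor(n_estimators=5), GaussianProcessRegressor()]
for est in estimators:
    est.fit(X_sample, y_sample)

X_cand = np.linspace(0, 4, 41).reshape(-1, 1)
mu, sigma = ensemble_predict(estimators, X_cand)
ei = expected_improvement(mu, sigma, y_sample.max())
print("next suggested x:", X_cand[np.argmax(ei)])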