Completed
Push — master ( f67568...5e5af8 )
by Simon
01:28
created

hyperactive.opt_args   A

Complexity

Total Complexity 11

Size/Duplication

Total Lines 150
Duplicated Lines 0 %

Importance

Changes 0
Metric Value
eloc 99
dl 0
loc 150
rs 10
c 0
b 0
f 0
wmc 11

4 Functions

Rating   Name   Duplication   Size   Complexity  
A never_skip_refit() 0 2 1
A skip_refit_75() 0 4 2
A skip_refit_50() 0 4 2
A skip_refit_25() 0 4 2

2 Methods

Rating   Name   Duplication   Size   Complexity  
A Arguments.set_opt_args() 0 49 3
A Arguments.__init__() 0 46 1
1
# Author: Simon Blanke
2
# Email: [email protected]
3
# License: MIT License
4
5
from .util import merge_dicts
6
from numpy.random import normal
7
8
from .optimizers.sequence_model.surrogate_models import (
9
    RandomForestRegressor,
10
    ExtraTreesRegressor,
11
    GPR_linear,
12
    GPR,
13
)
14
15
# Surrogate-model registries: map user-facing option strings to ready
# model instances so set_opt_args can resolve a name to an object.
gaussian_process = {
    "gp_nonlinear": GPR(),
    "gp_linear": GPR_linear(),
}

# Tree-based regressors selectable by name for the TPE optimizer.
tree_regressor = {
    "random_forest": RandomForestRegressor(),
    "extra_tree": ExtraTreesRegressor(),
}
21
22
23
def skip_refit_75(i, start_up=33, exponent=0.75):
    """Return how many iterations may be skipped before refitting.

    Aggressive schedule: skip count grows as ``(i - start_up) ** 0.75``.

    Parameters
    ----------
    i : int
        Current iteration number.
    start_up : int, optional
        Number of warm-up iterations during which every iteration refits
        (default 33, the original hard-coded value).
    exponent : float, optional
        Growth exponent applied past the warm-up phase (default 0.75).

    Returns
    -------
    int
        Number of iterations between refits; always >= 0, and 1 during
        the warm-up phase.
    """
    # During warm-up, refit on every iteration.
    if i <= start_up:
        return 1
    # Past warm-up the skip interval grows sublinearly with progress.
    return int((i - start_up) ** exponent)
27
28
29
def skip_refit_50(i, start_up=33, exponent=0.5):
    """Return how many iterations may be skipped before refitting.

    Moderate schedule: skip count grows as ``sqrt(i - start_up)``.

    Parameters
    ----------
    i : int
        Current iteration number.
    start_up : int, optional
        Warm-up iterations with a refit every iteration (default 33).
    exponent : float, optional
        Growth exponent applied past the warm-up phase (default 0.5).

    Returns
    -------
    int
        Number of iterations between refits; 1 during warm-up.
    """
    # During warm-up, refit on every iteration.
    if i <= start_up:
        return 1
    # Past warm-up the skip interval grows with the square root of progress.
    return int((i - start_up) ** exponent)
33
34
35
def skip_refit_25(i, start_up=33, exponent=0.25):
    """Return how many iterations may be skipped before refitting.

    Conservative schedule: skip count grows as ``(i - start_up) ** 0.25``,
    so the surrogate model is refit frequently even late in the run.

    Parameters
    ----------
    i : int
        Current iteration number.
    start_up : int, optional
        Warm-up iterations with a refit every iteration (default 33).
    exponent : float, optional
        Growth exponent applied past the warm-up phase (default 0.25).

    Returns
    -------
    int
        Number of iterations between refits; 1 during warm-up.
    """
    # During warm-up, refit on every iteration.
    if i <= start_up:
        return 1
    # Past warm-up the skip interval grows very slowly (fourth root).
    return int((i - start_up) ** exponent)
39
40
41
def never_skip_refit(i):
    """Schedule that never skips: refit the surrogate model every iteration.

    Parameters
    ----------
    i : int
        Current iteration number (ignored; kept for schedule-function
        signature compatibility).

    Returns
    -------
    int
        Always 1.
    """
    # Constant schedule — the iteration counter plays no role here.
    return 1
43
44
45
# Dispatch table: user-facing refit-frequency name -> schedule function.
# "many" skips the most refits, "never" skips none.
skip_retrain_ = dict(
    many=skip_refit_75,
    some=skip_refit_50,
    few=skip_refit_25,
    never=never_skip_refit,
)
51
52
53
class Arguments:
    """Container for optimizer hyperparameters.

    Merges user-supplied keyword arguments over a full set of defaults
    (one group per optimizer) in ``__init__``, and materializes them as
    instance attributes in :meth:`set_opt_args`.
    """

    def __init__(self, *args, **kwargs):
        """Store the merged hyperparameter dict in ``self.kwargs_opt``.

        Parameters
        ----------
        *args
            Accepted for interface compatibility; not used here.
        **kwargs
            User overrides for any of the default hyperparameters below.
        """
        defaults = {
            # HillClimbingOptimizer
            "epsilon": 0.05,
            "climb_dist": normal,
            "n_neighbours": 1,
            # StochasticHillClimbingOptimizer
            "p_down": 0.3,
            # TabuOptimizer
            "tabu_memory": 3,
            # RandomRestartHillClimbingOptimizer
            "n_restarts": 10,
            # RandomAnnealingOptimizer
            "epsilon_mod": 10,
            "annealing_rate": 0.99,
            # SimulatedAnnealingOptimizer
            "start_temp": 1,
            "norm_factor": "adaptive",
            # StochasticTunnelingOptimizer
            "gamma": 0.5,
            "warm_start_population": False,
            # ParallelTemperingOptimizer
            "system_temperatures": [0.1, 1, 10, 100],
            "n_swaps": 10,
            # ParticleSwarmOptimizer
            "n_particles": 10,
            "inertia": 0.5,
            "cognitive_weight": 0.5,
            "social_weight": 0.5,
            # EvolutionStrategyOptimizer
            "individuals": 10,
            "mutation_rate": 0.7,
            "crossover_rate": 0.3,
            # BayesianOptimizer
            "max_sample_size": 1000000,
            "warm_start_smbo": False,
            "xi": 0.01,
            "gpr": "gp_nonlinear",
            "skip_retrain": "some",
            # TreeStructuredParzenEstimators
            "start_up_evals": 10,
            "gamma_tpe": 0.3,
            # NOTE(review): "tree_regressor" shares its name with the
            # module-level registry dict — intentional per the original.
            "tree_regressor": "random_forest",
        }

        # User kwargs win over defaults.
        self.kwargs_opt = merge_dicts(defaults, kwargs)

    def set_opt_args(self, n_iter):
        """Expose each hyperparameter as an instance attribute.

        Parameters
        ----------
        n_iter : int
            Total iteration budget; used to derive per-restart and
            per-swap iteration counts.
        """
        opt = self.kwargs_opt

        # Plain pass-through options: copied verbatim onto the instance.
        for key in (
            "epsilon", "climb_dist", "n_neighbours",
            "p_down",
            "tabu_memory",
            "n_restarts",
            "epsilon_mod", "annealing_rate",
            "start_temp", "norm_factor",
            "gamma",
            "system_temperatures", "n_swaps",
            "n_particles", "inertia", "cognitive_weight", "social_weight",
            "individuals", "mutation_rate", "crossover_rate",
            "max_sample_size", "warm_start_smbo", "xi",
            "start_up_evals", "gamma_tpe",
        ):
            setattr(self, key, opt[key])

        # Derived per-phase iteration counts.
        self.n_iter_restart = int(n_iter / self.n_restarts)
        self.n_iter_swap = int(n_iter / self.n_swaps)

        # "gpr" may be a registry name (str) or a ready model instance.
        gpr = opt["gpr"]
        self.gpr = gaussian_process[gpr] if isinstance(gpr, str) else gpr

        # Refit schedule is always looked up by name.
        self.skip_retrain = skip_retrain_[opt["skip_retrain"]]

        # Same name-or-instance convention for the TPE tree regressor.
        reg = opt["tree_regressor"]
        self.tree_regressor = (
            tree_regressor[reg] if isinstance(reg, str) else reg
        )
150