"""Hill climbing optimizer from gfo."""
# copyright: hyperactive developers, MIT License (see LICENSE file)

from gradient_free_optimizers import HillClimbingOptimizer
from hyperactive.base import BaseOptimizer
from skbase.utils.stdout_mute import StdoutMute


class HillClimbing(BaseOptimizer):
    """Hill climbing optimizer.

    Parameters
    ----------
    search_space : dict[str, list]
        The search space to explore. A dictionary with parameter
        names as keys and numpy arrays as values.
        Optional, can be passed later in ``add_search``.
    initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4}
        The method to generate initial positions. A dictionary with
        the following key literals and the corresponding value type:
        {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]}
    constraints : list[callable], default=[]
        A list of constraints, where each constraint is a callable.
        The callable returns ``True`` or ``False`` depending on the input parameters.
    random_state : None, int, default=None
        If None, create a new random state. If int, create a new random state
        seeded with the value.
    rand_rest_p : float, default=0.1
        The probability of a random iteration during the search process.
    epsilon : float, default=0.01
        The step-size for the climbing.
    distribution : str, default="normal"
        The type of distribution to sample from.
    n_neighbours : int, default=10
        The number of neighbours to sample and evaluate before moving to the best
        of those neighbours.
    n_iter : int, default=100
        The number of iterations to run the optimizer.
    verbose : bool, default=False
        If True, print the progress of the optimization process.
    experiment : BaseExperiment, optional
        The experiment to optimize parameters for.
        Optional, can be passed later in ``add_search``.

    Examples
    --------
    Hill climbing applied to scikit-learn parameter tuning:

    1. defining the experiment to optimize:
    >>> from hyperactive.experiment.integrations import SklearnCvExperiment
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.svm import SVC
    >>>
    >>> X, y = load_iris(return_X_y=True)
    >>>
    >>> sklearn_exp = SklearnCvExperiment(
    ...     estimator=SVC(),
    ...     X=X,
    ...     y=y,
    ... )

    2. setting up the hill climbing optimizer:
    >>> from hyperactive.opt import HillClimbing
    >>> import numpy as np
    >>>
    >>> hillclimbing_config = {
    ...     "search_space": {
    ...         "C": np.array([0.01, 0.1, 1, 10]),
    ...         "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]),
    ...     },
    ...     "n_iter": 100,
    ... }
    >>> hillclimbing = HillClimbing(experiment=sklearn_exp, **hillclimbing_config)

    3. running the hill climbing search:
    >>> best_params = hillclimbing.run()

    Best parameters can also be accessed via the attributes:
    >>> best_params = hillclimbing.best_params_
    """

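    # estimator tags: declare the required gradient-free-optimizers version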
    _tags = {
        "python_dependencies": ["gradient-free-optimizers>=1.5.0"],
    }

    def __init__(
        self,
        search_space=None,
        initialize=None,
        constraints=None,
        random_state=None,
        rand_rest_p=0.1,
        epsilon=0.01,
        distribution="normal",
        n_neighbours=10,
        n_iter=100,
        verbose=False,
        experiment=None,
    ):
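        # write all constructor arguments to self unchanged
        # (skbase convention: parameters are not modified in __init__)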
        self.random_state = random_state
        self.rand_rest_p = rand_rest_p
        self.epsilon = epsilon
        self.distribution = distribution
        self.n_neighbours = n_neighbours
        self.search_space = search_space
        self.initialize = initialize
        self.constraints = constraints
        self.n_iter = n_iter
        self.experiment = experiment
        self.verbose = verbose

        super().__init__()

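        # resolve the mutable default for initialize here, so the public
        # parameter self.initialize stays exactly as passed by the user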
        if initialize is None:
            self._initialize = {"grid": 4, "random": 2, "vertices": 4}
        else:
            self._initialize = initialize

    def get_search_config(self):
        """Get the search configuration.

        Returns
        -------
        dict with str keys
            The search configuration dictionary.
        """
        search_config = super().get_search_config()
        search_config["initialize"] = self._initialize
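        # verbose is removed since it is consumed by StdoutMute in _run
        # rather than passed on to the backend optimizer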
        del search_config["verbose"]
        return search_config

    def _run(self, experiment, **search_config):
        """Run the optimization search process.

        Parameters
        ----------
        experiment : BaseExperiment
            The experiment to optimize parameters for.
        search_config : dict with str keys
            Identical to the return of ``get_search_config``.

        Returns
        -------
        dict with str keys
            The best parameters found during the search.
            The keys must be a subset of, or identical to, ``experiment.paramnames()``.
        """
        n_iter = search_config.pop("n_iter", 100)
        max_time = search_config.pop("max_time", None)

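        # n_iter and max_time steer the run itself and were popped above;
        # the remaining entries of search_config are passed to the backend
        # hill climbing optimizer from gradient-free-optimizers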
        hcopt = HillClimbingOptimizer(**search_config)

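        # mute the backend's stdout progress output unless verbose is requested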
        with StdoutMute(active=not self.verbose):
            hcopt.search(
                objective_function=experiment.score,
                n_iter=n_iter,
                max_time=max_time,
            )
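        # best_para is the parameter setting with the best score observed during the search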
        best_params = hcopt.best_para
        return best_params

    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the skbase object.

        ``get_test_params`` is a unified interface point to store
        parameter settings for testing purposes. This function is also
        used in ``create_test_instance`` and ``create_test_instances_and_names``
        to construct test instances.

        ``get_test_params`` should return a single ``dict``, or a ``list`` of ``dict``.

        Each ``dict`` is a parameter configuration for testing,
        and can be used to construct an "interesting" test instance.
        A call to ``cls(**params)`` should
        be valid for all dictionaries ``params`` in the return of ``get_test_params``.

        ``get_test_params`` need not return fixed lists of dictionaries,
        it can also return dynamic or stochastic parameter settings.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return, for use in tests. If no
            special parameters are defined for a value, will return the `"default"` set.

        Returns
        -------
        params : dict or list of dict, default = {}
            Parameters to create testing instances of the class.
            Each dict contains parameters to construct an "interesting" test instance, i.e.,
            `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
            `create_test_instance` uses the first (or only) dictionary in `params`.
        """
        import numpy as np
        from hyperactive.experiment.integrations import SklearnCvExperiment

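        # test case 1: scikit-learn CV experiment, searched over a C/gamma grid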
        sklearn_exp = SklearnCvExperiment.create_test_instance()
        params_sklearn = {
            "experiment": sklearn_exp,
            "search_space": {
                "C": np.array([0.01, 0.1, 1, 10]),
                "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]),
            },
            "n_iter": 100,
        }

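        # test case 2: Ackley toy function experiment, searched over a
        # 10-point grid per coordinate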
        from hyperactive.experiment.toy import Ackley

        ackley_exp = Ackley.create_test_instance()
        params_ackley = {
            "experiment": ackley_exp,
            "search_space": {
                "x0": np.linspace(-5, 5, 10),
                "x1": np.linspace(-5, 5, 10),
            },
            "n_iter": 100,
        }

        return [params_sklearn, params_ackley]