@@ 7-154 (lines=148) @@ | ||
4 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
5 | ||
6 | ||
7 | class HillClimbingRepulsing(_BaseGFOadapter): |
|
8 | """Repulsing hill climbing optimizer. |
|
9 | ||
10 | Parameters |
|
11 | ---------- |
|
12 | search_space : dict[str, list] |
|
13 | The search space to explore. A dictionary with parameter |
|
14 | names as keys and lists or numpy arrays as values. |
|
15 | Optional, can be passed later via ``set_params``. |
|
16 | initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4} |
|
17 | The method to generate initial positions. A dictionary with |
|
18 | the following key literals and the corresponding value type: |
|
19 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
20 | constraints : list[callable], default=[] |
|
21 | A list of constraints, where each constraint is a callable. |
|
22 | The callable returns `True` or `False` depending on the input parameters. |
|
23 | random_state : None, int, default=None |
|
24 | If None, create a new random state. If int, create a new random state |
|
25 | seeded with the value. |
|
26 | rand_rest_p : float, default=0.1 |
|
27 | The probability of a random iteration during the search process. |
|
28 | epsilon : float, default=0.01 |
|
29 | The step-size for the climbing. |
|
30 | distribution : str, default="normal" |
|
31 | The type of distribution used to sample new candidate positions around the current one. |
|
32 | n_neighbours : int, default=10 |
|
33 | The number of neighbours to sample and evaluate before moving to the best |
|
34 | of those neighbours. |
|
35 | repulsion_factor : float, default=5 |
|
36 | The factor by which the step size is temporarily increased when an iteration fails to improve, repelling the search from the current region. |
|
37 | n_iter : int, default=100 |
|
38 | The number of iterations to run the optimizer. |
|
39 | verbose : bool, default=False |
|
40 | If True, print the progress of the optimization process. |
|
41 | experiment : BaseExperiment, optional |
|
42 | The experiment to optimize parameters for. |
|
43 | Optional, can be passed later via ``set_params``. |
|
44 | ||
45 | Examples |
|
46 | -------- |
|
47 | Repulsing hill climbing applied to scikit-learn parameter tuning: |
|
48 | ||
49 | 1. defining the experiment to optimize: |
|
50 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
51 | >>> from sklearn.datasets import load_iris |
|
52 | >>> from sklearn.svm import SVC |
|
53 | >>> |
|
54 | >>> X, y = load_iris(return_X_y=True) |
|
55 | >>> |
|
56 | >>> sklearn_exp = SklearnCvExperiment( |
|
57 | ... estimator=SVC(), |
|
58 | ... X=X, |
|
59 | ... y=y, |
|
60 | ... ) |
|
61 | ||
62 | 2. setting up the hill climbing optimizer: |
|
63 | >>> from hyperactive.opt import HillClimbingRepulsing |
|
64 | >>> import numpy as np |
|
65 | >>> |
|
66 | >>> hc_config = { |
|
67 | ... "search_space": { |
|
68 | ... "C": np.array([0.01, 0.1, 1, 10]), |
|
69 | ... "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), |
|
70 | ... }, |
|
71 | ... "n_iter": 100, |
|
72 | ... } |
|
73 | >>> hillclimbing = HillClimbingRepulsing(experiment=sklearn_exp, **hc_config) |
|
74 | ||
75 | 3. running the hill climbing search: |
|
76 | >>> best_params = hillclimbing.run() |
|
77 | ||
78 | Best parameters can also be accessed via the attributes: |
|
79 | >>> best_params = hillclimbing.best_params_ |
|
80 | """ |
|
81 | ||
82 | _tags = { |
|
83 | "info:name": "Repulsing Hill Climbing", |
|
84 | "info:local_vs_global": "mixed", # "local", "mixed", "global" |
|
85 | "info:explore_vs_exploit": "exploit", # "explore", "exploit", "mixed" |
|
86 | "info:compute": "low", # "low", "middle", "high" |
|
87 | } |
|
88 | ||
89 | def __init__( |
|
90 | self, |
|
91 | search_space=None, |
|
92 | initialize=None, |
|
93 | constraints=None, |
|
94 | random_state=None, |
|
95 | rand_rest_p=0.1, |
|
96 | epsilon=0.01, |
|
97 | distribution="normal", |
|
98 | n_neighbours=10, |
|
99 | repulsion_factor=5, |
|
100 | n_iter=100, |
|
101 | verbose=False, |
|
102 | experiment=None, |
|
103 | ): |
|
104 | self.random_state = random_state |
|
105 | self.rand_rest_p = rand_rest_p |
|
106 | self.epsilon = epsilon |
|
107 | self.distribution = distribution |
|
108 | self.n_neighbours = n_neighbours |
|
109 | self.search_space = search_space |
|
110 | self.initialize = initialize |
|
111 | self.constraints = constraints |
|
112 | self.repulsion_factor = repulsion_factor |
|
113 | self.n_iter = n_iter |
|
114 | self.experiment = experiment |
|
115 | self.verbose = verbose |
|
116 | ||
117 | super().__init__() |
|
118 | ||
119 | def _get_gfo_class(self): |
|
120 | """Get the GFO class to use. |
|
121 | ||
122 | Returns |
|
123 | ------- |
|
124 | class |
|
125 | The GFO class to use. One of the concrete GFO classes. |
|
126 | """ |
|
127 | from gradient_free_optimizers import RepulsingHillClimbingOptimizer |
|
128 | ||
129 | return RepulsingHillClimbingOptimizer |
|
130 | ||
131 | @classmethod |
|
132 | def get_test_params(cls, parameter_set="default"): |
|
133 | """Get the test parameters for the optimizer. |
|
134 | ||
135 | Returns |
|
136 | ------- |
|
137 | list of dict |
|
138 | The test parameters; a list of config dictionaries with str keys. |
|
139 | """ |
|
140 | import numpy as np |
|
141 | ||
142 | params = super().get_test_params() |
|
143 | experiment = params[0]["experiment"] |
|
144 | more_params = { |
|
145 | "experiment": experiment, |
|
146 | "repulsion_factor": 7, |
|
147 | "search_space": { |
|
148 | "C": np.array([0.01, 0.1, 1, 10]), |
|
149 | "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), |
|
150 | }, |
|
151 | "n_iter": 100, |
|
152 | } |
|
153 | params.append(more_params) |
|
154 | return params |
|
155 |
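The `constraints` parameter documented above accepts plain callables that receive a
parameter dict and return True/False. A minimal sketch of how this could be combined
with the repulsing variant; the `no_large_c_with_large_gamma` helper is illustrative
and not part of the source above:

    import numpy as np

    from hyperactive.experiment.integrations import SklearnCvExperiment
    from hyperactive.opt import HillClimbingRepulsing
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC

    X, y = load_iris(return_X_y=True)
    sklearn_exp = SklearnCvExperiment(estimator=SVC(), X=X, y=y)

    # hypothetical constraint: rule out the corner of the grid where both
    # C and gamma are large; returns True for feasible candidates
    def no_large_c_with_large_gamma(params):
        return not (params["C"] >= 10 and params["gamma"] >= 1)

    hillclimbing = HillClimbingRepulsing(
        search_space={
            "C": np.array([0.01, 0.1, 1, 10]),
            "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]),
        },
        constraints=[no_large_c_with_large_gamma],
        repulsion_factor=5,  # enlarges the step size after non-improving moves
        n_iter=100,
        experiment=sklearn_exp,
    )
    best_params = hillclimbing.run()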
@@ 7-154 (lines=148) @@ | ||
4 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
5 | ||
6 | ||
7 | class HillClimbingStochastic(_BaseGFOadapter): |
|
8 | """Stochastic hill climbing optimizer. |
|
9 | ||
10 | Parameters |
|
11 | ---------- |
|
12 | search_space : dict[str, list] |
|
13 | The search space to explore. A dictionary with parameter |
|
14 | names as keys and a numpy array as values. |
|
15 | Optional, can be passed later via ``set_params``. |
|
16 | initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4} |
|
17 | The method to generate initial positions. A dictionary with |
|
18 | the following key literals and the corresponding value type: |
|
19 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
20 | constraints : list[callable], default=[] |
|
21 | A list of constraints, where each constraint is a callable. |
|
22 | The callable returns `True` or `False` depending on the input parameters. |
|
23 | random_state : None, int, default=None |
|
24 | If None, create a new random state. If int, create a new random state |
|
25 | seeded with the value. |
|
26 | rand_rest_p : float, default=0.1 |
|
27 | The probability of a random iteration during the search process. |
|
28 | epsilon : float, default=0.01 |
|
29 | The step-size for the climbing. |
|
30 | distribution : str, default="normal" |
|
31 | The type of distribution used to sample new candidate positions around the current one. |
|
32 | n_neighbours : int, default=10 |
|
33 | The number of neighbours to sample and evaluate before moving to the best |
|
34 | of those neighbours. |
|
35 | p_accept : float, default=0.5 |
|
36 | The probability of accepting a transition to a worse position, which lets the search escape local optima. |
|
37 | n_iter : int, default=100 |
|
38 | The number of iterations to run the optimizer. |
|
39 | verbose : bool, default=False |
|
40 | If True, print the progress of the optimization process. |
|
41 | experiment : BaseExperiment, optional |
|
42 | The experiment to optimize parameters for. |
|
43 | Optional, can be passed later via ``set_params``. |
|
44 | ||
45 | Examples |
|
46 | -------- |
|
47 | Stochastic hill climbing applied to scikit-learn parameter tuning: |
|
48 | ||
49 | 1. defining the experiment to optimize: |
|
50 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
51 | >>> from sklearn.datasets import load_iris |
|
52 | >>> from sklearn.svm import SVC |
|
53 | >>> |
|
54 | >>> X, y = load_iris(return_X_y=True) |
|
55 | >>> |
|
56 | >>> sklearn_exp = SklearnCvExperiment( |
|
57 | ... estimator=SVC(), |
|
58 | ... X=X, |
|
59 | ... y=y, |
|
60 | ... ) |
|
61 | ||
62 | 2. setting up the hill climbing optimizer: |
|
63 | >>> from hyperactive.opt import HillClimbingStochastic |
|
64 | >>> import numpy as np |
|
65 | >>> |
|
66 | >>> hc_config = { |
|
67 | ... "search_space": { |
|
68 | ... "C": np.array([0.01, 0.1, 1, 10]), |
|
69 | ... "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), |
|
70 | ... }, |
|
71 | ... "n_iter": 100, |
|
72 | ... } |
|
73 | >>> hillclimbing = HillClimbingStochastic(experiment=sklearn_exp, **hc_config) |
|
74 | ||
75 | 3. running the hill climbing search: |
|
76 | >>> best_params = hillclimbing.run() |
|
77 | ||
78 | Best parameters can also be accessed via the attributes: |
|
79 | >>> best_params = hillclimbing.best_params_ |
|
80 | """ |
|
81 | ||
82 | _tags = { |
|
83 | "info:name": "Hill Climbing", |
|
84 | "info:local_vs_global": "local", # "local", "mixed", "global" |
|
85 | "info:explore_vs_exploit": "exploit", # "explore", "exploit", "mixed" |
|
86 | "info:compute": "low", # "low", "middle", "high" |
|
87 | } |
|
88 | ||
89 | def __init__( |
|
90 | self, |
|
91 | search_space=None, |
|
92 | initialize=None, |
|
93 | constraints=None, |
|
94 | random_state=None, |
|
95 | rand_rest_p=0.1, |
|
96 | epsilon=0.01, |
|
97 | distribution="normal", |
|
98 | n_neighbours=10, |
|
99 | p_accept=0.5, |
|
100 | n_iter=100, |
|
101 | verbose=False, |
|
102 | experiment=None, |
|
103 | ): |
|
104 | self.random_state = random_state |
|
105 | self.rand_rest_p = rand_rest_p |
|
106 | self.epsilon = epsilon |
|
107 | self.distribution = distribution |
|
108 | self.n_neighbours = n_neighbours |
|
109 | self.search_space = search_space |
|
110 | self.initialize = initialize |
|
111 | self.constraints = constraints |
|
112 | self.p_accept = p_accept |
|
113 | self.n_iter = n_iter |
|
114 | self.experiment = experiment |
|
115 | self.verbose = verbose |
|
116 | ||
117 | super().__init__() |
|
118 | ||
119 | def _get_gfo_class(self): |
|
120 | """Get the GFO class to use. |
|
121 | ||
122 | Returns |
|
123 | ------- |
|
124 | class |
|
125 | The GFO class to use. One of the concrete GFO classes. |
|
126 | """ |
|
127 | from gradient_free_optimizers import StochasticHillClimbingOptimizer |
|
128 | ||
129 | return StochasticHillClimbingOptimizer |
|
130 | ||
131 | @classmethod |
|
132 | def get_test_params(cls, parameter_set="default"): |
|
133 | """Get the test parameters for the optimizer. |
|
134 | ||
135 | Returns |
|
136 | ------- |
|
137 | list of dict |
|
138 | The test parameters; a list of config dictionaries with str keys. |
|
139 | """ |
|
140 | import numpy as np |
|
141 | ||
142 | params = super().get_test_params() |
|
143 | experiment = params[0]["experiment"] |
|
144 | more_params = { |
|
145 | "experiment": experiment, |
|
146 | "p_accept": 0.33, |
|
147 | "search_space": { |
|
148 | "C": np.array([0.01, 0.1, 1, 10]), |
|
149 | "gamma": np.array([0.0001, 0.01, 0.1, 1, 10]), |
|
150 | }, |
|
151 | "n_iter": 100, |
|
152 | } |
|
153 | params.append(more_params) |
|
154 | return params |
|
155 |
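The behavioural difference between this fragment and the previous one is the
acceptance rule: instead of repelling the step size after a failed move, the
stochastic variant sometimes accepts worse positions, with `p_accept` steering how
often. A minimal illustrative sketch of such a rule, not GFO's exact formula
(which also takes the size of the score difference into account):

    import random

    def accept_transition(score_new, score_current, p_accept=0.5):
        # toy acceptance rule: always take improvements, take worse
        # moves with fixed probability p_accept (illustrative only)
        if score_new > score_current:
            return True
        return random.random() < p_accept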