| @@ 4-157 (lines=154) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class ParticleSwarmOptimizer(_BaseGFOadapter): |
|
| 5 | """Particle swarm optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | population : int |
|
| 25 | The number of particles in the swarm. |
|
| 26 | inertia : float |
|
| 27 | The inertia of the swarm. |
|
| 28 | cognitive_weight : float |
|
| 29 | A factor of the movement towards the personal best position of the |
|
| 30 | individual optimizers in the population. |
|
| 31 | social_weight : float |
|
| 32 | A factor of the movement towards the global best position of the |
|
| 33 | individual optimizers in the population. |
|
| 34 | temp_weight : float |
|
| 35 | The temperature weight of the swarm. |
|
| 36 | n_iter : int, default=100 |
|
| 37 | The number of iterations to run the optimizer. |
|
| 38 | verbose : bool, default=False |
|
| 39 | If True, print the progress of the optimization process. |
|
| 40 | experiment : BaseExperiment, optional |
|
| 41 | The experiment to optimize parameters for. |
|
| 42 | Optional, can be passed later via ``set_params``. |
|
| 43 | ||
| 44 | Examples |
|
| 45 | -------- |
|
| 46 | Basic usage of ParticleSwarmOptimizer with a scikit-learn experiment: |
|
| 47 | ||
| 48 | 1. defining the experiment to optimize: |
|
| 49 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 50 | >>> from sklearn.datasets import load_iris |
|
| 51 | >>> from sklearn.svm import SVC |
|
| 52 | >>> |
|
| 53 | >>> X, y = load_iris(return_X_y=True) |
|
| 54 | >>> |
|
| 55 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 56 | ... estimator=SVC(), |
|
| 57 | ... X=X, |
|
| 58 | ... y=y, |
|
| 59 | ... ) |
|
| 60 | ||
| 61 | 2. setting up the ParticleSwarmOptimizer: |
|
| 62 | >>> from hyperactive.opt import ParticleSwarmOptimizer |
|
| 63 | >>> import numpy as np |
|
| 64 | >>> |
|
| 65 | >>> config = { |
|
| 66 | ... "search_space": { |
|
| 67 | ... "C": [0.01, 0.1, 1, 10], |
|
| 68 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 69 | ... }, |
|
| 70 | ... "n_iter": 100, |
|
| 71 | ... } |
|
| 72 | >>> optimizer = ParticleSwarmOptimizer(experiment=sklearn_exp, **config) |
|
| 73 | ||
| 74 | 3. running the optimization: |
|
| 75 | >>> best_params = optimizer.solve() |
|
| 76 | ||
| 77 | Best parameters can also be accessed via: |
|
| 78 | >>> best_params = optimizer.best_params_ |
|
| 79 | """ |
|
| 80 | ||
| 81 | _tags = { |
|
| 82 | "info:name": "Particle Swarm Optimization", |
|
| 83 | "info:local_vs_global": "global", |
|
| 84 | "info:explore_vs_exploit": "explore", |
|
| 85 | "info:compute": "middle", |
|
| 86 | } |
|
| 87 | ||
| 88 | def __init__( |
|
| 89 | self, |
|
| 90 | search_space=None, |
|
| 91 | initialize=None, |
|
| 92 | constraints=None, |
|
| 93 | random_state=None, |
|
| 94 | rand_rest_p=0.1, |
|
| 95 | population=10, |
|
| 96 | inertia=0.5, |
|
| 97 | cognitive_weight=0.5, |
|
| 98 | social_weight=0.5, |
|
| 99 | temp_weight=0.2, |
|
| 100 | n_iter=100, |
|
| 101 | verbose=False, |
|
| 102 | experiment=None, |
|
| 103 | ): |
|
| 104 | self.random_state = random_state |
|
| 105 | self.rand_rest_p = rand_rest_p |
|
| 106 | self.population = population |
|
| 107 | self.inertia = inertia |
|
| 108 | self.cognitive_weight = cognitive_weight |
|
| 109 | self.social_weight = social_weight |
|
| 110 | self.temp_weight = temp_weight |
|
| 111 | self.search_space = search_space |
|
| 112 | self.initialize = initialize |
|
| 113 | self.constraints = constraints |
|
| 114 | self.n_iter = n_iter |
|
| 115 | self.experiment = experiment |
|
| 116 | self.verbose = verbose |
|
| 117 | ||
| 118 | super().__init__() |
|
| 119 | ||
| 120 | def _get_gfo_class(self): |
|
| 121 | """Get the GFO class to use. |
|
| 122 | ||
| 123 | Returns |
|
| 124 | ------- |
|
| 125 | class |
|
| 126 | The GFO class to use. One of the concrete GFO classes. |
|
| 127 | """ |
|
| 128 | from gradient_free_optimizers import ParticleSwarmOptimizer |
|
| 129 | ||
| 130 | return ParticleSwarmOptimizer |
|
| 131 | ||
| 132 | @classmethod |
|
| 133 | def get_test_params(cls, parameter_set="default"): |
|
| 134 | """Get the test parameters for the optimizer. |
|
| 135 | ||
| 136 | Returns |
|
| 137 | ------- |
|
| 138 | list of dict |
|
| 139 | The list of test parameter dictionaries. |
|
| 140 | """ |
|
| 141 | params = super().get_test_params() |
|
| 142 | experiment = params[0]["experiment"] |
|
| 143 | more_params = { |
|
| 144 | "experiment": experiment, |
|
| 145 | "population": 15, |
|
| 146 | "inertia": 0.9, |
|
| 147 | "cognitive_weight": 0.9, |
|
| 148 | "social_weight": 0.9, |
|
| 149 | "temp_weight": 0.9, |
|
| 150 | "search_space": { |
|
| 151 | "C": [0.01, 0.1, 1, 10], |
|
| 152 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 153 | }, |
|
| 154 | "n_iter": 100, |
|
| 155 | } |
|
| 156 | params.append(more_params) |
|
| 157 | return params |
|
| 158 | ||
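
The `constraints` parameter is described in every docstring above but never shown with a concrete value. A minimal sketch, assuming (as in gradient-free-optimizers) that each callable receives the candidate parameters as a dict; the feasibility rule `gamma_below_c` is invented for illustration:

```python
from hyperactive.opt import ParticleSwarmOptimizer

def gamma_below_c(params):
    # Feasible only where gamma is smaller than C (illustrative rule).
    return params["gamma"] < params["C"]

optimizer = ParticleSwarmOptimizer(
    search_space={
        "C": [0.01, 0.1, 1, 10],
        "gamma": [0.0001, 0.01, 0.1, 1, 10],
    },
    constraints=[gamma_below_c],  # positions failing the check are rejected
    n_iter=100,
)
```
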
| @@ 4-157 (lines=154) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class EvolutionStrategy(_BaseGFOadapter): |
|
| 5 | """Evolution strategy optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | population : int |
|
| 25 | The number of individuals in the population. |
|
| 26 | offspring : int |
|
| 27 | The number of offspring to generate in each generation. |
|
| 28 | replace_parents : bool |
|
| 29 | If True, the parents are replaced with the offspring in the next |
|
| 30 | generation. If False, the parents are kept in the next generation and the |
|
| 31 | offspring are added to the population. |
|
| 32 | mutation_rate : float |
|
| 33 | The mutation rate for the mutation operator. |
|
| 34 | crossover_rate : float |
|
| 35 | The crossover rate for the crossover operator. |
|
| 36 | n_iter : int, default=100 |
|
| 37 | The number of iterations to run the optimizer. |
|
| 38 | verbose : bool, default=False |
|
| 39 | If True, print the progress of the optimization process. |
|
| 40 | experiment : BaseExperiment, optional |
|
| 41 | The experiment to optimize parameters for. |
|
| 42 | Optional, can be passed later via ``set_params``. |
|
| 43 | ||
| 44 | Examples |
|
| 45 | -------- |
|
| 46 | Basic usage of EvolutionStrategy with a scikit-learn experiment: |
|
| 47 | ||
| 48 | 1. defining the experiment to optimize: |
|
| 49 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 50 | >>> from sklearn.datasets import load_iris |
|
| 51 | >>> from sklearn.svm import SVC |
|
| 52 | >>> |
|
| 53 | >>> X, y = load_iris(return_X_y=True) |
|
| 54 | >>> |
|
| 55 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 56 | ... estimator=SVC(), |
|
| 57 | ... X=X, |
|
| 58 | ... y=y, |
|
| 59 | ... ) |
|
| 60 | ||
| 61 | 2. setting up the EvolutionStrategy optimizer: |
|
| 62 | >>> from hyperactive.opt import EvolutionStrategy |
|
| 63 | >>> import numpy as np |
|
| 64 | >>> |
|
| 65 | >>> config = { |
|
| 66 | ... "search_space": { |
|
| 67 | ... "C": [0.01, 0.1, 1, 10], |
|
| 68 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 69 | ... }, |
|
| 70 | ... "n_iter": 100, |
|
| 71 | ... } |
|
| 72 | >>> optimizer = EvolutionStrategy(experiment=sklearn_exp, **config) |
|
| 73 | ||
| 74 | 3. running the optimization: |
|
| 75 | >>> best_params = optimizer.solve() |
|
| 76 | ||
| 77 | Best parameters can also be accessed via: |
|
| 78 | >>> best_params = optimizer.best_params_ |
|
| 79 | """ |
|
| 80 | ||
| 81 | _tags = { |
|
| 82 | "info:name": "Evolution Strategy", |
|
| 83 | "info:local_vs_global": "global", |
|
| 84 | "info:explore_vs_exploit": "explore", |
|
| 85 | "info:compute": "middle", |
|
| 86 | } |
|
| 87 | ||
| 88 | def __init__( |
|
| 89 | self, |
|
| 90 | search_space=None, |
|
| 91 | initialize=None, |
|
| 92 | constraints=None, |
|
| 93 | random_state=None, |
|
| 94 | rand_rest_p=0.1, |
|
| 95 | population=10, |
|
| 96 | offspring=20, |
|
| 97 | replace_parents=False, |
|
| 98 | mutation_rate=0.7, |
|
| 99 | crossover_rate=0.3, |
|
| 100 | n_iter=100, |
|
| 101 | verbose=False, |
|
| 102 | experiment=None, |
|
| 103 | ): |
|
| 104 | self.random_state = random_state |
|
| 105 | self.rand_rest_p = rand_rest_p |
|
| 106 | self.population = population |
|
| 107 | self.offspring = offspring |
|
| 108 | self.replace_parents = replace_parents |
|
| 109 | self.mutation_rate = mutation_rate |
|
| 110 | self.crossover_rate = crossover_rate |
|
| 111 | self.search_space = search_space |
|
| 112 | self.initialize = initialize |
|
| 113 | self.constraints = constraints |
|
| 114 | self.n_iter = n_iter |
|
| 115 | self.experiment = experiment |
|
| 116 | self.verbose = verbose |
|
| 117 | ||
| 118 | super().__init__() |
|
| 119 | ||
| 120 | def _get_gfo_class(self): |
|
| 121 | """Get the GFO class to use. |
|
| 122 | ||
| 123 | Returns |
|
| 124 | ------- |
|
| 125 | class |
|
| 126 | The GFO class to use. One of the concrete GFO classes. |
|
| 127 | """ |
|
| 128 | from gradient_free_optimizers import EvolutionStrategyOptimizer |
|
| 129 | ||
| 130 | return EvolutionStrategyOptimizer |
|
| 131 | ||
| 132 | @classmethod |
|
| 133 | def get_test_params(cls, parameter_set="default"): |
|
| 134 | """Get the test parameters for the optimizer. |
|
| 135 | ||
| 136 | Returns |
|
| 137 | ------- |
|
| 138 | list of dict |
|
| 139 | The list of test parameter dictionaries. |
|
| 140 | """ |
|
| 141 | params = super().get_test_params() |
|
| 142 | experiment = params[0]["experiment"] |
|
| 143 | more_params = { |
|
| 144 | "experiment": experiment, |
|
| 145 | "population": 15, |
|
| 146 | "offspring": 10, |
|
| 147 | "replace_parents": True, |
|
| 148 | "mutation_rate": 1, |
|
| 149 | "crossover_rate": 2, |
|
| 150 | "search_space": { |
|
| 151 | "C": [0.01, 0.1, 1, 10], |
|
| 152 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 153 | }, |
|
| 154 | "n_iter": 100, |
|
| 155 | } |
|
| 156 | params.append(more_params) |
|
| 157 | return params |
|
| 158 | ||
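
The `initialize` dictionary format appears in each docstring without a concrete value. A short sketch with the counts and warm-start position chosen arbitrarily:

```python
from hyperactive.opt import EvolutionStrategy

optimizer = EvolutionStrategy(
    search_space={
        "C": [0.01, 0.1, 1, 10],
        "gamma": [0.0001, 0.01, 0.1, 1, 10],
    },
    # 2 grid points, 2 search-space vertices, 1 random point, plus one
    # explicit warm-start position given as a parameter dict.
    initialize={
        "grid": 2,
        "vertices": 2,
        "random": 1,
        "warm_start": [{"C": 1, "gamma": 0.01}],
    },
    n_iter=100,
)
```
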
| @@ 4-150 (lines=147) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class DownhillSimplexOptimizer(_BaseGFOadapter): |
|
| 5 | """Downhill simplex optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | alpha : float |
|
| 25 | The reflection parameter of the simplex algorithm. |
|
| 26 | gamma : float |
|
| 27 | The expansion parameter of the simplex algorithm. |
|
| 28 | beta : float |
|
| 29 | The contraction parameter of the simplex algorithm. |
|
| 30 | sigma : float |
|
| 31 | The shrinking parameter of the simplex algorithm. |
|
| 32 | n_iter : int, default=100 |
|
| 33 | The number of iterations to run the optimizer. |
|
| 34 | verbose : bool, default=False |
|
| 35 | If True, print the progress of the optimization process. |
|
| 36 | experiment : BaseExperiment, optional |
|
| 37 | The experiment to optimize parameters for. |
|
| 38 | Optional, can be passed later via ``set_params``. |
|
| 39 | ||
| 40 | Examples |
|
| 41 | -------- |
|
| 42 | Basic usage of DownhillSimplexOptimizer with a scikit-learn experiment: |
|
| 43 | ||
| 44 | 1. defining the experiment to optimize: |
|
| 45 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 46 | >>> from sklearn.datasets import load_iris |
|
| 47 | >>> from sklearn.svm import SVC |
|
| 48 | >>> |
|
| 49 | >>> X, y = load_iris(return_X_y=True) |
|
| 50 | >>> |
|
| 51 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 52 | ... estimator=SVC(), |
|
| 53 | ... X=X, |
|
| 54 | ... y=y, |
|
| 55 | ... ) |
|
| 56 | ||
| 57 | 2. setting up the DownhillSimplexOptimizer: |
|
| 58 | >>> from hyperactive.opt import DownhillSimplexOptimizer |
|
| 59 | >>> import numpy as np |
|
| 60 | >>> |
|
| 61 | >>> config = { |
|
| 62 | ... "search_space": { |
|
| 63 | ... "C": [0.01, 0.1, 1, 10], |
|
| 64 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 65 | ... }, |
|
| 66 | ... "n_iter": 100, |
|
| 67 | ... } |
|
| 68 | >>> optimizer = DownhillSimplexOptimizer(experiment=sklearn_exp, **config) |
|
| 69 | ||
| 70 | 3. running the optimization: |
|
| 71 | >>> best_params = optimizer.solve() |
|
| 72 | ||
| 73 | Best parameters can also be accessed via: |
|
| 74 | >>> best_params = optimizer.best_params_ |
|
| 75 | """ |
|
| 76 | ||
| 77 | _tags = { |
|
| 78 | "info:name": "Downhill Simplex", |
|
| 79 | "info:local_vs_global": "local", |
|
| 80 | "info:explore_vs_exploit": "exploit", |
|
| 81 | "info:compute": "low", |
|
| 82 | } |
|
| 83 | ||
| 84 | def __init__( |
|
| 85 | self, |
|
| 86 | search_space=None, |
|
| 87 | initialize=None, |
|
| 88 | constraints=None, |
|
| 89 | random_state=None, |
|
| 90 | rand_rest_p=0.1, |
|
| 91 | alpha=1, |
|
| 92 | gamma=2, |
|
| 93 | beta=0.5, |
|
| 94 | sigma=0.5, |
|
| 95 | n_iter=100, |
|
| 96 | verbose=False, |
|
| 97 | experiment=None, |
|
| 98 | ): |
|
| 99 | self.random_state = random_state |
|
| 100 | self.rand_rest_p = rand_rest_p |
|
| 101 | self.alpha = alpha |
|
| 102 | self.gamma = gamma |
|
| 103 | self.beta = beta |
|
| 104 | self.sigma = sigma |
|
| 105 | self.search_space = search_space |
|
| 106 | self.initialize = initialize |
|
| 107 | self.constraints = constraints |
|
| 108 | self.n_iter = n_iter |
|
| 109 | self.experiment = experiment |
|
| 110 | self.verbose = verbose |
|
| 111 | ||
| 112 | super().__init__() |
|
| 113 | ||
| 114 | def _get_gfo_class(self): |
|
| 115 | """Get the GFO class to use. |
|
| 116 | ||
| 117 | Returns |
|
| 118 | ------- |
|
| 119 | class |
|
| 120 | The GFO class to use. One of the concrete GFO classes. |
|
| 121 | """ |
|
| 122 | from gradient_free_optimizers import DownhillSimplexOptimizer |
|
| 123 | ||
| 124 | return DownhillSimplexOptimizer |
|
| 125 | ||
| 126 | @classmethod |
|
| 127 | def get_test_params(cls, parameter_set="default"): |
|
| 128 | """Get the test parameters for the optimizer. |
|
| 129 | ||
| 130 | Returns |
|
| 131 | ------- |
|
| 132 | list of dict |
|
| 133 | The list of test parameter dictionaries. |
|
| 134 | """ |
|
| 135 | params = super().get_test_params() |
|
| 136 | experiment = params[0]["experiment"] |
|
| 137 | more_params = { |
|
| 138 | "experiment": experiment, |
|
| 139 | "alpha": 0.33, |
|
| 140 | "beta": 0.33, |
|
| 141 | "gamma": 0.33, |
|
| 142 | "sigma": 0.33, |
|
| 143 | "search_space": { |
|
| 144 | "C": [0.01, 0.1, 1, 10], |
|
| 145 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 146 | }, |
|
| 147 | "n_iter": 100, |
|
| 148 | } |
|
| 149 | params.append(more_params) |
|
| 150 | return params |
|
| 151 | ||
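
The four simplex coefficients map onto the classic Nelder-Mead moves, and the signature defaults (alpha=1, gamma=2, beta=0.5, sigma=0.5) are the textbook values. A sketch with a deliberately weaker expansion step; the chosen numbers are illustrative only:

```python
from hyperactive.opt import DownhillSimplexOptimizer

optimizer = DownhillSimplexOptimizer(
    search_space={
        "C": [0.01, 0.1, 1, 10],
        "gamma": [0.0001, 0.01, 0.1, 1, 10],
    },
    alpha=1.0,   # reflection coefficient
    gamma=1.5,   # expansion coefficient (default 2, so this expands less);
                 # note: unrelated to the SVC "gamma" key in search_space
    beta=0.5,    # contraction coefficient
    sigma=0.5,   # shrink coefficient
    n_iter=100,
)
```
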
| @@ 4-146 (lines=143) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class PatternSearch(_BaseGFOadapter): |
|
| 5 | """Pattern search optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | n_positions : int |
|
| 25 | Number of positions that the pattern consists of. |
|
| 26 | pattern_size : float |
|
| 27 | The initial size of the pattern, as a fraction of the size of the search |
|
| 28 | space in the corresponding dimension. |
|
| 29 | reduction : float |
|
| 30 | The factor that reduces the size of the pattern if no better position is found. |
|
| 31 | n_iter : int, default=100 |
|
| 32 | The number of iterations to run the optimizer. |
|
| 33 | verbose : bool, default=False |
|
| 34 | If True, print the progress of the optimization process. |
|
| 35 | experiment : BaseExperiment, optional |
|
| 36 | The experiment to optimize parameters for. |
|
| 37 | Optional, can be passed later via ``set_params``. |
|
| 38 | ||
| 39 | Examples |
|
| 40 | -------- |
|
| 41 | Basic usage of PatternSearch with a scikit-learn experiment: |
|
| 42 | ||
| 43 | 1. defining the experiment to optimize: |
|
| 44 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 45 | >>> from sklearn.datasets import load_iris |
|
| 46 | >>> from sklearn.svm import SVC |
|
| 47 | >>> |
|
| 48 | >>> X, y = load_iris(return_X_y=True) |
|
| 49 | >>> |
|
| 50 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 51 | ... estimator=SVC(), |
|
| 52 | ... X=X, |
|
| 53 | ... y=y, |
|
| 54 | ... ) |
|
| 55 | ||
| 56 | 2. setting up the PatternSearch optimizer: |
|
| 57 | >>> from hyperactive.opt import PatternSearch |
|
| 58 | >>> import numpy as np |
|
| 59 | >>> |
|
| 60 | >>> config = { |
|
| 61 | ... "search_space": { |
|
| 62 | ... "C": [0.01, 0.1, 1, 10], |
|
| 63 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 64 | ... }, |
|
| 65 | ... "n_iter": 100, |
|
| 66 | ... } |
|
| 67 | >>> optimizer = PatternSearch(experiment=sklearn_exp, **config) |
|
| 68 | ||
| 69 | 3. running the optimization: |
|
| 70 | >>> best_params = optimizer.solve() |
|
| 71 | ||
| 72 | Best parameters can also be accessed via: |
|
| 73 | >>> best_params = optimizer.best_params_ |
|
| 74 | """ |
|
| 75 | ||
| 76 | _tags = { |
|
| 77 | "info:name": "Pattern Search", |
|
| 78 | "info:local_vs_global": "local", |
|
| 79 | "info:explore_vs_exploit": "explore", |
|
| 80 | "info:compute": "middle", |
|
| 81 | } |
|
| 82 | ||
| 83 | def __init__( |
|
| 84 | self, |
|
| 85 | search_space=None, |
|
| 86 | initialize=None, |
|
| 87 | constraints=None, |
|
| 88 | random_state=None, |
|
| 89 | rand_rest_p=0.1, |
|
| 90 | n_positions=4, |
|
| 91 | pattern_size=0.25, |
|
| 92 | reduction=0.9, |
|
| 93 | n_iter=100, |
|
| 94 | verbose=False, |
|
| 95 | experiment=None, |
|
| 96 | ): |
|
| 97 | self.random_state = random_state |
|
| 98 | self.rand_rest_p = rand_rest_p |
|
| 99 | self.n_positions = n_positions |
|
| 100 | self.pattern_size = pattern_size |
|
| 101 | self.reduction = reduction |
|
| 102 | self.search_space = search_space |
|
| 103 | self.initialize = initialize |
|
| 104 | self.constraints = constraints |
|
| 105 | self.n_iter = n_iter |
|
| 106 | self.experiment = experiment |
|
| 107 | self.verbose = verbose |
|
| 108 | ||
| 109 | super().__init__() |
|
| 110 | ||
| 111 | def _get_gfo_class(self): |
|
| 112 | """Get the GFO class to use. |
|
| 113 | ||
| 114 | Returns |
|
| 115 | ------- |
|
| 116 | class |
|
| 117 | The GFO class to use. One of the concrete GFO classes. |
|
| 118 | """ |
|
| 119 | from gradient_free_optimizers import PatternSearch |
|
| 120 | ||
| 121 | return PatternSearch |
|
| 122 | ||
| 123 | @classmethod |
|
| 124 | def get_test_params(cls, parameter_set="default"): |
|
| 125 | """Get the test parameters for the optimizer. |
|
| 126 | ||
| 127 | Returns |
|
| 128 | ------- |
|
| 129 | list of dict |
|
| 130 | The list of test parameter dictionaries. |
|
| 131 | """ |
|
| 132 | params = super().get_test_params() |
|
| 133 | experiment = params[0]["experiment"] |
|
| 134 | more_params = { |
|
| 135 | "experiment": experiment, |
|
| 136 | "n_positions": 3, |
|
| 137 | "pattern_size": 0.5, |
|
| 138 | "reduction": 0.999, |
|
| 139 | "search_space": { |
|
| 140 | "C": [0.01, 0.1, 1, 10], |
|
| 141 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 142 | }, |
|
| 143 | "n_iter": 100, |
|
| 144 | } |
|
| 145 | params.append(more_params) |
|
| 146 | return params |
|
| 147 | ||
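
Per the docstring, `pattern_size` and `reduction` interact: the pattern starts at a fraction of each search-space dimension and shrinks when no better position is found. A minimal sketch with illustrative values:

```python
from hyperactive.opt import PatternSearch

optimizer = PatternSearch(
    search_space={
        "C": [0.01, 0.1, 1, 10],
        "gamma": [0.0001, 0.01, 0.1, 1, 10],
    },
    n_positions=4,      # positions making up the pattern
    pattern_size=0.25,  # initial extent: a quarter of each dimension
    reduction=0.9,      # shrink factor applied when no improvement is found
    n_iter=100,
)
```
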
| @@ 4-145 (lines=142) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class SpiralOptimization(_BaseGFOadapter): |
|
| 5 | """Spiral optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays as values. |
|
| 12 | Optional, can be passed later via ``set_params``. |
|
| 13 | initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4} |
|
| 14 | The method to generate initial positions. A dictionary with |
|
| 15 | the following key literals and the corresponding value type: |
|
| 16 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 17 | constraints : list[callable], default=[] |
|
| 18 | A list of constraints, where each constraint is a callable. |
|
| 19 | The callable returns `True` or `False` depending on the input parameters. |
|
| 20 | random_state : None, int, default=None |
|
| 21 | If None, create a new random state. If int, create a new random state |
|
| 22 | seeded with the value. |
|
| 23 | rand_rest_p : float, default=0.1 |
|
| 24 | The probability of a random iteration during the search process. |
|
| 25 | population : int |
|
| 26 | The number of particles in the swarm. |
|
| 27 | decay_rate : float |
|
| 28 | A factor that influences the radius of the particles |
|
| 29 | during their spiral movement. |
|
| 30 | Lower values accelerate the convergence of the particles to the best |
|
| 31 | known position, while values above 1 eventually lead to a movement where |
|
| 32 | the particles spiral away from each other. |
|
| 33 | n_iter : int, default=100 |
|
| 34 | The number of iterations to run the optimizer. |
|
| 35 | verbose : bool, default=False |
|
| 36 | If True, print the progress of the optimization process. |
|
| 37 | experiment : BaseExperiment, optional |
|
| 38 | The experiment to optimize parameters for. |
|
| 39 | Optional, can be passed later via ``set_params``. |
|
| 40 | ||
| 41 | Examples |
|
| 42 | -------- |
|
| 43 | Basic usage of SpiralOptimization with a scikit-learn experiment: |
|
| 44 | ||
| 45 | 1. defining the experiment to optimize: |
|
| 46 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 47 | >>> from sklearn.datasets import load_iris |
|
| 48 | >>> from sklearn.svm import SVC |
|
| 49 | >>> |
|
| 50 | >>> X, y = load_iris(return_X_y=True) |
|
| 51 | >>> |
|
| 52 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 53 | ... estimator=SVC(), |
|
| 54 | ... X=X, |
|
| 55 | ... y=y, |
|
| 56 | ... ) |
|
| 57 | ||
| 58 | 2. setting up the SpiralOptimization optimizer: |
|
| 59 | >>> from hyperactive.opt import SpiralOptimization |
|
| 60 | >>> import numpy as np |
|
| 61 | >>> |
|
| 62 | >>> config = { |
|
| 63 | ... "search_space": { |
|
| 64 | ... "C": [0.01, 0.1, 1, 10], |
|
| 65 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 66 | ... }, |
|
| 67 | ... "n_iter": 100, |
|
| 68 | ... } |
|
| 69 | >>> optimizer = SpiralOptimization(experiment=sklearn_exp, **config) |
|
| 70 | ||
| 71 | 3. running the optimization: |
|
| 72 | >>> best_params = optimizer.solve() |
|
| 73 | ||
| 74 | Best parameters can also be accessed via: |
|
| 75 | >>> best_params = optimizer.best_params_ |
|
| 76 | """ |
|
| 77 | ||
| 78 | _tags = { |
|
| 79 | "info:name": "Spiral Optimization", |
|
| 80 | "info:local_vs_global": "mixed", |
|
| 81 | "info:explore_vs_exploit": "explore", |
|
| 82 | "info:compute": "middle", |
|
| 83 | } |
|
| 84 | ||
| 85 | def __init__( |
|
| 86 | self, |
|
| 87 | search_space=None, |
|
| 88 | initialize=None, |
|
| 89 | constraints=None, |
|
| 90 | random_state=None, |
|
| 91 | rand_rest_p=0.1, |
|
| 92 | population: int = 10, |
|
| 93 | decay_rate: float = 0.99, |
|
| 94 | n_iter=100, |
|
| 95 | verbose=False, |
|
| 96 | experiment=None, |
|
| 97 | ): |
|
| 98 | self.random_state = random_state |
|
| 99 | self.rand_rest_p = rand_rest_p |
|
| 100 | self.population = population |
|
| 101 | self.decay_rate = decay_rate |
|
| 102 | self.search_space = search_space |
|
| 103 | self.initialize = initialize |
|
| 104 | self.constraints = constraints |
|
| 105 | self.n_iter = n_iter |
|
| 106 | self.experiment = experiment |
|
| 107 | self.verbose = verbose |
|
| 108 | ||
| 109 | super().__init__() |
|
| 110 | ||
| 111 | def _get_gfo_class(self): |
|
| 112 | """Get the GFO class to use. |
|
| 113 | ||
| 114 | Returns |
|
| 115 | ------- |
|
| 116 | class |
|
| 117 | The GFO class to use. One of the concrete GFO classes. |
|
| 118 | """ |
|
| 119 | from gradient_free_optimizers import SpiralOptimization |
|
| 120 | ||
| 121 | return SpiralOptimization |
|
| 122 | ||
| 123 | @classmethod |
|
| 124 | def get_test_params(cls, parameter_set="default"): |
|
| 125 | """Get the test parameters for the optimizer. |
|
| 126 | ||
| 127 | Returns |
|
| 128 | ------- |
|
| 129 | list of dict |
|
| 130 | The list of test parameter dictionaries. |
|
| 131 | """ |
|
| 132 | params = super().get_test_params() |
|
| 133 | experiment = params[0]["experiment"] |
|
| 134 | more_params = { |
|
| 135 | "experiment": experiment, |
|
| 136 | "population": 20, |
|
| 137 | "decay_rate": 0.9999, |
|
| 138 | "search_space": { |
|
| 139 | "C": [0.01, 0.1, 1, 10], |
|
| 140 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 141 | }, |
|
| 142 | "n_iter": 100, |
|
| 143 | } |
|
| 144 | params.append(more_params) |
|
| 145 | return params |
|
| 146 | ||
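
The `decay_rate` description carries the key trade-off: below 1 the spiral contracts, above 1 it widens. A sketch contrasting the two regimes; the values are arbitrary:

```python
from hyperactive.opt import SpiralOptimization

search_space = {
    "C": [0.01, 0.1, 1, 10],
    "gamma": [0.0001, 0.01, 0.1, 1, 10],
}

# decay_rate < 1: the spiral radius shrinks, so particles converge on the
# best known position (exploitation).
converging = SpiralOptimization(search_space=search_space, decay_rate=0.9)

# decay_rate > 1: the radius grows, so particles spiral away from each
# other (exploration).
diverging = SpiralOptimization(search_space=search_space, decay_rate=1.05)
```
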
| @@ 4-145 (lines=142) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class DifferentialEvolution(_BaseGFOadapter): |
|
| 5 | """Differential evolution optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | population : int |
|
| 25 | The number of individuals in the population. |
|
| 26 | mutation_rate : float |
|
| 27 | The mutation rate. |
|
| 28 | crossover_rate : float |
|
| 29 | The crossover rate. |
|
| 30 | n_iter : int, default=100 |
|
| 31 | The number of iterations to run the optimizer. |
|
| 32 | verbose : bool, default=False |
|
| 33 | If True, print the progress of the optimization process. |
|
| 34 | experiment : BaseExperiment, optional |
|
| 35 | The experiment to optimize parameters for. |
|
| 36 | Optional, can be passed later via ``set_params``. |
|
| 37 | ||
| 38 | Examples |
|
| 39 | -------- |
|
| 40 | Basic usage of DifferentialEvolution with a scikit-learn experiment: |
|
| 41 | ||
| 42 | 1. defining the experiment to optimize: |
|
| 43 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 44 | >>> from sklearn.datasets import load_iris |
|
| 45 | >>> from sklearn.svm import SVC |
|
| 46 | >>> |
|
| 47 | >>> X, y = load_iris(return_X_y=True) |
|
| 48 | >>> |
|
| 49 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 50 | ... estimator=SVC(), |
|
| 51 | ... X=X, |
|
| 52 | ... y=y, |
|
| 53 | ... ) |
|
| 54 | ||
| 55 | 2. setting up the DifferentialEvolution optimizer: |
|
| 56 | >>> from hyperactive.opt import DifferentialEvolution |
|
| 57 | >>> import numpy as np |
|
| 58 | >>> |
|
| 59 | >>> config = { |
|
| 60 | ... "search_space": { |
|
| 61 | ... "C": [0.01, 0.1, 1, 10], |
|
| 62 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 63 | ... }, |
|
| 64 | ... "n_iter": 100, |
|
| 65 | ... } |
|
| 66 | >>> optimizer = DifferentialEvolution(experiment=sklearn_exp, **config) |
|
| 67 | ||
| 68 | 3. running the optimization: |
|
| 69 | >>> best_params = optimizer.solve() |
|
| 70 | ||
| 71 | Best parameters can also be accessed via: |
|
| 72 | >>> best_params = optimizer.best_params_ |
|
| 73 | """ |
|
| 74 | ||
| 75 | _tags = { |
|
| 76 | "info:name": "Differential Evolution", |
|
| 77 | "info:local_vs_global": "global", |
|
| 78 | "info:explore_vs_exploit": "explore", |
|
| 79 | "info:compute": "middle", |
|
| 80 | } |
|
| 81 | ||
| 82 | def __init__( |
|
| 83 | self, |
|
| 84 | search_space=None, |
|
| 85 | initialize=None, |
|
| 86 | constraints=None, |
|
| 87 | random_state=None, |
|
| 88 | rand_rest_p=0.1, |
|
| 89 | population=10, |
|
| 90 | mutation_rate=0.9, |
|
| 91 | crossover_rate=0.9, |
|
| 92 | n_iter=100, |
|
| 93 | verbose=False, |
|
| 94 | experiment=None, |
|
| 95 | ): |
|
| 96 | self.random_state = random_state |
|
| 97 | self.rand_rest_p = rand_rest_p |
|
| 98 | self.population = population |
|
| 99 | self.mutation_rate = mutation_rate |
|
| 100 | self.crossover_rate = crossover_rate |
|
| 101 | self.search_space = search_space |
|
| 102 | self.initialize = initialize |
|
| 103 | self.constraints = constraints |
|
| 104 | self.n_iter = n_iter |
|
| 105 | self.experiment = experiment |
|
| 106 | self.verbose = verbose |
|
| 107 | ||
| 108 | super().__init__() |
|
| 109 | ||
| 110 | def _get_gfo_class(self): |
|
| 111 | """Get the GFO class to use. |
|
| 112 | ||
| 113 | Returns |
|
| 114 | ------- |
|
| 115 | class |
|
| 116 | The GFO class to use. One of the concrete GFO classes. |
|
| 117 | """ |
|
| 118 | from gradient_free_optimizers import DifferentialEvolutionOptimizer |
|
| 119 | ||
| 120 | return DifferentialEvolutionOptimizer |
|
| 121 | ||
| 122 | @classmethod |
|
| 123 | def get_test_params(cls, parameter_set="default"): |
|
| 124 | """Get the test parameters for the optimizer. |
|
| 125 | ||
| 126 | Returns |
|
| 127 | ------- |
|
| 128 | list of dict |
|
| 129 | The list of test parameter dictionaries. |
|
| 130 | """ |
|
| 131 | params = super().get_test_params() |
|
| 132 | experiment = params[0]["experiment"] |
|
| 133 | more_params = { |
|
| 134 | "experiment": experiment, |
|
| 135 | "population": 8, |
|
| 136 | "mutation_rate": 0.8, |
|
| 137 | "crossover_rate": 0.7, |
|
| 138 | "search_space": { |
|
| 139 | "C": [0.01, 0.1, 1, 10], |
|
| 140 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 141 | }, |
|
| 142 | "n_iter": 100, |
|
| 143 | } |
|
| 144 | params.append(more_params) |
|
| 145 | return params |
|
| 146 | ||
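
`random_state` is documented in every adapter but exercised in none of the examples. A short sketch of reproducible runs, on the assumption that two optimizers seeded with the same integer draw identical random decisions; the seed value is arbitrary:

```python
from hyperactive.opt import DifferentialEvolution

search_space = {
    "C": [0.01, 0.1, 1, 10],
    "gamma": [0.0001, 0.01, 0.1, 1, 10],
}

# Same seed, same random stream: repeated searches over the same
# experiment should agree.
opt_a = DifferentialEvolution(search_space=search_space, random_state=42)
opt_b = DifferentialEvolution(search_space=search_space, random_state=42)
```
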
| @@ 4-143 (lines=140) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class ParallelTempering(_BaseGFOadapter): |
|
| 5 | """Parallel tempering optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | population : int |
|
| 25 | The number of simulated annealing systems (replicas) that run in |
|
| 26 | parallel, each at its own temperature. |
|
| 27 | n_iter_swap : int |
|
| 28 | The number of iterations between attempts to swap the temperatures |
|
| 29 | of two of the parallel systems, which allows replicas to escape |
|
| 30 | local optima. |
|
| 31 | n_iter : int, default=100 |
|
| 32 | The number of iterations to run the optimizer. |
|
| 33 | verbose : bool, default=False |
|
| 34 | If True, print the progress of the optimization process. |
|
| 35 | experiment : BaseExperiment, optional |
|
| 36 | The experiment to optimize parameters for. |
|
| 37 | Optional, can be passed later via ``set_params``. |
|
| 38 | ||
| 39 | Examples |
|
| 40 | -------- |
|
| 41 | Basic usage of ParallelTempering with a scikit-learn experiment: |
|
| 42 | ||
| 43 | 1. defining the experiment to optimize: |
|
| 44 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 45 | >>> from sklearn.datasets import load_iris |
|
| 46 | >>> from sklearn.svm import SVC |
|
| 47 | >>> |
|
| 48 | >>> X, y = load_iris(return_X_y=True) |
|
| 49 | >>> |
|
| 50 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 51 | ... estimator=SVC(), |
|
| 52 | ... X=X, |
|
| 53 | ... y=y, |
|
| 54 | ... ) |
|
| 55 | ||
| 56 | 2. setting up the ParallelTempering optimizer: |
|
| 57 | >>> from hyperactive.opt import ParallelTempering |
|
| 58 | >>> import numpy as np |
|
| 59 | >>> |
|
| 60 | >>> config = { |
|
| 61 | ... "search_space": { |
|
| 62 | ... "C": [0.01, 0.1, 1, 10], |
|
| 63 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 64 | ... }, |
|
| 65 | ... "n_iter": 100, |
|
| 66 | ... } |
|
| 67 | >>> optimizer = ParallelTempering(experiment=sklearn_exp, **config) |
|
| 68 | ||
| 69 | 3. running the optimization: |
|
| 70 | >>> best_params = optimizer.solve() |
|
| 71 | ||
| 72 | Best parameters can also be accessed via: |
|
| 73 | >>> best_params = optimizer.best_params_ |
|
| 74 | """ |
|
| 75 | ||
| 76 | _tags = { |
|
| 77 | "info:name": "Parallel Tempering", |
|
| 78 | "info:local_vs_global": "global", |
|
| 79 | "info:explore_vs_exploit": "explore", |
|
| 80 | "info:compute": "high", |
|
| 81 | } |
|
| 82 | ||
| 83 | def __init__( |
|
| 84 | self, |
|
| 85 | search_space=None, |
|
| 86 | initialize=None, |
|
| 87 | constraints=None, |
|
| 88 | random_state=None, |
|
| 89 | rand_rest_p=0.1, |
|
| 90 | population: int = 5, |
|
| 91 | n_iter_swap: int = 5, |
|
| 92 | n_iter=100, |
|
| 93 | verbose=False, |
|
| 94 | experiment=None, |
|
| 95 | ): |
|
| 96 | self.random_state = random_state |
|
| 97 | self.rand_rest_p = rand_rest_p |
|
| 98 | self.population = population |
|
| 99 | self.n_iter_swap = n_iter_swap |
|
| 100 | self.search_space = search_space |
|
| 101 | self.initialize = initialize |
|
| 102 | self.constraints = constraints |
|
| 103 | self.n_iter = n_iter |
|
| 104 | self.experiment = experiment |
|
| 105 | self.verbose = verbose |
|
| 106 | ||
| 107 | super().__init__() |
|
| 108 | ||
| 109 | def _get_gfo_class(self): |
|
| 110 | """Get the GFO class to use. |
|
| 111 | ||
| 112 | Returns |
|
| 113 | ------- |
|
| 114 | class |
|
| 115 | The GFO class to use. One of the concrete GFO classes. |
|
| 116 | """ |
|
| 117 | from gradient_free_optimizers import ParallelTemperingOptimizer |
|
| 118 | ||
| 119 | return ParallelTemperingOptimizer |
|
| 120 | ||
| 121 | @classmethod |
|
| 122 | def get_test_params(cls, parameter_set="default"): |
|
| 123 | """Get the test parameters for the optimizer. |
|
| 124 | ||
| 125 | Returns |
|
| 126 | ------- |
|
| 127 | list of dict |
|
| 128 | The list of test parameter dictionaries. |
|
| 129 | """ |
|
| 130 | params = super().get_test_params() |
|
| 131 | experiment = params[0]["experiment"] |
|
| 132 | more_params = { |
|
| 133 | "experiment": experiment, |
|
| 134 | "population": 10, |
|
| 135 | "n_iter_swap": 3, |
|
| 136 | "search_space": { |
|
| 137 | "C": [0.01, 0.1, 1, 10], |
|
| 138 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 139 | }, |
|
| 140 | "n_iter": 100, |
|
| 141 | } |
|
| 142 | params.append(more_params) |
|
| 143 | return params |
|
| 144 | ||
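
`population` and `n_iter_swap` are the two parameters specific to this adapter, matching the `__init__` signature above. A usage sketch with illustrative values:

```python
from hyperactive.opt import ParallelTempering

optimizer = ParallelTempering(
    search_space={
        "C": [0.01, 0.1, 1, 10],
        "gamma": [0.0001, 0.01, 0.1, 1, 10],
    },
    population=8,    # parallel annealing systems at different temperatures
    n_iter_swap=10,  # iterations between temperature-swap attempts
    n_iter=100,
)
```
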
| @@ 4-140 (lines=137) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class GridSearch(_BaseGFOadapter): |
|
| 5 | """Grid search optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | step_size : int |
|
| 25 | The step-size for the grid search. |
|
| 26 | direction : "diagonal" or "orthogonal" |
|
| 27 | The direction of the grid search. |
|
| 28 | n_iter : int, default=100 |
|
| 29 | The number of iterations to run the optimizer. |
|
| 30 | verbose : bool, default=False |
|
| 31 | If True, print the progress of the optimization process. |
|
| 32 | experiment : BaseExperiment, optional |
|
| 33 | The experiment to optimize parameters for. |
|
| 34 | Optional, can be passed later via ``set_params``. |
|
| 35 | ||
| 36 | Examples |
|
| 37 | -------- |
|
| 38 | Basic usage of GridSearch with a scikit-learn experiment: |
|
| 39 | ||
| 40 | 1. defining the experiment to optimize: |
|
| 41 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 42 | >>> from sklearn.datasets import load_iris |
|
| 43 | >>> from sklearn.svm import SVC |
|
| 44 | >>> |
|
| 45 | >>> X, y = load_iris(return_X_y=True) |
|
| 46 | >>> |
|
| 47 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 48 | ... estimator=SVC(), |
|
| 49 | ... X=X, |
|
| 50 | ... y=y, |
|
| 51 | ... ) |
|
| 52 | ||
| 53 | 2. setting up the GridSearch optimizer: |
|
| 54 | >>> from hyperactive.opt import GridSearch |
|
| 55 | >>> import numpy as np |
|
| 56 | >>> |
|
| 57 | >>> config = { |
|
| 58 | ... "search_space": { |
|
| 59 | ... "C": [0.01, 0.1, 1, 10], |
|
| 60 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 61 | ... }, |
|
| 62 | ... "n_iter": 100, |
|
| 63 | ... } |
|
| 64 | >>> optimizer = GridSearch(experiment=sklearn_exp, **config) |
|
| 65 | ||
| 66 | 3. running the optimization: |
|
| 67 | >>> best_params = optimizer.solve() |
|
| 68 | ||
| 69 | Best parameters can also be accessed via: |
|
| 70 | >>> best_params = optimizer.best_params_ |
|
| 71 | """ |
|
| 72 | ||
| 73 | _tags = { |
|
| 74 | "info:name": "Grid Search", |
|
| 75 | "info:local_vs_global": "global", |
|
| 76 | "info:explore_vs_exploit": "explore", |
|
| 77 | "info:compute": "high", |
|
| 78 | } |
|
| 79 | ||
| 80 | def __init__( |
|
| 81 | self, |
|
| 82 | search_space=None, |
|
| 83 | initialize=None, |
|
| 84 | constraints=None, |
|
| 85 | random_state=None, |
|
| 86 | rand_rest_p=0.1, |
|
| 87 | step_size=1, |
|
| 88 | direction="diagonal", |
|
| 89 | n_iter=100, |
|
| 90 | verbose=False, |
|
| 91 | experiment=None, |
|
| 92 | ): |
|
| 93 | self.random_state = random_state |
|
| 94 | self.rand_rest_p = rand_rest_p |
|
| 95 | self.step_size = step_size |
|
| 96 | self.direction = direction |
|
| 97 | self.search_space = search_space |
|
| 98 | self.initialize = initialize |
|
| 99 | self.constraints = constraints |
|
| 100 | self.n_iter = n_iter |
|
| 101 | self.experiment = experiment |
|
| 102 | self.verbose = verbose |
|
| 103 | ||
| 104 | super().__init__() |
|
| 105 | ||
| 106 | def _get_gfo_class(self): |
|
| 107 | """Get the GFO class to use. |
|
| 108 | ||
| 109 | Returns |
|
| 110 | ------- |
|
| 111 | class |
|
| 112 | The GFO class to use. One of the concrete GFO classes. |
|
| 113 | """ |
|
| 114 | from gradient_free_optimizers import GridSearchOptimizer |
|
| 115 | ||
| 116 | return GridSearchOptimizer |
|
| 117 | ||
| 118 | @classmethod |
|
| 119 | def get_test_params(cls, parameter_set="default"): |
|
| 120 | """Get the test parameters for the optimizer. |
|
| 121 | ||
| 122 | Returns |
|
| 123 | ------- |
|
| 124 | list of dict |
|
| 125 | The list of test parameter dictionaries. |
|
| 126 | """ |
|
| 127 | params = super().get_test_params() |
|
| 128 | experiment = params[0]["experiment"] |
|
| 129 | more_params = { |
|
| 130 | "experiment": experiment, |
|
| 131 | "step_size": 3, |
|
| 132 | "direction": "orthogonal", |
|
| 133 | "search_space": { |
|
| 134 | "C": [0.01, 0.1, 1, 10], |
|
| 135 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 136 | }, |
|
| 137 | "n_iter": 100, |
|
| 138 | } |
|
| 139 | params.append(more_params) |
|
| 140 | return params |
|
| 141 | ||
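
Every hunk above ends in an essentially identical `get_test_params` body; only the optimizer-specific override keys differ. A hedged sketch, not part of hyperactive's current API, of how that shared tail could be factored out; `build_test_params` and `_COMMON_TEST_PARAMS` are hypothetical names introduced here:

```python
# Hypothetical helper -- NOT hyperactive's existing API. It captures the
# boilerplate repeated in every get_test_params above: reuse the first
# test experiment, merge optimizer-specific overrides, and append the
# shared search_space / n_iter configuration.
_COMMON_TEST_PARAMS = {
    "search_space": {
        "C": [0.01, 0.1, 1, 10],
        "gamma": [0.0001, 0.01, 0.1, 1, 10],
    },
    "n_iter": 100,
}


def build_test_params(base_params, **overrides):
    """Return base_params plus one dict built from the shared boilerplate."""
    more_params = {
        "experiment": base_params[0]["experiment"],
        **overrides,
        **_COMMON_TEST_PARAMS,
    }
    return base_params + [more_params]


# Each adapter's get_test_params would then shrink to a single call, e.g.:
#   return build_test_params(super().get_test_params(),
#                            step_size=3, direction="orthogonal")
```
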