| @@ 4-156 (lines=153) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class BayesianOptimizer(_BaseGFOadapter): |
|
| 5 | """Bayesian optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and a list or numpy array as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | warm_start_smbo |
|
| 25 | The warm start for SMBO. |
|
| 26 | max_sample_size : int |
|
| 27 | The maximum number of points to sample. |
|
| 28 | sampling : dict |
|
| 29 | The sampling method to use. |
|
| 30 | replacement : bool |
|
| 31 | Whether to sample with replacement. |
|
| 32 | gpr : dict |
|
| 33 | The Gaussian Process Regressor to use. |
|
| 34 | xi : float |
|
| 35 | The exploration-exploitation trade-off parameter. |
|
| 36 | n_iter : int, default=100 |
|
| 37 | The number of iterations to run the optimizer. |
|
| 38 | verbose : bool, default=False |
|
| 39 | If True, print the progress of the optimization process. |
|
| 40 | experiment : BaseExperiment, optional |
|
| 41 | The experiment to optimize parameters for. |
|
| 42 | Optional, can be passed later via ``set_params``. |
|
| 43 | ||
| 44 | Examples |
|
| 45 | -------- |
|
| 46 | Basic usage of BayesianOptimizer with a scikit-learn experiment: |
|
| 47 | ||
| 48 | 1. defining the experiment to optimize: |
|
| 49 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 50 | >>> from sklearn.datasets import load_iris |
|
| 51 | >>> from sklearn.svm import SVC |
|
| 52 | >>> |
|
| 53 | >>> X, y = load_iris(return_X_y=True) |
|
| 54 | >>> |
|
| 55 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 56 | ... estimator=SVC(), |
|
| 57 | ... X=X, |
|
| 58 | ... y=y, |
|
| 59 | ... ) |
|
| 60 | ||
| 61 | 2. setting up the BayesianOptimizer optimizer: |
|
| 62 | >>> from hyperactive.opt import BayesianOptimizer |
|
| 63 | >>> import numpy as np |
|
| 64 | >>> |
|
| 65 | >>> config = { |
|
| 66 | ... "search_space": { |
|
| 67 | ... "C": [0.01, 0.1, 1, 10], |
|
| 68 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 69 | ... }, |
|
| 70 | ... "n_iter": 100, |
|
| 71 | ... } |
|
| 72 | >>> optimizer = BayesianOptimizer(experiment=sklearn_exp, **config) |
|
| 73 | ||
| 74 | 3. running the optimization: |
|
| 75 | >>> best_params = optimizer.run() |
|
| 76 | ||
| 77 | Best parameters can also be accessed via: |
|
| 78 | >>> best_params = optimizer.best_params_ |
|
| 79 | """ |
|
| 80 | ||
| 81 | _tags = { |
|
| 82 | "info:name": "Bayesian Optimization", |
|
| 83 | "info:local_vs_global": "global", |
|
| 84 | "info:explore_vs_exploit": "exploit", |
|
| 85 | "info:compute": "high", |
|
| 86 | } |
|
| 87 | ||
| 88 | def __init__( |
|
| 89 | self, |
|
| 90 | search_space=None, |
|
| 91 | initialize=None, |
|
| 92 | constraints=None, |
|
| 93 | random_state=None, |
|
| 94 | rand_rest_p=0.1, |
|
| 95 | warm_start_smbo=None, |
|
| 96 | max_sample_size=10000000, |
|
| 97 | sampling=None, |
|
| 98 | replacement=True, |
|
| 99 | xi=0.03, |
|
| 100 | n_iter=100, |
|
| 101 | verbose=False, |
|
| 102 | experiment=None, |
|
| 103 | ): |
|
| 104 | self.random_state = random_state |
|
| 105 | self.rand_rest_p = rand_rest_p |
|
| 106 | ||
| 107 | self.warm_start_smbo = warm_start_smbo |
|
| 108 | self.max_sample_size = max_sample_size |
|
| 109 | self.sampling = sampling |
|
| 110 | self.search_space = search_space |
|
| 111 | self.initialize = initialize |
|
| 112 | self.constraints = constraints |
|
| 113 | self.replacement = replacement |
|
| 114 | self.xi = xi |
|
| 115 | self.n_iter = n_iter |
|
| 116 | self.experiment = experiment |
|
| 117 | self.verbose = verbose |
|
| 118 | ||
| 119 | super().__init__() |
|
| 120 | ||
| 121 | def _get_gfo_class(self): |
|
| 122 | """Get the GFO class to use. |
|
| 123 | ||
| 124 | Returns |
|
| 125 | ------- |
|
| 126 | class |
|
| 127 | The GFO class to use. One of the concrete GFO classes. |
|
| 128 | """ |
|
| 129 | from gradient_free_optimizers import BayesianOptimizer |
|
| 130 | ||
| 131 | return BayesianOptimizer |
|
| 132 | ||
| 133 | @classmethod |
|
| 134 | def get_test_params(cls, parameter_set="default"): |
|
| 135 | """Get the test parameters for the optimizer. |
|
| 136 | ||
| 137 | Returns |
|
| 138 | ------- |
|
| 139 | list of dict |
|
| 140 | The test parameters; each dict corresponds to one test instance. |
|
| 141 | """ |
|
| 142 | import numpy as np |
|
| 143 | ||
| 144 | params = super().get_test_params() |
|
| 145 | experiment = params[0]["experiment"] |
|
| 146 | more_params = { |
|
| 147 | "experiment": experiment, |
|
| 148 | "xi": 0.33, |
|
| 149 | "search_space": { |
|
| 150 | "C": [0.01, 0.1, 1, 10], |
|
| 151 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 152 | }, |
|
| 153 | "n_iter": 100, |
|
| 154 | } |
|
| 155 | params.append(more_params) |
|
| 156 | return params |
|
| 157 | ||
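The `constraints` and `initialize` parameters recur in every adapter in this report, so a small sketch may help. It assumes (not confirmed by the hunk itself) that each constraint callable receives a dict of candidate parameters keyed by the search-space names, consistent with "returns `True` or `False` depending on the input parameters" in the docstring:

```python
from hyperactive.opt import BayesianOptimizer

def budget_constraint(params):
    # Hypothetical constraint: reject the most expensive corner of the grid.
    # Assumes `params` is a dict keyed by the search-space parameter names.
    return not (params["C"] == 10 and params["gamma"] == 10)

optimizer = BayesianOptimizer(
    search_space={
        "C": [0.01, 0.1, 1, 10],
        "gamma": [0.0001, 0.01, 0.1, 1, 10],
    },
    # Key literals taken from the `initialize` docstring above.
    initialize={"grid": 4, "vertices": 4, "random": 2},
    constraints=[budget_constraint],
    random_state=42,
)
```

The experiment is omitted here; per the docstring it can be supplied later via ``set_params``.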
| @@ 4-156 (lines=153) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class TreeStructuredParzenEstimators(_BaseGFOadapter): |
|
| 5 | """Tree structured parzen estimators optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and a list or numpy array as values. |
|
| 12 | Optional, can be passed later via ``set_params``. |
|
| 13 | initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4} |
|
| 14 | The method to generate initial positions. A dictionary with |
|
| 15 | the following key literals and the corresponding value type: |
|
| 16 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 17 | constraints : list[callable], default=[] |
|
| 18 | A list of constraints, where each constraint is a callable. |
|
| 19 | The callable returns `True` or `False` depending on the input parameters. |
|
| 20 | random_state : None, int, default=None |
|
| 21 | If None, create a new random state. If int, create a new random state |
|
| 22 | seeded with the value. |
|
| 23 | rand_rest_p : float, default=0.1 |
|
| 24 | The probability of a random iteration during the search process. |
|
| 25 | warm_start_smbo |
|
| 26 | The warm start for SMBO. |
|
| 27 | max_sample_size : int |
|
| 28 | The maximum number of points to sample. |
|
| 29 | sampling : dict |
|
| 30 | The sampling method to use. |
|
| 31 | replacement : bool |
|
| 32 | Whether to sample with replacement. |
|
| 33 | gamma_tpe : float |
|
| 34 | The gamma parameter of the Tree Structured Parzen Estimators, controlling the split of explored positions into good and bad samples. |
|
| 35 | n_iter : int, default=100 |
|
| 36 | The number of iterations to run the optimizer. |
|
| 37 | verbose : bool, default=False |
|
| 38 | If True, print the progress of the optimization process. |
|
| 39 | experiment : BaseExperiment, optional |
|
| 40 | The experiment to optimize parameters for. |
|
| 41 | Optional, can be passed later via ``set_params``. |
|
| 42 | ||
| 43 | Examples |
|
| 44 | -------- |
|
| 45 | Basic usage of TreeStructuredParzenEstimators with a scikit-learn experiment: |
|
| 46 | ||
| 47 | 1. defining the experiment to optimize: |
|
| 48 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 49 | >>> from sklearn.datasets import load_iris |
|
| 50 | >>> from sklearn.svm import SVC |
|
| 51 | >>> |
|
| 52 | >>> X, y = load_iris(return_X_y=True) |
|
| 53 | >>> |
|
| 54 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 55 | ... estimator=SVC(), |
|
| 56 | ... X=X, |
|
| 57 | ... y=y, |
|
| 58 | ... ) |
|
| 59 | ||
| 60 | 2. setting up the TreeStructuredParzenEstimators optimizer: |
|
| 61 | >>> from hyperactive.opt import TreeStructuredParzenEstimators |
|
| 62 | >>> import numpy as np |
|
| 63 | >>> |
|
| 64 | >>> config = { |
|
| 65 | ... "search_space": { |
|
| 66 | ... "C": [0.01, 0.1, 1, 10], |
|
| 67 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 68 | ... }, |
|
| 69 | ... "n_iter": 100, |
|
| 70 | ... } |
|
| 71 | >>> optimizer = TreeStructuredParzenEstimators(experiment=sklearn_exp, **config) |
|
| 72 | ||
| 73 | 3. running the optimization: |
|
| 74 | >>> best_params = optimizer.run() |
|
| 75 | ||
| 76 | Best parameters can also be accessed via: |
|
| 77 | >>> best_params = optimizer.best_params_ |
|
| 78 | """ |
|
| 79 | ||
| 80 | _tags = { |
|
| 81 | "info:name": "Tree Structured Parzen Estimators", |
|
| 82 | "info:local_vs_global": "mixed", # "local", "mixed", "global" |
|
| 83 | "info:explore_vs_exploit": "mixed", # "explore", "exploit", "mixed" |
|
| 84 | "info:compute": "high", # "low", "middle", "high" |
|
| 85 | } |
|
| 86 | ||
| 87 | def __init__( |
|
| 88 | self, |
|
| 89 | search_space=None, |
|
| 90 | initialize=None, |
|
| 91 | constraints=None, |
|
| 92 | random_state=None, |
|
| 93 | rand_rest_p=0.1, |
|
| 94 | warm_start_smbo=None, |
|
| 95 | max_sample_size=10000000, |
|
| 96 | sampling=None, |
|
| 97 | replacement=True, |
|
| 98 | gamma_tpe=0.2, |
|
| 99 | n_iter=100, |
|
| 100 | verbose=False, |
|
| 101 | experiment=None, |
|
| 102 | ): |
|
| 103 | self.random_state = random_state |
|
| 104 | self.rand_rest_p = rand_rest_p |
|
| 105 | self.warm_start_smbo = warm_start_smbo |
|
| 106 | self.max_sample_size = max_sample_size |
|
| 107 | self.sampling = sampling |
|
| 108 | self.replacement = replacement |
|
| 109 | self.gamma_tpe = gamma_tpe |
|
| 110 | self.search_space = search_space |
|
| 111 | self.initialize = initialize |
|
| 112 | self.constraints = constraints |
|
| 113 | self.n_iter = n_iter |
|
| 114 | self.experiment = experiment |
|
| 115 | self.verbose = verbose |
|
| 116 | ||
| 117 | super().__init__() |
|
| 118 | ||
| 119 | def _get_gfo_class(self): |
|
| 120 | """Get the GFO class to use. |
|
| 121 | ||
| 122 | Returns |
|
| 123 | ------- |
|
| 124 | class |
|
| 125 | The GFO class to use. One of the concrete GFO classes. |
|
| 126 | """ |
|
| 127 | from gradient_free_optimizers import TreeStructuredParzenEstimators |
|
| 128 | ||
| 129 | return TreeStructuredParzenEstimators |
|
| 130 | ||
| 131 | @classmethod |
|
| 132 | def get_test_params(cls, parameter_set="default"): |
|
| 133 | """Get the test parameters for the optimizer. |
|
| 134 | ||
| 135 | Returns |
|
| 136 | ------- |
|
| 137 | list of dict |
|
| 138 | The test parameters; each dict corresponds to one test instance. |
|
| 139 | """ |
|
| 140 | import numpy as np |
|
| 141 | ||
| 142 | params = super().get_test_params() |
|
| 143 | experiment = params[0]["experiment"] |
|
| 144 | more_params = { |
|
| 145 | "experiment": experiment, |
|
| 146 | "max_sample_size": 100, |
|
| 147 | "replacement": True, |
|
| 148 | "gamma_tpe": 0.01, |
|
| 149 | "search_space": { |
|
| 150 | "C": [0.01, 0.1, 1, 10], |
|
| 151 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 152 | }, |
|
| 153 | "n_iter": 100, |
|
| 154 | } |
|
| 155 | params.append(more_params) |
|
| 156 | return params |
|
| 157 | ||
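The `warm_start` entry of `initialize` takes a `list[dict]` per the docstring, which is the natural way to seed TPE with known good configurations. A minimal sketch, with `gamma_tpe` set explicitly:

```python
from hyperactive.opt import TreeStructuredParzenEstimators

# Known good configurations to seed the search; "warm_start" takes a
# list[dict], per the initialize docstring above.
seed_points = [{"C": 1, "gamma": 0.1}, {"C": 10, "gamma": 0.01}]

optimizer = TreeStructuredParzenEstimators(
    search_space={
        "C": [0.01, 0.1, 1, 10],
        "gamma": [0.0001, 0.01, 0.1, 1, 10],
    },
    initialize={"random": 2, "warm_start": seed_points},
    gamma_tpe=0.2,  # split of explored positions into good/bad samples
    n_iter=50,
)
```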
| @@ 4-150 (lines=147) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class DirectAlgorithm(_BaseGFOadapter): |
|
| 5 | """Direct optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and a list or numpy array as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` dependend on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | warm_start_smbo |
|
| 25 | The warm start for SMBO. |
|
| 26 | max_sample_size : int |
|
| 27 | The maximum number of points to sample. |
|
| 28 | sampling : dict |
|
| 29 | The sampling method to use. |
|
| 30 | replacement : bool |
|
| 31 | Whether to sample with replacement. |
|
| 32 | n_iter : int, default=100 |
|
| 33 | The number of iterations to run the optimizer. |
|
| 34 | verbose : bool, default=False |
|
| 35 | If True, print the progress of the optimization process. |
|
| 36 | experiment : BaseExperiment, optional |
|
| 37 | The experiment to optimize parameters for. |
|
| 38 | Optional, can be passed later via ``set_params``. |
|
| 39 | ||
| 40 | Examples |
|
| 41 | -------- |
|
| 42 | Basic usage of DirectAlgorithm with a scikit-learn experiment: |
|
| 43 | ||
| 44 | 1. defining the experiment to optimize: |
|
| 45 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 46 | >>> from sklearn.datasets import load_iris |
|
| 47 | >>> from sklearn.svm import SVC |
|
| 48 | >>> |
|
| 49 | >>> X, y = load_iris(return_X_y=True) |
|
| 50 | >>> |
|
| 51 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 52 | ... estimator=SVC(), |
|
| 53 | ... X=X, |
|
| 54 | ... y=y, |
|
| 55 | ... ) |
|
| 56 | ||
| 57 | 2. setting up the DirectAlgorithm optimizer: |
|
| 58 | >>> from hyperactive.opt import DirectAlgorithm |
|
| 59 | >>> import numpy as np |
|
| 60 | >>> |
|
| 61 | >>> config = { |
|
| 62 | ... "search_space": { |
|
| 63 | ... "C": [0.01, 0.1, 1, 10], |
|
| 64 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 65 | ... }, |
|
| 66 | ... "n_iter": 100, |
|
| 67 | ... } |
|
| 68 | >>> optimizer = DirectAlgorithm(experiment=sklearn_exp, **config) |
|
| 69 | ||
| 70 | 3. running the optimization: |
|
| 71 | >>> best_params = optimizer.run() |
|
| 72 | ||
| 73 | Best parameters can also be accessed via: |
|
| 74 | >>> best_params = optimizer.best_params_ |
|
| 75 | """ |
|
| 76 | ||
| 77 | _tags = { |
|
| 78 | "info:name": "DIRECT Algorithm", |
|
| 79 | "info:local_vs_global": "global", |
|
| 80 | "info:explore_vs_exploit": "mixed", |
|
| 81 | "info:compute": "high", |
|
| 82 | } |
|
| 83 | ||
| 84 | def __init__( |
|
| 85 | self, |
|
| 86 | search_space=None, |
|
| 87 | initialize=None, |
|
| 88 | constraints=None, |
|
| 89 | random_state=None, |
|
| 90 | rand_rest_p=0.1, |
|
| 91 | warm_start_smbo=None, |
|
| 92 | max_sample_size: int = 10000000, |
|
| 93 | sampling=None, |
|
| 94 | replacement=True, |
|
| 95 | n_iter=100, |
|
| 96 | verbose=False, |
|
| 97 | experiment=None, |
|
| 98 | ): |
|
| 99 | self.random_state = random_state |
|
| 100 | self.rand_rest_p = rand_rest_p |
|
| 101 | self.warm_start_smbo = warm_start_smbo |
|
| 102 | self.max_sample_size = max_sample_size |
|
| 103 | self.sampling = sampling |
|
| 104 | self.search_space = search_space |
|
| 105 | self.initialize = initialize |
|
| 106 | self.constraints = constraints |
|
| 107 | self.replacement = replacement |
|
| 108 | self.n_iter = n_iter |
|
| 109 | self.experiment = experiment |
|
| 110 | self.verbose = verbose |
|
| 111 | ||
| 112 | super().__init__() |
|
| 113 | ||
| 114 | def _get_gfo_class(self): |
|
| 115 | """Get the GFO class to use. |
|
| 116 | ||
| 117 | Returns |
|
| 118 | ------- |
|
| 119 | class |
|
| 120 | The GFO class to use. One of the concrete GFO classes. |
|
| 121 | """ |
|
| 122 | from gradient_free_optimizers import DirectAlgorithm |
|
| 123 | ||
| 124 | return DirectAlgorithm |
|
| 125 | ||
| 126 | @classmethod |
|
| 127 | def get_test_params(cls, parameter_set="default"): |
|
| 128 | """Get the test parameters for the optimizer. |
|
| 129 | ||
| 130 | Returns |
|
| 131 | ------- |
|
| 132 | list of dict |
|
| 133 | The test parameters; each dict corresponds to one test instance. |
|
| 134 | """ |
|
| 135 | import numpy as np |
|
| 136 | ||
| 137 | params = super().get_test_params() |
|
| 138 | experiment = params[0]["experiment"] |
|
| 139 | more_params = { |
|
| 140 | "experiment": experiment, |
|
| 141 | "replacement": True, |
|
| 142 | "max_sample_size": 1000, |
|
| 143 | "search_space": { |
|
| 144 | "C": [0.01, 0.1, 1, 10], |
|
| 145 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 146 | }, |
|
| 147 | "n_iter": 100, |
|
| 148 | } |
|
| 149 | params.append(more_params) |
|
| 150 | return params |
|
| 151 | ||
| @@ 4-150 (lines=147) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class LipschitzOptimizer(_BaseGFOadapter): |
|
| 5 | """Lipschitz optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and a list or numpy array as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the the search process. |
|
| 24 | warm_start_smbo |
|
| 25 | The warm start for SMBO. |
|
| 26 | max_sample_size : int |
|
| 27 | The maximum number of points to sample. |
|
| 28 | sampling : dict |
|
| 29 | The sampling method to use. |
|
| 30 | replacement : bool |
|
| 31 | Whether to sample with replacement. |
|
| 32 | n_iter : int, default=100 |
|
| 33 | The number of iterations to run the optimizer. |
|
| 34 | verbose : bool, default=False |
|
| 35 | If True, print the progress of the optimization process. |
|
| 36 | experiment : BaseExperiment, optional |
|
| 37 | The experiment to optimize parameters for. |
|
| 38 | Optional, can be passed later via ``set_params``. |
|
| 39 | ||
| 40 | Examples |
|
| 41 | -------- |
|
| 42 | Basic usage of LipschitzOptimizer with a scikit-learn experiment: |
|
| 43 | ||
| 44 | 1. defining the experiment to optimize: |
|
| 45 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 46 | >>> from sklearn.datasets import load_iris |
|
| 47 | >>> from sklearn.svm import SVC |
|
| 48 | >>> |
|
| 49 | >>> X, y = load_iris(return_X_y=True) |
|
| 50 | >>> |
|
| 51 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 52 | ... estimator=SVC(), |
|
| 53 | ... X=X, |
|
| 54 | ... y=y, |
|
| 55 | ... ) |
|
| 56 | ||
| 57 | 2. setting up the LipschitzOptimizer optimizer: |
|
| 58 | >>> from hyperactive.opt import LipschitzOptimizer |
|
| 59 | >>> import numpy as np |
|
| 60 | >>> |
|
| 61 | >>> config = { |
|
| 62 | ... "search_space": { |
|
| 63 | ... "C": [0.01, 0.1, 1, 10], |
|
| 64 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 65 | ... }, |
|
| 66 | ... "n_iter": 100, |
|
| 67 | ... } |
|
| 68 | >>> optimizer = LipschitzOptimizer(experiment=sklearn_exp, **config) |
|
| 69 | ||
| 70 | 3. running the optimization: |
|
| 71 | >>> best_params = optimizer.run() |
|
| 72 | ||
| 73 | Best parameters can also be accessed via: |
|
| 74 | >>> best_params = optimizer.best_params_ |
|
| 75 | """ |
|
| 76 | ||
| 77 | _tags = { |
|
| 78 | "info:name": "Lipschitz Optimization", |
|
| 79 | "info:local_vs_global": "global", |
|
| 80 | "info:explore_vs_exploit": "mixed", |
|
| 81 | "info:compute": "high", |
|
| 82 | } |
|
| 83 | ||
| 84 | def __init__( |
|
| 85 | self, |
|
| 86 | search_space=None, |
|
| 87 | initialize=None, |
|
| 88 | constraints=None, |
|
| 89 | random_state=None, |
|
| 90 | rand_rest_p=0.1, |
|
| 91 | warm_start_smbo=None, |
|
| 92 | max_sample_size=10000000, |
|
| 93 | sampling=None, |
|
| 94 | replacement=True, |
|
| 95 | n_iter=100, |
|
| 96 | verbose=False, |
|
| 97 | experiment=None, |
|
| 98 | ): |
|
| 99 | self.random_state = random_state |
|
| 100 | self.rand_rest_p = rand_rest_p |
|
| 101 | self.warm_start_smbo = warm_start_smbo |
|
| 102 | self.max_sample_size = max_sample_size |
|
| 103 | self.sampling = sampling |
|
| 104 | self.replacement = replacement |
|
| 105 | self.search_space = search_space |
|
| 106 | self.initialize = initialize |
|
| 107 | self.constraints = constraints |
|
| 108 | self.n_iter = n_iter |
|
| 109 | self.experiment = experiment |
|
| 110 | self.verbose = verbose |
|
| 111 | ||
| 112 | super().__init__() |
|
| 113 | ||
| 114 | def _get_gfo_class(self): |
|
| 115 | """Get the GFO class to use. |
|
| 116 | ||
| 117 | Returns |
|
| 118 | ------- |
|
| 119 | class |
|
| 120 | The GFO class to use. One of the concrete GFO classes. |
|
| 121 | """ |
|
| 122 | from gradient_free_optimizers import LipschitzOptimizer |
|
| 123 | ||
| 124 | return LipschitzOptimizer |
|
| 125 | ||
| 126 | @classmethod |
|
| 127 | def get_test_params(cls, parameter_set="default"): |
|
| 128 | """Get the test parameters for the optimizer. |
|
| 129 | ||
| 130 | Returns |
|
| 131 | ------- |
|
| 132 | list of dict |
|
| 133 | The test parameters; each dict corresponds to one test instance. |
|
| 134 | """ |
|
| 135 | import numpy as np |
|
| 136 | ||
| 137 | params = super().get_test_params() |
|
| 138 | experiment = params[0]["experiment"] |
|
| 139 | more_params = { |
|
| 140 | "experiment": experiment, |
|
| 141 | "max_sample_size": 1000, |
|
| 142 | "replacement": True, |
|
| 143 | "search_space": { |
|
| 144 | "C": [0.01, 0.1, 1, 10], |
|
| 145 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 146 | }, |
|
| 147 | "n_iter": 100, |
|
| 148 | } |
|
| 149 | params.append(more_params) |
|
| 150 | return params |
|
| 151 | ||
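Because `get_test_params` returns a list of kwargs dicts (one per test instance, including the `experiment` key), exercising an adapter end to end is a short loop. A minimal sketch using only the API shown in the hunks:

```python
from hyperactive.opt import LipschitzOptimizer

# Each entry is a complete kwargs dict, including the "experiment" key.
for params in LipschitzOptimizer.get_test_params():
    optimizer = LipschitzOptimizer(**params)
    best_params = optimizer.run()  # as in the Examples section above
    print(best_params)
```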
| @@ 4-147 (lines=144) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class PatternSearch(_BaseGFOadapter): |
|
| 5 | """Pattern search optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and a list or numpy array as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` dependend on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | n_positions : int |
|
| 25 | Number of positions that the pattern consists of. |
|
| 26 | pattern_size : float |
|
| 27 | The initial pattern size, given as a fraction of the search-space size in the corresponding dimension. |
|
| 28 | reduction : float |
|
| 29 | The factor that reduces the size of the pattern if no better position is found. |
|
| 30 | n_iter : int, default=100 |
|
| 31 | The number of iterations to run the optimizer. |
|
| 32 | verbose : bool, default=False |
|
| 33 | If True, print the progress of the optimization process. |
|
| 34 | experiment : BaseExperiment, optional |
|
| 35 | The experiment to optimize parameters for. |
|
| 36 | Optional, can be passed later via ``set_params``. |
|
| 37 | ||
| 38 | Examples |
|
| 39 | -------- |
|
| 40 | Basic usage of PatternSearch with a scikit-learn experiment: |
|
| 41 | ||
| 42 | 1. defining the experiment to optimize: |
|
| 43 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 44 | >>> from sklearn.datasets import load_iris |
|
| 45 | >>> from sklearn.svm import SVC |
|
| 46 | >>> |
|
| 47 | >>> X, y = load_iris(return_X_y=True) |
|
| 48 | >>> |
|
| 49 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 50 | ... estimator=SVC(), |
|
| 51 | ... X=X, |
|
| 52 | ... y=y, |
|
| 53 | ... ) |
|
| 54 | ||
| 55 | 2. setting up the PatternSearch optimizer: |
|
| 56 | >>> from hyperactive.opt import PatternSearch |
|
| 57 | >>> import numpy as np |
|
| 58 | >>> |
|
| 59 | >>> config = { |
|
| 60 | ... "search_space": { |
|
| 61 | ... "C": [0.01, 0.1, 1, 10], |
|
| 62 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 63 | ... }, |
|
| 64 | ... "n_iter": 100, |
|
| 65 | ... } |
|
| 66 | >>> optimizer = PatternSearch(experiment=sklearn_exp, **config) |
|
| 67 | ||
| 68 | 3. running the optimization: |
|
| 69 | >>> best_params = optimizer.run() |
|
| 70 | ||
| 71 | Best parameters can also be accessed via: |
|
| 72 | >>> best_params = optimizer.best_params_ |
|
| 73 | """ |
|
| 74 | ||
| 75 | _tags = { |
|
| 76 | "info:name": "Pattern Search", |
|
| 77 | "info:local_vs_global": "local", |
|
| 78 | "info:explore_vs_exploit": "explore", |
|
| 79 | "info:compute": "middle", |
|
| 80 | } |
|
| 81 | ||
| 82 | def __init__( |
|
| 83 | self, |
|
| 84 | search_space=None, |
|
| 85 | initialize=None, |
|
| 86 | constraints=None, |
|
| 87 | random_state=None, |
|
| 88 | rand_rest_p=0.1, |
|
| 89 | n_positions=4, |
|
| 90 | pattern_size=0.25, |
|
| 91 | reduction=0.9, |
|
| 92 | n_iter=100, |
|
| 93 | verbose=False, |
|
| 94 | experiment=None, |
|
| 95 | ): |
|
| 96 | self.random_state = random_state |
|
| 97 | self.rand_rest_p = rand_rest_p |
|
| 98 | self.n_positions = n_positions |
|
| 99 | self.pattern_size = pattern_size |
|
| 100 | self.reduction = reduction |
|
| 101 | self.search_space = search_space |
|
| 102 | self.initialize = initialize |
|
| 103 | self.constraints = constraints |
|
| 104 | self.n_iter = n_iter |
|
| 105 | self.experiment = experiment |
|
| 106 | self.verbose = verbose |
|
| 107 | ||
| 108 | super().__init__() |
|
| 109 | ||
| 110 | def _get_gfo_class(self): |
|
| 111 | """Get the GFO class to use. |
|
| 112 | ||
| 113 | Returns |
|
| 114 | ------- |
|
| 115 | class |
|
| 116 | The GFO class to use. One of the concrete GFO classes. |
|
| 117 | """ |
|
| 118 | from gradient_free_optimizers import PatternSearch |
|
| 119 | ||
| 120 | return PatternSearch |
|
| 121 | ||
| 122 | @classmethod |
|
| 123 | def get_test_params(cls, parameter_set="default"): |
|
| 124 | """Get the test parameters for the optimizer. |
|
| 125 | ||
| 126 | Returns |
|
| 127 | ------- |
|
| 128 | list of dict |
|
| 129 | The test parameters; each dict corresponds to one test instance. |
|
| 130 | """ |
|
| 131 | import numpy as np |
|
| 132 | ||
| 133 | params = super().get_test_params() |
|
| 134 | experiment = params[0]["experiment"] |
|
| 135 | more_params = { |
|
| 136 | "experiment": experiment, |
|
| 137 | "n_positions": 3, |
|
| 138 | "pattern_size": 0.5, |
|
| 139 | "reduction": 0.999, |
|
| 140 | "search_space": { |
|
| 141 | "C": [0.01, 0.1, 1, 10], |
|
| 142 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 143 | }, |
|
| 144 | "n_iter": 100, |
|
| 145 | } |
|
| 146 | params.append(more_params) |
|
| 147 | return params |
|
| 148 | ||
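The `pattern_size` and `reduction` parameters interact as simple geometric shrinkage: the pattern starts at a fraction of each dimension's extent and shrinks by the `reduction` factor whenever no better position is found. Illustrative arithmetic only, not the library's internals:

```python
pattern_size = 0.25  # initial size, as a fraction of the search-space extent
reduction = 0.9      # shrink factor applied after each failed improvement

size = pattern_size
for _ in range(5):   # five consecutive steps without improvement
    size *= reduction

print(f"pattern size after 5 failed steps: {size:.4f}")  # 0.1476
```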