| @@ 4-154 (lines=151) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class BayesianOptimizer(_BaseGFOadapter): |
|
| 5 | """Bayesian optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays of candidate values as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | warm_start_smbo |
|
| 25 | Search data from previous runs, used to warm start the SMBO surrogate. |
|
| 26 | max_sample_size : int |
|
| 27 | The maximum number of points to sample. |
|
| 28 | sampling : dict |
|
| 29 | The sampling method to use. |
|
| 30 | replacement : bool |
|
| 31 | Whether to sample with replacement. |
|
|
| 34 | xi : float |
|
| 35 | The exploration-exploitation trade-off parameter. |
|
| 36 | n_iter : int, default=100 |
|
| 37 | The number of iterations to run the optimizer. |
|
| 38 | verbose : bool, default=False |
|
| 39 | If True, print the progress of the optimization process. |
|
| 40 | experiment : BaseExperiment, optional |
|
| 41 | The experiment to optimize parameters for. |
|
| 42 | Optional, can be passed later via ``set_params``. |
|
| 43 | ||
| 44 | Examples |
|
| 45 | -------- |
|
| 46 | Basic usage of BayesianOptimizer with a scikit-learn experiment: |
|
| 47 | ||
| 48 | 1. defining the experiment to optimize: |
|
| 49 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 50 | >>> from sklearn.datasets import load_iris |
|
| 51 | >>> from sklearn.svm import SVC |
|
| 52 | >>> |
|
| 53 | >>> X, y = load_iris(return_X_y=True) |
|
| 54 | >>> |
|
| 55 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 56 | ... estimator=SVC(), |
|
| 57 | ... X=X, |
|
| 58 | ... y=y, |
|
| 59 | ... ) |
|
| 60 | ||
| 61 | 2. setting up the BayesianOptimizer: |
|
| 62 | >>> from hyperactive.opt import BayesianOptimizer |
|
| 63 | >>> import numpy as np |
|
| 64 | >>> |
|
| 65 | >>> config = { |
|
| 66 | ... "search_space": { |
|
| 67 | ... "C": [0.01, 0.1, 1, 10], |
|
| 68 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 69 | ... }, |
|
| 70 | ... "n_iter": 100, |
|
| 71 | ... } |
|
| 72 | >>> optimizer = BayesianOptimizer(experiment=sklearn_exp, **config) |
|
| 73 | ||
| 74 | 3. running the optimization: |
|
| 75 | >>> best_params = optimizer.solve() |
|
| 76 | ||
| 77 | Best parameters can also be accessed via: |
|
| 78 | >>> best_params = optimizer.best_params_ |
|
| 79 | """ |
|
| 80 | ||
| 81 | _tags = { |
|
| 82 | "info:name": "Bayesian Optimization", |
|
| 83 | "info:local_vs_global": "global", |
|
| 84 | "info:explore_vs_exploit": "exploit", |
|
| 85 | "info:compute": "high", |
|
| 86 | } |
|
| 87 | ||
| 88 | def __init__( |
|
| 89 | self, |
|
| 90 | search_space=None, |
|
| 91 | initialize=None, |
|
| 92 | constraints=None, |
|
| 93 | random_state=None, |
|
| 94 | rand_rest_p=0.1, |
|
| 95 | warm_start_smbo=None, |
|
| 96 | max_sample_size=10000000, |
|
| 97 | sampling=None, |
|
| 98 | replacement=True, |
|
| 99 | xi=0.03, |
|
| 100 | n_iter=100, |
|
| 101 | verbose=False, |
|
| 102 | experiment=None, |
|
| 103 | ): |
|
| 104 | self.random_state = random_state |
|
| 105 | self.rand_rest_p = rand_rest_p |
|
| 106 | ||
| 107 | self.warm_start_smbo = warm_start_smbo |
|
| 108 | self.max_sample_size = max_sample_size |
|
| 109 | self.sampling = sampling |
|
| 110 | self.search_space = search_space |
|
| 111 | self.initialize = initialize |
|
| 112 | self.constraints = constraints |
|
| 113 | self.replacement = replacement |
|
| 114 | self.xi = xi |
|
| 115 | self.n_iter = n_iter |
|
| 116 | self.experiment = experiment |
|
| 117 | self.verbose = verbose |
|
| 118 | ||
| 119 | super().__init__() |
|
| 120 | ||
| 121 | def _get_gfo_class(self): |
|
| 122 | """Get the GFO class to use. |
|
| 123 | ||
| 124 | Returns |
|
| 125 | ------- |
|
| 126 | class |
|
| 127 | The GFO class to use. One of the concrete GFO classes |
|
| 128 | """ |
|
| 129 | from gradient_free_optimizers import BayesianOptimizer |
|
| 130 | ||
| 131 | return BayesianOptimizer |
|
| 132 | ||
| 133 | @classmethod |
|
| 134 | def get_test_params(cls, parameter_set="default"): |
|
| 135 | """Get the test parameters for the optimizer. |
|
| 136 | ||
| 137 | Returns |
|
| 138 | ------- |
|
| 139 | list of dict |
|
| 140 | The list of test parameter dictionaries. |
|
| 141 | """ |
|
| 142 | params = super().get_test_params() |
|
| 143 | experiment = params[0]["experiment"] |
|
| 144 | more_params = { |
|
| 145 | "experiment": experiment, |
|
| 146 | "xi": 0.33, |
|
| 147 | "search_space": { |
|
| 148 | "C": [0.01, 0.1, 1, 10], |
|
| 149 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 150 | }, |
|
| 151 | "n_iter": 100, |
|
| 152 | } |
|
| 153 | params.append(more_params) |
|
| 154 | return params |
|
| 155 | ||
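The `xi` parameter in the clone above is the exploration-exploitation offset of the acquisition function. As a rough illustration, the sketch below shows the textbook expected-improvement formula with an `xi`-style offset; the helper name is hypothetical, and the actual acquisition function inside `gradient_free_optimizers` may differ.

```python
# Textbook expected improvement (EI) for maximization, illustrating
# how an xi-style offset shifts the balance toward exploration.
# Sketch only; not the internals of gradient_free_optimizers.
import numpy as np
from scipy.stats import norm

def expected_improvement(mu, sigma, best_score, xi=0.03):
    """EI of a candidate with surrogate mean mu and std sigma."""
    sigma = np.maximum(sigma, 1e-12)   # guard against zero predictive std
    imp = mu - best_score - xi         # improvement, discounted by xi
    z = imp / sigma
    return imp * norm.cdf(z) + sigma * norm.pdf(z)

# With enough predictive uncertainty, a candidate can outrank one
# whose mean prediction is closer to the incumbent best score:
print(expected_improvement(mu=0.90, sigma=0.05, best_score=0.92))
print(expected_improvement(mu=0.89, sigma=0.20, best_score=0.92))
```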
| @@ 4-154 (lines=151) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class TreeStructuredParzenEstimators(_BaseGFOadapter): |
|
| 5 | """Tree structured parzen estimators optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays of candidate values as values. |
|
| 12 | Optional, can be passed later via ``set_params``. |
|
| 13 | initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4} |
|
| 14 | The method to generate initial positions. A dictionary with |
|
| 15 | the following key literals and the corresponding value type: |
|
| 16 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 17 | constraints : list[callable], default=[] |
|
| 18 | A list of constraints, where each constraint is a callable. |
|
| 19 | The callable returns `True` or `False` depending on the input parameters. |
|
| 20 | random_state : None, int, default=None |
|
| 21 | If None, create a new random state. If int, create a new random state |
|
| 22 | seeded with the value. |
|
| 23 | rand_rest_p : float, default=0.1 |
|
| 24 | The probability of a random iteration during the search process. |
|
| 25 | warm_start_smbo |
|
| 26 | The warm start for SMBO. |
|
| 27 | max_sample_size : int |
|
| 28 | The maximum number of points to sample. |
|
| 29 | sampling : dict0 |
|
| 30 | The sampling method to use. |
|
| 31 | replacement : bool |
|
| 32 | Whether to sample with replacement. |
|
| 33 | gamma_tpe : float |
|
| 34 | The quantile parameter of the tree-structured Parzen estimators. |
|
| 35 | n_iter : int, default=100 |
|
| 36 | The number of iterations to run the optimizer. |
|
| 37 | verbose : bool, default=False |
|
| 38 | If True, print the progress of the optimization process. |
|
| 39 | experiment : BaseExperiment, optional |
|
| 40 | The experiment to optimize parameters for. |
|
| 41 | Optional, can be passed later via ``set_params``. |
|
| 42 | ||
| 43 | Examples |
|
| 44 | -------- |
|
| 45 | Basic usage of TreeStructuredParzenEstimators with a scikit-learn experiment: |
|
| 46 | ||
| 47 | 1. defining the experiment to optimize: |
|
| 48 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 49 | >>> from sklearn.datasets import load_iris |
|
| 50 | >>> from sklearn.svm import SVC |
|
| 51 | >>> |
|
| 52 | >>> X, y = load_iris(return_X_y=True) |
|
| 53 | >>> |
|
| 54 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 55 | ... estimator=SVC(), |
|
| 56 | ... X=X, |
|
| 57 | ... y=y, |
|
| 58 | ... ) |
|
| 59 | ||
| 60 | 2. setting up the TreeStructuredParzenEstimators optimizer: |
|
| 61 | >>> from hyperactive.opt import TreeStructuredParzenEstimators |
|
| 62 | >>> import numpy as np |
|
| 63 | >>> |
|
| 64 | >>> config = { |
|
| 65 | ... "search_space": { |
|
| 66 | ... "C": [0.01, 0.1, 1, 10], |
|
| 67 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 68 | ... }, |
|
| 69 | ... "n_iter": 100, |
|
| 70 | ... } |
|
| 71 | >>> optimizer = TreeStructuredParzenEstimators(experiment=sklearn_exp, **config) |
|
| 72 | ||
| 73 | 3. running the optimization: |
|
| 74 | >>> best_params = optimizer.solve() |
|
| 75 | ||
| 76 | Best parameters can also be accessed via: |
|
| 77 | >>> best_params = optimizer.best_params_ |
|
| 78 | """ |
|
| 79 | ||
| 80 | _tags = { |
|
| 81 | "info:name": "Tree Structured Parzen Estimators", |
|
| 82 | "info:local_vs_global": "mixed", # "local", "mixed", "global" |
|
| 83 | "info:explore_vs_exploit": "mixed", # "explore", "exploit", "mixed" |
|
| 84 | "info:compute": "high", # "low", "middle", "high" |
|
| 85 | } |
|
| 86 | ||
| 87 | def __init__( |
|
| 88 | self, |
|
| 89 | search_space=None, |
|
| 90 | initialize=None, |
|
| 91 | constraints=None, |
|
| 92 | random_state=None, |
|
| 93 | rand_rest_p=0.1, |
|
| 94 | warm_start_smbo=None, |
|
| 95 | max_sample_size=10000000, |
|
| 96 | sampling=None, |
|
| 97 | replacement=True, |
|
| 98 | gamma_tpe=0.2, |
|
| 99 | n_iter=100, |
|
| 100 | verbose=False, |
|
| 101 | experiment=None, |
|
| 102 | ): |
|
| 103 | self.random_state = random_state |
|
| 104 | self.rand_rest_p = rand_rest_p |
|
| 105 | self.warm_start_smbo = warm_start_smbo |
|
| 106 | self.max_sample_size = max_sample_size |
|
| 107 | self.sampling = sampling |
|
| 108 | self.replacement = replacement |
|
| 109 | self.gamma_tpe = gamma_tpe |
|
| 110 | self.search_space = search_space |
|
| 111 | self.initialize = initialize |
|
| 112 | self.constraints = constraints |
|
| 113 | self.n_iter = n_iter |
|
| 114 | self.experiment = experiment |
|
| 115 | self.verbose = verbose |
|
| 116 | ||
| 117 | super().__init__() |
|
| 118 | ||
| 119 | def _get_gfo_class(self): |
|
| 120 | """Get the GFO class to use. |
|
| 121 | ||
| 122 | Returns |
|
| 123 | ------- |
|
| 124 | class |
|
| 125 | The GFO class to use. One of the concrete GFO classes. |
|
| 126 | """ |
|
| 127 | from gradient_free_optimizers import TreeStructuredParzenEstimators |
|
| 128 | ||
| 129 | return TreeStructuredParzenEstimators |
|
| 130 | ||
| 131 | @classmethod |
|
| 132 | def get_test_params(cls, parameter_set="default"): |
|
| 133 | """Get the test parameters for the optimizer. |
|
| 134 | ||
| 135 | Returns |
|
| 136 | ------- |
|
| 137 | list of dict |
|
| 138 | The list of test parameter dictionaries. |
|
| 139 | """ |
|
| 140 | params = super().get_test_params() |
|
| 141 | experiment = params[0]["experiment"] |
|
| 142 | more_params = { |
|
| 143 | "experiment": experiment, |
|
| 144 | "max_sample_size": 100, |
|
| 145 | "replacement": True, |
|
| 146 | "gamma_tpe": 0.01, |
|
| 147 | "search_space": { |
|
| 148 | "C": [0.01, 0.1, 1, 10], |
|
| 149 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 150 | }, |
|
| 151 | "n_iter": 100, |
|
| 152 | } |
|
| 153 | params.append(more_params) |
|
| 154 | return params |
|
| 155 | ||
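The `gamma_tpe` parameter above is the split fraction of the TPE scheme: observed trials are divided into a best quantile ("good") and the rest ("bad"), and candidates are ranked by the density ratio l(x)/g(x) of the two groups. A minimal sketch of that idea, assuming kernel density estimates and a hypothetical `tpe_rank` helper; the implementation in `gradient_free_optimizers` may differ in detail.

```python
# TPE candidate ranking sketch: prefer points that are likely under
# the density of good trials l(x) and unlikely under the bad ones g(x).
import numpy as np
from scipy.stats import gaussian_kde

def tpe_rank(observed_x, observed_scores, candidates, gamma_tpe=0.2):
    order = np.argsort(observed_scores)[::-1]            # best scores first
    n_good = max(2, int(np.ceil(gamma_tpe * len(order))))
    l_kde = gaussian_kde(observed_x[order[:n_good]])     # "good" density
    g_kde = gaussian_kde(observed_x[order[n_good:]])     # "bad" density
    ratio = l_kde(candidates) / np.maximum(g_kde(candidates), 1e-12)
    return candidates[np.argsort(ratio)[::-1]]           # most promising first

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, 30)
scores = -(x - 3.0) ** 2                                 # optimum near x = 3
cands = rng.uniform(0, 10, 100)
print(tpe_rank(x, scores, cands)[:5])                    # clustered near 3
```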
| @@ 4-148 (lines=145) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class LipschitzOptimizer(_BaseGFOadapter): |
|
| 5 | """Lipschitz optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays of candidate values as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | warm_start_smbo |
|
| 25 | The warm start for SMBO. |
|
| 26 | max_sample_size : int |
|
| 27 | The maximum number of points to sample. |
|
| 28 | sampling : dict |
|
| 29 | The sampling method to use. |
|
| 30 | replacement : bool |
|
| 31 | Whether to sample with replacement. |
|
| 32 | n_iter : int, default=100 |
|
| 33 | The number of iterations to run the optimizer. |
|
| 34 | verbose : bool, default=False |
|
| 35 | If True, print the progress of the optimization process. |
|
| 36 | experiment : BaseExperiment, optional |
|
| 37 | The experiment to optimize parameters for. |
|
| 38 | Optional, can be passed later via ``set_params``. |
|
| 39 | ||
| 40 | Examples |
|
| 41 | -------- |
|
| 42 | Basic usage of LipschitzOptimizer with a scikit-learn experiment: |
|
| 43 | ||
| 44 | 1. defining the experiment to optimize: |
|
| 45 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 46 | >>> from sklearn.datasets import load_iris |
|
| 47 | >>> from sklearn.svm import SVC |
|
| 48 | >>> |
|
| 49 | >>> X, y = load_iris(return_X_y=True) |
|
| 50 | >>> |
|
| 51 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 52 | ... estimator=SVC(), |
|
| 53 | ... X=X, |
|
| 54 | ... y=y, |
|
| 55 | ... ) |
|
| 56 | ||
| 57 | 2. setting up the LipschitzOptimizer: |
|
| 58 | >>> from hyperactive.opt import LipschitzOptimizer |
|
| 59 | >>> import numpy as np |
|
| 60 | >>> |
|
| 61 | >>> config = { |
|
| 62 | ... "search_space": { |
|
| 63 | ... "C": [0.01, 0.1, 1, 10], |
|
| 64 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 65 | ... }, |
|
| 66 | ... "n_iter": 100, |
|
| 67 | ... } |
|
| 68 | >>> optimizer = LipschitzOptimizer(experiment=sklearn_exp, **config) |
|
| 69 | ||
| 70 | 3. running the optimization: |
|
| 71 | >>> best_params = optimizer.solve() |
|
| 72 | ||
| 73 | Best parameters can also be accessed via: |
|
| 74 | >>> best_params = optimizer.best_params_ |
|
| 75 | """ |
|
| 76 | ||
| 77 | _tags = { |
|
| 78 | "info:name": "Lipschitz Optimization", |
|
| 79 | "info:local_vs_global": "global", |
|
| 80 | "info:explore_vs_exploit": "mixed", |
|
| 81 | "info:compute": "high", |
|
| 82 | } |
|
| 83 | ||
| 84 | def __init__( |
|
| 85 | self, |
|
| 86 | search_space=None, |
|
| 87 | initialize=None, |
|
| 88 | constraints=None, |
|
| 89 | random_state=None, |
|
| 90 | rand_rest_p=0.1, |
|
| 91 | warm_start_smbo=None, |
|
| 92 | max_sample_size=10000000, |
|
| 93 | sampling=None, |
|
| 94 | replacement=True, |
|
| 95 | n_iter=100, |
|
| 96 | verbose=False, |
|
| 97 | experiment=None, |
|
| 98 | ): |
|
| 99 | self.random_state = random_state |
|
| 100 | self.rand_rest_p = rand_rest_p |
|
| 101 | self.warm_start_smbo = warm_start_smbo |
|
| 102 | self.max_sample_size = max_sample_size |
|
| 103 | self.sampling = sampling |
|
| 104 | self.replacement = replacement |
|
| 105 | self.search_space = search_space |
|
| 106 | self.initialize = initialize |
|
| 107 | self.constraints = constraints |
|
| 108 | self.n_iter = n_iter |
|
| 109 | self.experiment = experiment |
|
| 110 | self.verbose = verbose |
|
| 111 | ||
| 112 | super().__init__() |
|
| 113 | ||
| 114 | def _get_gfo_class(self): |
|
| 115 | """Get the GFO class to use. |
|
| 116 | ||
| 117 | Returns |
|
| 118 | ------- |
|
| 119 | class |
|
| 120 | The GFO class to use. One of the concrete GFO classes. |
|
| 121 | """ |
|
| 122 | from gradient_free_optimizers import LipschitzOptimizer |
|
| 123 | ||
| 124 | return LipschitzOptimizer |
|
| 125 | ||
| 126 | @classmethod |
|
| 127 | def get_test_params(cls, parameter_set="default"): |
|
| 128 | """Get the test parameters for the optimizer. |
|
| 129 | ||
| 130 | Returns |
|
| 131 | ------- |
|
| 132 | list of dict |
|
| 133 | The list of test parameter dictionaries. |
|
| 134 | """ |
|
| 135 | params = super().get_test_params() |
|
| 136 | experiment = params[0]["experiment"] |
|
| 137 | more_params = { |
|
| 138 | "experiment": experiment, |
|
| 139 | "max_sample_size": 1000, |
|
| 140 | "replacement": True, |
|
| 141 | "search_space": { |
|
| 142 | "C": [0.01, 0.1, 1, 10], |
|
| 143 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 144 | }, |
|
| 145 | "n_iter": 100, |
|
| 146 | } |
|
| 147 | params.append(more_params) |
|
| 148 | return params |
|
| 149 | ||
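Lipschitz optimization, which this clone adapts, maintains an upper bound on the objective built from all evaluated points and an estimated Lipschitz constant, and samples where the bound is largest. A minimal one-dimensional sketch of the bound, assuming a known constant `k` (the helper name is illustrative; the real logic lives in `gradient_free_optimizers`):

```python
# Lipschitz upper bound: each evaluated point (x_i, y_i) caps the
# objective at y_i + k * |x - x_i|; the tightest cap is the bound.
import numpy as np

def lipschitz_upper_bound(candidates, evaluated_x, evaluated_y, k):
    dists = np.abs(candidates[:, None] - evaluated_x[None, :])
    return np.min(evaluated_y[None, :] + k * dists, axis=1)

x_seen = np.array([1.0, 4.0, 7.0])
y_seen = np.array([0.2, 0.9, 0.4])
cands = np.linspace(0.0, 10.0, 101)
ub = lipschitz_upper_bound(cands, x_seen, y_seen, k=0.3)
print(cands[np.argmax(ub)])   # next sample: highest remaining upper bound
```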
| @@ 4-148 (lines=145) @@ | ||
| 1 | from hyperactive.opt._adapters._gfo import _BaseGFOadapter |
|
| 2 | ||
| 3 | ||
| 4 | class DirectAlgorithm(_BaseGFOadapter): |
|
| 5 | """Direct optimizer. |
|
| 6 | ||
| 7 | Parameters |
|
| 8 | ---------- |
|
| 9 | search_space : dict[str, list] |
|
| 10 | The search space to explore. A dictionary with parameter |
|
| 11 | names as keys and lists or numpy arrays of candidate values as values. |
|
| 12 | initialize : dict[str, int] |
|
| 13 | The method to generate initial positions. A dictionary with |
|
| 14 | the following key literals and the corresponding value type: |
|
| 15 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 16 | constraints : list[callable] |
|
| 17 | A list of constraints, where each constraint is a callable. |
|
| 18 | The callable returns `True` or `False` depending on the input parameters. |
|
| 19 | random_state : None, int |
|
| 20 | If None, create a new random state. If int, create a new random state |
|
| 21 | seeded with the value. |
|
| 22 | rand_rest_p : float |
|
| 23 | The probability of a random iteration during the search process. |
|
| 24 | warm_start_smbo |
|
| 25 | The warm start for SMBO. |
|
| 26 | max_sample_size : int |
|
| 27 | The maximum number of points to sample. |
|
| 28 | sampling : dict |
|
| 29 | The sampling method to use. |
|
| 30 | replacement : bool |
|
| 31 | Whether to sample with replacement. |
|
| 32 | n_iter : int, default=100 |
|
| 33 | The number of iterations to run the optimizer. |
|
| 34 | verbose : bool, default=False |
|
| 35 | If True, print the progress of the optimization process. |
|
| 36 | experiment : BaseExperiment, optional |
|
| 37 | The experiment to optimize parameters for. |
|
| 38 | Optional, can be passed later via ``set_params``. |
|
| 39 | ||
| 40 | Examples |
|
| 41 | -------- |
|
| 42 | Basic usage of DirectAlgorithm with a scikit-learn experiment: |
|
| 43 | ||
| 44 | 1. defining the experiment to optimize: |
|
| 45 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 46 | >>> from sklearn.datasets import load_iris |
|
| 47 | >>> from sklearn.svm import SVC |
|
| 48 | >>> |
|
| 49 | >>> X, y = load_iris(return_X_y=True) |
|
| 50 | >>> |
|
| 51 | >>> sklearn_exp = SklearnCvExperiment( |
|
| 52 | ... estimator=SVC(), |
|
| 53 | ... X=X, |
|
| 54 | ... y=y, |
|
| 55 | ... ) |
|
| 56 | ||
| 57 | 2. setting up the DirectAlgorithm optimizer: |
|
| 58 | >>> from hyperactive.opt import DirectAlgorithm |
|
| 59 | >>> import numpy as np |
|
| 60 | >>> |
|
| 61 | >>> config = { |
|
| 62 | ... "search_space": { |
|
| 63 | ... "C": [0.01, 0.1, 1, 10], |
|
| 64 | ... "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 65 | ... }, |
|
| 66 | ... "n_iter": 100, |
|
| 67 | ... } |
|
| 68 | >>> optimizer = DirectAlgorithm(experiment=sklearn_exp, **config) |
|
| 69 | ||
| 70 | 3. running the optimization: |
|
| 71 | >>> best_params = optimizer.solve() |
|
| 72 | ||
| 73 | Best parameters can also be accessed via: |
|
| 74 | >>> best_params = optimizer.best_params_ |
|
| 75 | """ |
|
| 76 | ||
| 77 | _tags = { |
|
| 78 | "info:name": "DIRECT Algorithm", |
|
| 79 | "info:local_vs_global": "global", |
|
| 80 | "info:explore_vs_exploit": "mixed", |
|
| 81 | "info:compute": "high", |
|
| 82 | } |
|
| 83 | ||
| 84 | def __init__( |
|
| 85 | self, |
|
| 86 | search_space=None, |
|
| 87 | initialize=None, |
|
| 88 | constraints=None, |
|
| 89 | random_state=None, |
|
| 90 | rand_rest_p=0.1, |
|
| 91 | warm_start_smbo=None, |
|
| 92 | max_sample_size: int = 10000000, |
|
| 93 | sampling=None, |
|
| 94 | replacement=True, |
|
| 95 | n_iter=100, |
|
| 96 | verbose=False, |
|
| 97 | experiment=None, |
|
| 98 | ): |
|
| 99 | self.random_state = random_state |
|
| 100 | self.rand_rest_p = rand_rest_p |
|
| 101 | self.warm_start_smbo = warm_start_smbo |
|
| 102 | self.max_sample_size = max_sample_size |
|
| 103 | self.sampling = sampling |
|
| 104 | self.search_space = search_space |
|
| 105 | self.initialize = initialize |
|
| 106 | self.constraints = constraints |
|
| 107 | self.replacement = replacement |
|
| 108 | self.n_iter = n_iter |
|
| 109 | self.experiment = experiment |
|
| 110 | self.verbose = verbose |
|
| 111 | ||
| 112 | super().__init__() |
|
| 113 | ||
| 114 | def _get_gfo_class(self): |
|
| 115 | """Get the GFO class to use. |
|
| 116 | ||
| 117 | Returns |
|
| 118 | ------- |
|
| 119 | class |
|
| 120 | The GFO class to use. One of the concrete GFO classes. |
|
| 121 | """ |
|
| 122 | from gradient_free_optimizers import DirectAlgorithm |
|
| 123 | ||
| 124 | return DirectAlgorithm |
|
| 125 | ||
| 126 | @classmethod |
|
| 127 | def get_test_params(cls, parameter_set="default"): |
|
| 128 | """Get the test parameters for the optimizer. |
|
| 129 | ||
| 130 | Returns |
|
| 131 | ------- |
|
| 132 | list of dict |
|
| 133 | The list of test parameter dictionaries. |
|
| 134 | """ |
|
| 135 | params = super().get_test_params() |
|
| 136 | experiment = params[0]["experiment"] |
|
| 137 | more_params = { |
|
| 138 | "experiment": experiment, |
|
| 139 | "replacement": True, |
|
| 140 | "max_sample_size": 1000, |
|
| 141 | "search_space": { |
|
| 142 | "C": [0.01, 0.1, 1, 10], |
|
| 143 | "gamma": [0.0001, 0.01, 0.1, 1, 10], |
|
| 144 | }, |
|
| 145 | "n_iter": 100, |
|
| 146 | } |
|
| 147 | params.append(more_params) |
|
| 148 | return params |
|
| 149 | ||
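All four clones in this report share the same roughly twelve constructor parameters and differ only in one estimator-specific argument (`xi` or `gamma_tpe`) and the GFO class they import. One possible deduplication is sketched below, assuming scikit-base parameter introspection requires explicit per-class `__init__` signatures; `_SMBOParamsMixin` is a hypothetical name, not part of hyperactive.

```python
# Deduplication sketch: centralize the attribute assignments that the
# four clones repeat verbatim. Hypothetical; not hyperactive's API.
from hyperactive.opt._adapters._gfo import _BaseGFOadapter

class _SMBOParamsMixin:
    """Assigns the constructor parameters shared by the SMBO adapters."""

    def _init_smbo_params(
        self, search_space, initialize, constraints, random_state,
        rand_rest_p, warm_start_smbo, max_sample_size, sampling,
        replacement, n_iter, verbose, experiment,
    ):
        self.search_space = search_space
        self.initialize = initialize
        self.constraints = constraints
        self.random_state = random_state
        self.rand_rest_p = rand_rest_p
        self.warm_start_smbo = warm_start_smbo
        self.max_sample_size = max_sample_size
        self.sampling = sampling
        self.replacement = replacement
        self.n_iter = n_iter
        self.verbose = verbose
        self.experiment = experiment

class LipschitzOptimizer(_SMBOParamsMixin, _BaseGFOadapter):
    """Lipschitz optimizer, rewritten against the shared mixin."""

    def __init__(self, search_space=None, initialize=None, constraints=None,
                 random_state=None, rand_rest_p=0.1, warm_start_smbo=None,
                 max_sample_size=10000000, sampling=None, replacement=True,
                 n_iter=100, verbose=False, experiment=None):
        self._init_smbo_params(
            search_space, initialize, constraints, random_state, rand_rest_p,
            warm_start_smbo, max_sample_size, sampling, replacement,
            n_iter, verbose, experiment,
        )
        super().__init__()

    def _get_gfo_class(self):
        from gradient_free_optimizers import LipschitzOptimizer
        return LipschitzOptimizer
```

Each subclass keeps its explicit signature, so `get_params`/`set_params` introspection is unaffected; only the copy-pasted assignment block disappears.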