| @@ 7-126 (lines=120) @@ | ||
| 4 | from .._adapters._base_optuna_adapter import _BaseOptunaAdapter |
|
| 5 | ||
| 6 | ||
| 7 | class NSGAIIIOptimizer(_BaseOptunaAdapter): |
|
| 8 | """NSGA-III multi-objective optimizer. |
|
| 9 | ||
| 10 | Parameters |
|
| 11 | ---------- |
|
| 12 | param_space : dict[str, tuple or list or optuna distributions] |
|
| 13 | The search space to explore. Dictionary with parameter names |
|
| 14 | as keys and either tuples/lists of (low, high) or |
|
| 15 | optuna distribution objects as values. |
|
| 16 | n_trials : int, default=100 |
|
| 17 | Number of optimization trials. |
|
| 18 | initialize : dict[str, int or list[dict]], default=None |
|
| 19 | The method to generate initial positions. A dictionary with |
|
| 20 | the following key literals and the corresponding value type: |
|
| 21 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 22 | random_state : int or None, default=None |
|
| 23 | If None, create a new random state. If int, create a new random state |
|
| 24 | seeded with the value. |
|
| 25 | early_stopping : int, default=None |
|
| 26 | Number of consecutive trials without improvement after which to stop. |
|
| 27 | max_score : float, default=None |
|
| 28 | Maximum score threshold. Stop optimization when reached. |
|
| 29 | population_size : int, default=50 |
|
| 30 | Population size for NSGA-III. |
|
| 31 | mutation_prob : float, default=0.1 |
|
| 32 | Mutation probability for NSGA-III. |
|
| 33 | crossover_prob : float, default=0.9 |
|
| 34 | Crossover probability for NSGA-III. |
|
| 35 | experiment : BaseExperiment, optional |
|
| 36 | The experiment to optimize parameters for. |
|
| 37 | Optional, can be passed later via ``set_params``. |
|
| 38 | ||
| 39 | Examples |
|
| 40 | -------- |
|
| 41 | Basic usage of NSGAIIIOptimizer with a scikit-learn experiment: |
|
| 42 | ||
| 43 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 44 | >>> from hyperactive.opt.optuna import NSGAIIIOptimizer |
|
| 45 | >>> from sklearn.datasets import load_iris |
|
| 46 | >>> from sklearn.svm import SVC |
|
| 47 | >>> X, y = load_iris(return_X_y=True) |
|
| 48 | >>> sklearn_exp = SklearnCvExperiment(estimator=SVC(), X=X, y=y) |
|
| 49 | >>> param_space = { |
|
| 50 | ... "C": (0.01, 10), |
|
| 51 | ... "gamma": (0.0001, 10), |
|
| 52 | ... } |
|
| 53 | >>> optimizer = NSGAIIIOptimizer( |
|
| 54 | ... param_space=param_space, n_trials=50, experiment=sklearn_exp |
|
| 55 | ... ) |
|
| 56 | >>> best_params = optimizer.run() |
|
| 57 | """ |
|
| 58 | ||
| 59 | _tags = { |
|
| 60 | "info:name": "NSGA-III Optimizer", |
|
| 61 | "info:local_vs_global": "global", |
|
| 62 | "info:explore_vs_exploit": "mixed", |
|
| 63 | "info:compute": "high", |
|
| 64 | "python_dependencies": ["optuna"], |
|
| 65 | } |
|
| 66 | ||
| 67 | def __init__( |
|
| 68 | self, |
|
| 69 | param_space=None, |
|
| 70 | n_trials=100, |
|
| 71 | initialize=None, |
|
| 72 | random_state=None, |
|
| 73 | early_stopping=None, |
|
| 74 | max_score=None, |
|
| 75 | population_size=50, |
|
| 76 | mutation_prob=0.1, |
|
| 77 | crossover_prob=0.9, |
|
| 78 | experiment=None, |
|
| 79 | ): |
|
| 80 | self.population_size = population_size |
|
| 81 | self.mutation_prob = mutation_prob |
|
| 82 | self.crossover_prob = crossover_prob |
|
| 83 | ||
| 84 | super().__init__( |
|
| 85 | param_space=param_space, |
|
| 86 | n_trials=n_trials, |
|
| 87 | initialize=initialize, |
|
| 88 | random_state=random_state, |
|
| 89 | early_stopping=early_stopping, |
|
| 90 | max_score=max_score, |
|
| 91 | experiment=experiment, |
|
| 92 | ) |
|
| 93 | ||
| 94 | def _get_optimizer(self): |
|
| 95 | """Get the NSGA-III optimizer. |
|
| 96 | ||
| 97 | Returns |
|
| 98 | ------- |
|
| 99 | optimizer |
|
| 100 | The Optuna NSGAIIISampler instance |
|
| 101 | """ |
|
| 102 | import optuna |
|
| 103 | ||
| 104 | optimizer_kwargs = { |
|
| 105 | "population_size": self.population_size, |
|
| 106 | "mutation_prob": self.mutation_prob, |
|
| 107 | "crossover_prob": self.crossover_prob, |
|
| 108 | } |
|
| 109 | ||
| 110 | if self.random_state is not None: |
|
| 111 | optimizer_kwargs["seed"] = self.random_state |
|
| 112 | ||
| 113 | return optuna.samplers.NSGAIIISampler(**optimizer_kwargs) |
|
| 114 | ||
| 115 | @classmethod |
|
| 116 | def get_test_params(cls, parameter_set="default"): |
|
| 117 | """Return testing parameter settings for the optimizer.""" |
|
| 118 | params = super().get_test_params(parameter_set) |
|
| 119 | params[0].update( |
|
| 120 | { |
|
| 121 | "population_size": 20, |
|
| 122 | "mutation_prob": 0.2, |
|
| 123 | "crossover_prob": 0.8, |
|
| 124 | } |
|
| 125 | ) |
|
| 126 | return params |
|
| 127 | ||
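Both duplicated spans share the same shape: `__init__` stores the sampler-specific arguments and forwards the common ones to `_BaseOptunaAdapter`, `_get_optimizer` rebuilds a kwargs dict, applies identical seed handling, and constructs an Optuna sampler, and `get_test_params` patches sampler-specific test values into the base parameters. One way to collapse the duplication would be to hoist the kwargs-plus-seed logic into a single hook. The sketch below is hypothetical: the internals of `_BaseOptunaAdapter` are not shown in this report, and the hook names `_sampler_cls` and `_get_sampler_kwargs` are illustrative, not existing hyperactive API.

```python
# Hypothetical refactoring sketch, not hyperactive's actual base class.
# Assumes optuna is installed; GPSampler additionally needs torch at
# sampling time.
import optuna


class _SamplerFactoryMixin:
    """One shared _get_optimizer replacing both duplicated bodies."""

    _sampler_cls = None  # each subclass points this at an optuna sampler class

    def _get_sampler_kwargs(self):
        """Return the sampler-specific keyword arguments (per subclass)."""
        raise NotImplementedError

    def _get_optimizer(self):
        kwargs = self._get_sampler_kwargs()
        if self.random_state is not None:  # the seed handling, written once
            kwargs["seed"] = self.random_state
        return self._sampler_cls(**kwargs)


class _GPDemo(_SamplerFactoryMixin):
    """Minimal stand-in for GPOptimizer showing how small a subclass gets."""

    _sampler_cls = optuna.samplers.GPSampler

    def __init__(self, n_startup_trials=10, deterministic_objective=False,
                 random_state=None):
        self.n_startup_trials = n_startup_trials
        self.deterministic_objective = deterministic_objective
        self.random_state = random_state

    def _get_sampler_kwargs(self):
        return {
            "n_startup_trials": self.n_startup_trials,
            "deterministic_objective": self.deterministic_objective,
        }


sampler = _GPDemo(random_state=42)._get_optimizer()  # an optuna GPSampler
```

With this shape, `NSGAIIIOptimizer` would differ from `GPOptimizer` only in `_sampler_cls` and the kwargs dict, which is exactly the delta between the two spans flagged here.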
| @@ 7-120 (lines=114) @@ | ||
| 4 | from .._adapters._base_optuna_adapter import _BaseOptunaAdapter |
|
| 5 | ||
| 6 | ||
| 7 | class GPOptimizer(_BaseOptunaAdapter): |
|
| 8 | """Gaussian Process-based Bayesian optimizer. |
|
| 9 | ||
| 10 | Parameters |
|
| 11 | ---------- |
|
| 12 | param_space : dict[str, tuple or list or optuna distributions] |
|
| 13 | The search space to explore. Dictionary with parameter names |
|
| 14 | as keys and either tuples/lists of (low, high) or |
|
| 15 | optuna distribution objects as values. |
|
| 16 | n_trials : int, default=100 |
|
| 17 | Number of optimization trials. |
|
| 18 | initialize : dict[str, int or list[dict]], default=None |
|
| 19 | The method to generate initial positions. A dictionary with |
|
| 20 | the following key literals and the corresponding value type: |
|
| 21 | {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]} |
|
| 22 | random_state : int or None, default=None |
|
| 23 | If None, create a new random state. If int, create a new random state |
|
| 24 | seeded with the value. |
|
| 25 | early_stopping : int, default=None |
|
| 26 | Number of consecutive trials without improvement after which to stop. |
|
| 27 | max_score : float, default=None |
|
| 28 | Maximum score threshold. Stop optimization when reached. |
|
| 29 | n_startup_trials : int, default=10 |
|
| 30 | Number of startup trials sampled before the Gaussian process model takes over. |
|
| 31 | deterministic_objective : bool, default=False |
|
| 32 | Whether the objective function is deterministic. |
|
| 33 | experiment : BaseExperiment, optional |
|
| 34 | The experiment to optimize parameters for. |
|
| 35 | Optional, can be passed later via ``set_params``. |
|
| 36 | ||
| 37 | Examples |
|
| 38 | -------- |
|
| 39 | Basic usage of GPOptimizer with a scikit-learn experiment: |
|
| 40 | ||
| 41 | >>> from hyperactive.experiment.integrations import SklearnCvExperiment |
|
| 42 | >>> from hyperactive.opt.optuna import GPOptimizer |
|
| 43 | >>> from sklearn.datasets import load_iris |
|
| 44 | >>> from sklearn.svm import SVC |
|
| 45 | >>> X, y = load_iris(return_X_y=True) |
|
| 46 | >>> sklearn_exp = SklearnCvExperiment(estimator=SVC(), X=X, y=y) |
|
| 47 | >>> param_space = { |
|
| 48 | ... "C": (0.01, 10), |
|
| 49 | ... "gamma": (0.0001, 10), |
|
| 50 | ... } |
|
| 51 | >>> optimizer = GPOptimizer( |
|
| 52 | ... param_space=param_space, n_trials=50, experiment=sklearn_exp |
|
| 53 | ... ) |
|
| 54 | >>> best_params = optimizer.run() |
|
| 55 | """ |
|
| 56 | ||
| 57 | _tags = { |
|
| 58 | "info:name": "Gaussian Process Optimizer", |
|
| 59 | "info:local_vs_global": "global", |
|
| 60 | "info:explore_vs_exploit": "exploit", |
|
| 61 | "info:compute": "high", |
|
| 62 | "python_dependencies": ["optuna"], |
|
| 63 | } |
|
| 64 | ||
| 65 | def __init__( |
|
| 66 | self, |
|
| 67 | param_space=None, |
|
| 68 | n_trials=100, |
|
| 69 | initialize=None, |
|
| 70 | random_state=None, |
|
| 71 | early_stopping=None, |
|
| 72 | max_score=None, |
|
| 73 | n_startup_trials=10, |
|
| 74 | deterministic_objective=False, |
|
| 75 | experiment=None, |
|
| 76 | ): |
|
| 77 | self.n_startup_trials = n_startup_trials |
|
| 78 | self.deterministic_objective = deterministic_objective |
|
| 79 | ||
| 80 | super().__init__( |
|
| 81 | param_space=param_space, |
|
| 82 | n_trials=n_trials, |
|
| 83 | initialize=initialize, |
|
| 84 | random_state=random_state, |
|
| 85 | early_stopping=early_stopping, |
|
| 86 | max_score=max_score, |
|
| 87 | experiment=experiment, |
|
| 88 | ) |
|
| 89 | ||
| 90 | def _get_optimizer(self): |
|
| 91 | """Get the GP optimizer. |
|
| 92 | ||
| 93 | Returns |
|
| 94 | ------- |
|
| 95 | optimizer |
|
| 96 | The Optuna GPSampler instance |
|
| 97 | """ |
|
| 98 | import optuna |
|
| 99 | ||
| 100 | optimizer_kwargs = { |
|
| 101 | "n_startup_trials": self.n_startup_trials, |
|
| 102 | "deterministic_objective": self.deterministic_objective, |
|
| 103 | } |
|
| 104 | ||
| 105 | if self.random_state is not None: |
|
| 106 | optimizer_kwargs["seed"] = self.random_state |
|
| 107 | ||
| 108 | return optuna.samplers.GPSampler(**optimizer_kwargs) |
|
| 109 | ||
| 110 | @classmethod |
|
| 111 | def get_test_params(cls, parameter_set="default"): |
|
| 112 | """Return testing parameter settings for the optimizer.""" |
|
| 113 | params = super().get_test_params(parameter_set) |
|
| 114 | params[0].update( |
|
| 115 | { |
|
| 116 | "n_startup_trials": 5, |
|
| 117 | "deterministic_objective": True, |
|
| 118 | } |
|
| 119 | ) |
|
| 120 | return params |
|
| 121 | ||
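Stripped of the shared scaffolding, the two `_get_optimizer` bodies above reduce to direct Optuna sampler construction; only the sampler class and its keyword arguments differ. A minimal sketch with the defaults quoted above, assuming `optuna` is installed:

```python
import optuna

seed = 42  # what both adapters pass as "seed" when random_state is set

# NSGAIIIOptimizer._get_optimizer, inlined with its documented defaults
nsga3 = optuna.samplers.NSGAIIISampler(
    population_size=50, mutation_prob=0.1, crossover_prob=0.9, seed=seed
)

# GPOptimizer._get_optimizer, inlined with its documented defaults
gp = optuna.samplers.GPSampler(
    n_startup_trials=10, deterministic_objective=False, seed=seed
)

# Either sampler plugs into a study the same way, which is why everything
# around these two constructor calls is duplicated verbatim.
study = optuna.create_study(sampler=gp)
```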