| Conditions | 1 |
| Total Lines | 59 |
| Code Lines | 36 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. Moreover, if a method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include:
If many parameters/temporary variables are present:
| 1 | """CMA-ES (Covariance Matrix Adaptation Evolution Strategy) optimizer.""" |
||
@classmethod
def get_test_params(cls, parameter_set="default"):
    """Return testing parameter settings for the optimizer.

    Parameters
    ----------
    parameter_set : str, default="default"
        Name of the parameter set to retrieve; forwarded to the base class.

    Returns
    -------
    list of dict
        Keyword-argument dicts suitable for constructing test instances.
        The base-class cases are extended with CMA-ES-specific settings,
        plus two extra cases using purely continuous search spaces
        (CMA-ES does not support categorical parameters).
    """
    from sklearn.datasets import make_regression
    from sklearn.neural_network import MLPRegressor

    from hyperactive.experiment.integrations import SklearnCvExperiment

    # Case 1: start from the base-class cases and pin CMA-ES settings
    # on the first one.
    params = super().get_test_params(parameter_set)
    params[0]["sigma0"] = 0.5
    params[0]["n_startup_trials"] = 1

    # Case 2: neural-network experiment with continuous-only parameters
    # (CMA-ES specific - only continuous parameters allowed).
    features, targets = make_regression(
        n_samples=50, n_features=5, noise=0.1, random_state=42
    )
    nn_experiment = SklearnCvExperiment(
        estimator=MLPRegressor(random_state=42, max_iter=100),
        X=features,
        y=targets,
        cv=3,
    )

    mlp_space = {
        "alpha": (1e-5, 1e-1),  # L2 regularization (continuous)
        "learning_rate_init": (1e-4, 1e-1),  # Learning rate (continuous)
        "beta_1": (0.8, 0.99),  # Adam beta1 (continuous)
        "beta_2": (0.9, 0.999),  # Adam beta2 (continuous)
        # Note: No categorical parameters - CMA-ES doesn't support them
    }

    # Case 3: higher-dimensional continuous box, where CMA-ES shines.
    box_space = {f"x{dim}": (-1.0, 1.0) for dim in range(6)}

    params += [
        {
            "param_space": mlp_space,
            "n_trials": 8,  # Smaller for faster testing
            "experiment": nn_experiment,
            "sigma0": 0.3,  # Different sigma for diversity
            "n_startup_trials": 2,  # More startup trials
        },
        {
            "param_space": box_space,
            "n_trials": 12,
            "experiment": nn_experiment,
            "sigma0": 0.7,  # Larger initial spread
            "n_startup_trials": 3,
        },
    ]

    return params