| Metric | Value |
| --- | --- |
| Conditions | 5 |
| Total Lines | 113 |
| Code Lines | 47 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. Moreover, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments inside a method's body, that is usually a sign to extract the commented part into a new method and to use the comment as a starting point for naming it.
Commonly applied refactorings include Extract Method, in particular when many parameters or temporary variables are present; a minimal sketch of such an extraction follows the flagged code below.
```python
def main():
    # === CmaEsSampler Example ===
    # Covariance Matrix Adaptation Evolution Strategy

    # Check if cmaes is available
    try:
        import cmaes

        cmaes_available = True
        print(" CMA-ES package is available")
    except ImportError:
        cmaes_available = False
        print("⚠ CMA-ES package not available. Install with: pip install cmaes")
        print(" This example will demonstrate the interface but may fail at runtime.")
        print()

    cmaes_theory()

    # Create a continuous optimization problem
    X, y = make_regression(n_samples=200, n_features=10, noise=0.1, random_state=42)
    print(
        f"Dataset: Synthetic regression ({X.shape[0]} samples, {X.shape[1]} features)"
    )

    # Create experiment - neural network with continuous parameters
    estimator = MLPRegressor(random_state=42, max_iter=1000)
    experiment = SklearnCvExperiment(
        estimator=estimator, X=X, y=y, cv=3, scoring="neg_mean_squared_error"
    )

    # Define search space - ONLY continuous parameters (CMA-ES limitation)
    param_space = {
        "alpha": (1e-6, 1e-1),  # L2 regularization
        "learning_rate_init": (1e-4, 1e-1),  # Initial learning rate
        "beta_1": (0.8, 0.99),  # Adam beta1 parameter
        "beta_2": (0.9, 0.999),  # Adam beta2 parameter
        "epsilon": (1e-9, 1e-6),  # Adam epsilon parameter
        # Note: No categorical parameters - CMA-ES doesn't support them
    }

    # Search Space (Continuous parameters only):
    # for param, space in param_space.items():
    #     print(f" {param}: {space}")
    # Note: CMA-ES only works with continuous parameters
    # For mixed parameter types, consider TPESampler or GPSampler

    # Configure CmaEsSampler
    optimizer = CmaEsSampler(
        param_space=param_space,
        n_trials=40,
        random_state=42,
        experiment=experiment,
        sigma0=0.2,  # Initial step size (exploration vs exploitation)
        n_startup_trials=5,  # Random trials before CMA-ES starts
    )

    # CmaEsSampler Configuration:
    #   n_trials: configured above
    #   sigma0: initial step size
    #   n_startup_trials: random trials before CMA-ES starts
    #   Adaptive covariance matrix will be learned during optimization

    if not cmaes_available:
        print("⚠ Skipping optimization due to missing 'cmaes' package")
        print("Install with: pip install cmaes")
        return None, None

    # Run optimization
    # Running CMA-ES optimization...
    try:
        best_params = optimizer.run()

        # Results
        print("\n=== Results ===")
        print(f"Best parameters: {best_params}")
        print(f"Best score: {optimizer.best_score_:.4f}")
        print()
    except ImportError as e:
        print(f"CMA-ES failed: {e}")
        print("Install the required package: pip install cmaes")
        return None, None

    # CMA-ES Behavior Analysis:
    # Evolution of search distribution:
    #   Initial: Spherical distribution (σ₀ * I)
    #   Early trials: Random exploration to gather information
    #   Mid-trials: Covariance matrix learns parameter correlations
    #   Later trials: Focused search along principal component directions

    # Adaptive Properties:
    #   Step size (σ) adapts to local topology
    #   Covariance matrix (C) learns parameter interactions
    #   Mean vector (μ) tracks promising regions
    #   Handles ill-conditioned and rotated problems

    # Best Use Cases:
    #   Continuous optimization problems
    #   Parameters with potential correlations
    #   Non-convex, multimodal functions
    #   When gradient information is unavailable
    #   Medium-dimensional problems (2-40 parameters)

    # Limitations:
    #   Only continuous parameters (no categorical/discrete)
    #   Requires additional 'cmaes' package
    #   Can be slower than TPE for simple problems
    #   Memory usage grows with parameter dimension

    if cmaes_available:
        return best_params, optimizer.best_score_
    else:
        return None, None
```
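Applying the extraction advice to the flagged method: each commented block in `main()` can become a small, well-named helper. The sketch below is illustrative only; the helper names (`check_cmaes_available`, `build_continuous_param_space`) are not part of the original code, and the remaining names (`CmaEsSampler`, `SklearnCvExperiment`, etc.) are assumed to be imported elsewhere in the module.

```python
def check_cmaes_available():
    # Extracted from the "Check if cmaes is available" block;
    # the original comment supplies the function name.
    try:
        import cmaes  # noqa: F401

        print(" CMA-ES package is available")
        return True
    except ImportError:
        print("⚠ CMA-ES package not available. Install with: pip install cmaes")
        return False


def build_continuous_param_space():
    # Extracted from the "Define search space" block; CMA-ES only
    # supports continuous parameters, so no categorical entries appear.
    return {
        "alpha": (1e-6, 1e-1),
        "learning_rate_init": (1e-4, 1e-1),
        "beta_1": (0.8, 0.99),
        "beta_2": (0.9, 0.999),
        "epsilon": (1e-9, 1e-6),
    }


def main():
    if not check_cmaes_available():
        return None, None
    param_space = build_continuous_param_space()
    # ... the remaining steps (experiment setup, CmaEsSampler
    # configuration, optimization run) stay as in the original,
    # but now read as a sequence of well-named calls.
```

Each helper is small enough to be named after the comment it replaces, which directly reduces the line and condition counts reported above.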