"""
CmaEsSampler Example - Covariance Matrix Adaptation Evolution Strategy

CMA-ES is a powerful evolution strategy particularly effective for continuous
optimization problems. It adapts both the mean and covariance matrix of a
multivariate normal distribution to efficiently explore the parameter space.

Characteristics:
- Excellent for continuous parameter optimization
- Adapts search distribution shape and orientation
- Self-adaptive step size control
- Handles ill-conditioned problems well
- Does not work with categorical parameters
- Requires 'cmaes' package: pip install cmaes

Note: This example includes a fallback if the 'cmaes' package is not installed.
"""

import numpy as np
from sklearn.datasets import make_regression
from sklearn.neural_network import MLPRegressor

from hyperactive.experiment.integrations import SklearnCvExperiment
from hyperactive.opt.optuna import CmaEsSampler


def cmaes_theory():
    """Explain CMA-ES algorithm theory."""
    # CMA-ES Algorithm Theory:
    # 1. Maintains a multivariate normal distribution N(m, σ²C)
    #    - m: mean vector (center of the search)
    #    - σ: step size (global scaling)
    #    - C: covariance matrix (shape and orientation)
    #
    # 2. In each generation:
    #    - Sample λ offspring from N(m, σ²C)
    #    - Evaluate all offspring
    #    - Select the μ best offspring as parents
    #    - Update m, σ, and C based on the selected solutions
    #
    # 3. Adaptive features:
    #    - Covariance matrix learns correlations between parameters
    #    - Step size adapts to the local landscape
    #    - Handles rotated/scaled problems efficiently
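

# For illustration only: a minimal numpy sketch of one *simplified* CMA-ES
# generation, matching the steps listed in cmaes_theory(). This is not how
# CmaEsSampler is implemented internally; it omits the step-size and covariance
# (evolution-path) updates. The function name and the toy sphere objective are
# hypothetical.
def _cmaes_generation_sketch(dim=5, lam=12, mu=6, sigma=0.3, seed=0):
    """Run one simplified CMA-ES generation on a toy sphere objective."""
    rng = np.random.default_rng(seed)
    mean = rng.normal(size=dim)  # m: center of the search distribution
    C = np.eye(dim)  # C: covariance matrix (starts spherical)

    def objective(x):  # toy stand-in for the cross-validation loss
        return float(np.sum(x**2))

    # Sample lam offspring from N(m, sigma^2 * C) and rank them by fitness.
    offspring = rng.multivariate_normal(mean, sigma**2 * C, size=lam)
    order = np.argsort([objective(x) for x in offspring])

    # Recombination: move the mean toward the mu best offspring (equal weights).
    # Full CMA-ES would also update sigma and C here via evolution paths.
    return offspring[order[:mu]].mean(axis=0)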


def main():
    # === CmaEsSampler Example ===
    # Covariance Matrix Adaptation Evolution Strategy

    # Check whether the optional 'cmaes' package is available
    try:
        import cmaes  # noqa: F401  # imported only for the availability check

        cmaes_available = True
        print("✓ CMA-ES package is available")
    except ImportError:
        cmaes_available = False
        print("⚠ CMA-ES package not available. Install with: pip install cmaes")
        print(" This example will demonstrate the interface but may fail at runtime.")
        print()

    cmaes_theory()

    # Create a continuous optimization problem
    X, y = make_regression(n_samples=200, n_features=10, noise=0.1, random_state=42)
    print(
        f"Dataset: Synthetic regression ({X.shape[0]} samples, {X.shape[1]} features)"
    )

    # Create experiment - neural network with continuous parameters
    estimator = MLPRegressor(random_state=42, max_iter=1000)
    experiment = SklearnCvExperiment(
        estimator=estimator, X=X, y=y, cv=3, scoring="neg_mean_squared_error"
    )

    # Define search space - ONLY continuous parameters (CMA-ES limitation)
    param_space = {
        "alpha": (1e-6, 1e-1),  # L2 regularization
        "learning_rate_init": (1e-4, 1e-1),  # Initial learning rate
        "beta_1": (0.8, 0.99),  # Adam beta1 parameter
        "beta_2": (0.9, 0.999),  # Adam beta2 parameter
        "epsilon": (1e-9, 1e-6),  # Adam epsilon parameter
        # Note: No categorical parameters - CMA-ES doesn't support them
    }

    # Search space (continuous parameters only):
    # for param, space in param_space.items():
    #     print(f"  {param}: {space}")
    # Note: CMA-ES only works with continuous parameters.
    # For mixed parameter types, consider TPESampler or GPSampler instead;
    # a sketch of such a mixed space follows below.
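
    # For illustration only - a hedged sketch of a mixed search space that
    # CMA-ES cannot handle but TPESampler/GPSampler could (assuming categorical
    # choices are given as lists, as elsewhere in hyperactive's examples):
    # mixed_space = {
    #     "alpha": (1e-6, 1e-1),                       # continuous: fine for CMA-ES
    #     "activation": ["relu", "tanh", "logistic"],  # categorical: not supported
    # }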

    # Configure CmaEsSampler
    optimizer = CmaEsSampler(
        param_space=param_space,
        n_trials=40,
        random_state=42,
        experiment=experiment,
        sigma0=0.2,  # Initial step size (exploration vs exploitation)
        n_startup_trials=5,  # Random trials before CMA-ES starts
    )

    # CmaEsSampler configuration:
    # n_trials: configured above
    # sigma0: initial step size
    # n_startup_trials: random trials before CMA-ES starts
    # Adaptive covariance matrix will be learned during optimization
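    # Hedged note: if sigma0 is omitted, Optuna's underlying CmaEsSampler picks
    # a default from the parameter ranges (documented as min_range / 6); larger
    # values favor exploration, smaller values favor local refinement.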

    if not cmaes_available:
        print("⚠ Skipping optimization due to missing 'cmaes' package")
        print("Install with: pip install cmaes")
        return None, None

    # Run the CMA-ES optimization
    try:
        best_params = optimizer.run()

        # Results
        print("\n=== Results ===")
        print(f"Best parameters: {best_params}")
        print(f"Best score: {optimizer.best_score_:.4f}")
        print()

    except ImportError as e:
        print(f"CMA-ES failed: {e}")
        print("Install the required package: pip install cmaes")
        return None, None

    # CMA-ES behavior analysis - evolution of the search distribution:
    #   Initial: spherical distribution (σ₀ * I)
    #   Early trials: random exploration to gather information
    #   Mid trials: covariance matrix learns parameter correlations
    #   Later trials: focused search along principal component directions

    # Adaptive properties:
    #   Step size (σ) adapts to the local topology
    #   Covariance matrix (C) learns parameter interactions
    #   Mean vector (m) tracks promising regions
    #   Handles ill-conditioned and rotated problems

    # Best use cases:
    #   Continuous optimization problems
    #   Parameters with potential correlations
    #   Non-convex, multimodal functions
    #   When gradient information is unavailable
    #   Medium-dimensional problems (roughly 2-40 parameters)

    # Limitations:
    #   Only continuous parameters (no categorical/discrete)
    #   Requires the additional 'cmaes' package
    #   Can be slower than TPE for simple problems
    #   Memory usage grows with parameter dimension

    # cmaes_available is guaranteed True here (the function returned early
    # otherwise), and best_params is set because a failed run also returned early.
    return best_params, optimizer.best_score_


if __name__ == "__main__":
    best_params, best_score = main()