| Total Complexity | 1 |
| Total Lines | 31 |
| Duplicated Lines | 0 % |
| Changes | 0 |
| 1 | from sklearn.model_selection import cross_val_score |
||
| 2 | from sklearn.ensemble import GradientBoostingClassifier |
||
| 3 | from sklearn.datasets import load_iris |
||
| 4 | from hyperactive import Hyperactive |
||
| 5 | |||
# Load the iris dataset and expose the feature matrix / target vector
# as module-level names for the optimization run below.
iris_data = load_iris()
X, y = iris_data.data, iris_data.target
def model(para, X, y):
    """Objective function: mean 3-fold CV score of a gradient-boosted classifier.

    Parameters
    ----------
    para : dict
        Hyperparameter sample with keys "n_estimators" and "max_depth".
    X, y : array-like
        Feature matrix and target vector.

    Returns
    -------
    float
        Mean cross-validation accuracy (higher is better).
    """
    # Use a distinct local name: the original reused `model`, shadowing the
    # enclosing function name, which search_config relies on as a dict key.
    estimator = GradientBoostingClassifier(
        n_estimators=para["n_estimators"], max_depth=para["max_depth"]
    )
    scores = cross_val_score(estimator, X, y, cv=3)

    return scores.mean()
| 19 | |||
# Search space: maps the objective function (used as the key) to the
# hyperparameter ranges the optimizer samples from.
search_config = {model: {"n_estimators": range(10, 200, 10), "max_depth": range(2, 15)}}
| 21 | |||
"""
The memory will remember previous evaluations done during the optimization process.
Instead of retraining the model, it accesses the memory and uses the saved score/loss.
This shows as a speed up during the optimization process, since the whole search space has been explored.
"""
# memory=True caches the score of every parameter combination already
# evaluated, so repeated samples among the 1000 iterations skip retraining.
opt = Hyperactive(search_config, n_iter=1000, memory=True)

# Search for the best hyperparameters on the given data.
opt.fit(X, y)
||
| 31 |