Code Duplication    Length = 137-150 lines in 5 locations

src/hyperactive/opt/gfo/_simulated_annealing.py 1 location

@@ 4-153 (lines=150) @@
1
from hyperactive.opt._adapters._gfo import _BaseGFOadapter
2
3
4
class SimulatedAnnealing(_BaseGFOadapter):
5
    """Simulated annealing optimizer.
6
7
    Parameters
8
    ----------
9
    search_space : dict[str, list]
10
        The search space to explore. A dictionary with parameter
11
        names as keys and a numpy array as values.
12
    initialize : dict[str, int]
13
        The method to generate initial positions. A dictionary with
14
        the following key literals and the corresponding value type:
15
        {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]}
16
    constraints : list[callable]
17
        A list of constraints, where each constraint is a callable.
18
        The callable returns `True` or `False` dependent on the input parameters.
19
    random_state : None, int
20
        If None, create a new random state. If int, create a new random state
21
        seeded with the value.
22
    rand_rest_p : float
23
        The probability of a random iteration during the search process.
24
    epsilon : float
25
        The step-size for the climbing.
26
    distribution : str
27
        The type of distribution to sample from.
28
    n_neighbours : int
29
        The number of neighbours to sample and evaluate before moving to the best
30
        of those neighbours.
31
    annealing_rate : float
32
        The rate at which the temperature is annealed.
33
    start_temp : float
34
        The initial temperature.
35
    n_iter : int, default=100
36
        The number of iterations to run the optimizer.
37
    verbose : bool, default=False
38
        If True, print the progress of the optimization process.
39
    experiment : BaseExperiment, optional
40
        The experiment to optimize parameters for.
41
        Optional, can be passed later via ``set_params``.
42
43
    Examples
44
    --------
45
    Basic usage of SimulatedAnnealing with a scikit-learn experiment:
46
47
    1. defining the experiment to optimize:
48
    >>> from hyperactive.experiment.integrations import SklearnCvExperiment
49
    >>> from sklearn.datasets import load_iris
50
    >>> from sklearn.svm import SVC
51
    >>>
52
    >>> X, y = load_iris(return_X_y=True)
53
    >>>
54
    >>> sklearn_exp = SklearnCvExperiment(
55
    ...     estimator=SVC(),
56
    ...     X=X,
57
    ...     y=y,
58
    ... )
59
60
    2. setting up the SimulatedAnnealing optimizer:
61
    >>> from hyperactive.opt import SimulatedAnnealing
62
    >>> import numpy as np
63
    >>>
64
    >>> config = {
65
    ...     "search_space": {
66
    ...         "C": [0.01, 0.1, 1, 10],
67
    ...         "gamma": [0.0001, 0.01, 0.1, 1, 10],
68
    ...     },
69
    ...     "n_iter": 100,
70
    ... }
71
    >>> optimizer = SimulatedAnnealing(experiment=sklearn_exp, **config)
72
73
    3. running the optimization:
74
    >>> best_params = optimizer.solve()
75
76
    Best parameters can also be accessed via:
77
    >>> best_params = optimizer.best_params_
78
    """
79
80
    _tags = {
81
        "info:name": "Simulated Annealing",
82
        "info:local_vs_global": "global",
83
        "info:explore_vs_exploit": "explore",
84
        "info:compute": "middle",
85
    }
86
87
    def __init__(
88
        self,
89
        search_space=None,
90
        initialize=None,
91
        constraints=None,
92
        random_state=None,
93
        rand_rest_p=0.1,
94
        epsilon=0.01,
95
        distribution="normal",
96
        n_neighbours=10,
97
        annealing_rate=0.97,
98
        start_temp=1,
99
        n_iter=100,
100
        verbose=False,
101
        experiment=None,
102
    ):
103
        self.random_state = random_state
104
        self.rand_rest_p = rand_rest_p
105
        self.epsilon = epsilon
106
        self.distribution = distribution
107
        self.n_neighbours = n_neighbours
108
        self.annealing_rate = annealing_rate
109
        self.start_temp = start_temp
110
        self.search_space = search_space
111
        self.initialize = initialize
112
        self.constraints = constraints
113
        self.n_iter = n_iter
114
        self.experiment = experiment
115
        self.verbose = verbose
116
117
        super().__init__()
118
119
    def _get_gfo_class(self):
120
        """Get the GFO class to use.
121
122
        Returns
123
        -------
124
        class
125
            The GFO class to use. One of the concrete GFO classes
126
        """
127
        from gradient_free_optimizers import SimulatedAnnealingOptimizer
128
129
        return SimulatedAnnealingOptimizer
130
131
    @classmethod
132
    def get_test_params(cls, parameter_set="default"):
133
        """Get the test parameters for the optimizer.
134
135
        Returns
136
        -------
137
        dict with str keys
138
            The test parameters dictionary.
139
        """
140
        params = super().get_test_params()
141
        experiment = params[0]["experiment"]
142
        more_params = {
143
            "experiment": experiment,
144
            "start_temp": 0.33,
145
            "annealing_rate": 1.01,
146
            "search_space": {
147
                "C": [0.01, 0.1, 1, 10],
148
                "gamma": [0.0001, 0.01, 0.1, 1, 10],
149
            },
150
            "n_iter": 100,
151
        }
152
        params.append(more_params)
153
        return params
154

src/hyperactive/opt/gfo/_stochastic_hillclimbing.py 1 location

@@ 8-153 (lines=146) @@
5
from hyperactive.opt._adapters._gfo import _BaseGFOadapter
6
7
8
class StochasticHillClimbing(_BaseGFOadapter):
9
    """Stochastic hill climbing optimizer.
10
11
    Parameters
12
    ----------
13
    search_space : dict[str, list]
14
        The search space to explore. A dictionary with parameter
15
        names as keys and a numpy array as values.
16
        Optional, can be passed later via ``set_params``.
17
    initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4}
18
        The method to generate initial positions. A dictionary with
19
        the following key literals and the corresponding value type:
20
        {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]}
21
    constraints : list[callable], default=[]
22
        A list of constraints, where each constraint is a callable.
23
        The callable returns `True` or `False` dependent on the input parameters.
24
    random_state : None, int, default=None
25
        If None, create a new random state. If int, create a new random state
26
        seeded with the value.
27
    rand_rest_p : float, default=0.1
28
        The probability of a random iteration during the search process.
29
    epsilon : float, default=0.01
30
        The step-size for the climbing.
31
    distribution : str, default="normal"
32
        The type of distribution to sample from.
33
    n_neighbours : int, default=10
34
        The number of neighbours to sample and evaluate before moving to the best
35
        of those neighbours.
36
    p_accept : float, default=0.5
37
        The probability of accepting a transition in the hill climbing process.
38
    n_iter : int, default=100
39
        The number of iterations to run the optimizer.
40
    verbose : bool, default=False
41
        If True, print the progress of the optimization process.
42
    experiment : BaseExperiment, optional
43
        The experiment to optimize parameters for.
44
        Optional, can be passed later via ``set_params``.
45
46
    Examples
47
    --------
48
    Hill climbing applied to scikit-learn parameter tuning:
49
50
    1. defining the experiment to optimize:
51
    >>> from hyperactive.experiment.integrations import SklearnCvExperiment
52
    >>> from sklearn.datasets import load_iris
53
    >>> from sklearn.svm import SVC
54
    >>>
55
    >>> X, y = load_iris(return_X_y=True)
56
    >>>
57
    >>> sklearn_exp = SklearnCvExperiment(
58
    ...     estimator=SVC(),
59
    ...     X=X,
60
    ...     y=y,
61
    ... )
62
63
    2. setting up the hill climbing optimizer:
64
    >>> from hyperactive.opt import StochasticHillClimbing
65
    >>> import numpy as np
66
    >>>
67
    >>> config = {
68
    ...     "search_space": {
69
    ...         "C": [0.01, 0.1, 1, 10],
70
    ...         "gamma": [0.0001, 0.01, 0.1, 1, 10],
71
    ...     },
72
    ...     "n_iter": 100,
73
    ... }
74
    >>> hillclimbing = StochasticHillClimbing(experiment=sklearn_exp, **config)
75
76
    3. running the hill climbing search:
77
    >>> best_params = hillclimbing.solve()
78
79
    Best parameters can also be accessed via the attributes:
80
    >>> best_params = hillclimbing.best_params_
81
    """
82
83
    _tags = {
84
        "info:name": "Hill Climbing",
85
        "info:local_vs_global": "local",  # "local", "mixed", "global"
86
        "info:explore_vs_exploit": "exploit",  # "explore", "exploit", "mixed"
87
        "info:compute": "low",  # "low", "middle", "high"
88
    }
89
90
    def __init__(
91
        self,
92
        search_space=None,
93
        initialize=None,
94
        constraints=None,
95
        random_state=None,
96
        rand_rest_p=0.1,
97
        epsilon=0.01,
98
        distribution="normal",
99
        n_neighbours=10,
100
        p_accept=0.5,
101
        n_iter=100,
102
        verbose=False,
103
        experiment=None,
104
    ):
105
        self.random_state = random_state
106
        self.rand_rest_p = rand_rest_p
107
        self.epsilon = epsilon
108
        self.distribution = distribution
109
        self.n_neighbours = n_neighbours
110
        self.search_space = search_space
111
        self.initialize = initialize
112
        self.constraints = constraints
113
        self.p_accept = p_accept
114
        self.n_iter = n_iter
115
        self.experiment = experiment
116
        self.verbose = verbose
117
118
        super().__init__()
119
120
    def _get_gfo_class(self):
121
        """Get the GFO class to use.
122
123
        Returns
124
        -------
125
        class
126
            The GFO class to use. One of the concrete GFO classes
127
        """
128
        from gradient_free_optimizers import StochasticHillClimbingOptimizer
129
130
        return StochasticHillClimbingOptimizer
131
132
    @classmethod
133
    def get_test_params(cls, parameter_set="default"):
134
        """Get the test parameters for the optimizer.
135
136
        Returns
137
        -------
138
        dict with str keys
139
            The test parameters dictionary.
140
        """
141
        params = super().get_test_params()
142
        experiment = params[0]["experiment"]
143
        more_params = {
144
            "experiment": experiment,
145
            "p_accept": 0.33,
146
            "search_space": {
147
                "C": [0.01, 0.1, 1, 10],
148
                "gamma": [0.0001, 0.01, 0.1, 1, 10],
149
            },
150
            "n_iter": 100,
151
        }
152
        params.append(more_params)
153
        return params
154

src/hyperactive/opt/gfo/_repulsing_hillclimbing.py 1 location

@@ 8-153 (lines=146) @@
5
from hyperactive.opt._adapters._gfo import _BaseGFOadapter
6
7
8
class RepulsingHillClimbing(_BaseGFOadapter):
9
    """Repulsing hill climbing optimizer.
10
11
    Parameters
12
    ----------
13
    search_space : dict[str, list]
14
        The search space to explore. A dictionary with parameter
15
        names as keys and a numpy array as values.
16
        Optional, can be passed later via ``set_params``.
17
    initialize : dict[str, int], default={"grid": 4, "random": 2, "vertices": 4}
18
        The method to generate initial positions. A dictionary with
19
        the following key literals and the corresponding value type:
20
        {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]}
21
    constraints : list[callable], default=[]
22
        A list of constraints, where each constraint is a callable.
23
        The callable returns `True` or `False` dependent on the input parameters.
24
    random_state : None, int, default=None
25
        If None, create a new random state. If int, create a new random state
26
        seeded with the value.
27
    rand_rest_p : float, default=0.1
28
        The probability of a random iteration during the search process.
29
    epsilon : float, default=0.01
30
        The step-size for the climbing.
31
    distribution : str, default="normal"
32
        The type of distribution to sample from.
33
    n_neighbours : int, default=10
34
        The number of neighbours to sample and evaluate before moving to the best
35
        of those neighbours.
36
    repulsion_factor : float, default=5
37
        The factor to control the repulsion of the hill climbing process.
38
    n_iter : int, default=100
39
        The number of iterations to run the optimizer.
40
    verbose : bool, default=False
41
        If True, print the progress of the optimization process.
42
    experiment : BaseExperiment, optional
43
        The experiment to optimize parameters for.
44
        Optional, can be passed later via ``set_params``.
45
46
    Examples
47
    --------
48
    Hill climbing applied to scikit-learn parameter tuning:
49
50
    1. defining the experiment to optimize:
51
    >>> from hyperactive.experiment.integrations import SklearnCvExperiment
52
    >>> from sklearn.datasets import load_iris
53
    >>> from sklearn.svm import SVC
54
    >>>
55
    >>> X, y = load_iris(return_X_y=True)
56
    >>>
57
    >>> sklearn_exp = SklearnCvExperiment(
58
    ...     estimator=SVC(),
59
    ...     X=X,
60
    ...     y=y,
61
    ... )
62
63
    2. setting up the hill climbing optimizer:
64
    >>> from hyperactive.opt import RepulsingHillClimbing
65
    >>> import numpy as np
66
    >>>
67
    >>> config = {
68
    ...     "search_space": {
69
    ...         "C": [0.01, 0.1, 1, 10],
70
    ...         "gamma": [0.0001, 0.01, 0.1, 1, 10],
71
    ...     },
72
    ...     "n_iter": 100,
73
    ... }
74
    >>> hillclimbing = RepulsingHillClimbing(experiment=sklearn_exp, **config)
75
76
    3. running the hill climbing search:
77
    >>> best_params = hillclimbing.solve()
78
79
    Best parameters can also be accessed via the attributes:
80
    >>> best_params = hillclimbing.best_params_
81
    """
82
83
    _tags = {
84
        "info:name": "Repulsing Hill Climbing",
85
        "info:local_vs_global": "mixed",  # "local", "mixed", "global"
86
        "info:explore_vs_exploit": "exploit",  # "explore", "exploit", "mixed"
87
        "info:compute": "low",  # "low", "middle", "high"
88
    }
89
90
    def __init__(
91
        self,
92
        search_space=None,
93
        initialize=None,
94
        constraints=None,
95
        random_state=None,
96
        rand_rest_p=0.1,
97
        epsilon=0.01,
98
        distribution="normal",
99
        n_neighbours=10,
100
        repulsion_factor=5,
101
        n_iter=100,
102
        verbose=False,
103
        experiment=None,
104
    ):
105
        self.random_state = random_state
106
        self.rand_rest_p = rand_rest_p
107
        self.epsilon = epsilon
108
        self.distribution = distribution
109
        self.n_neighbours = n_neighbours
110
        self.search_space = search_space
111
        self.initialize = initialize
112
        self.constraints = constraints
113
        self.repulsion_factor = repulsion_factor
114
        self.n_iter = n_iter
115
        self.experiment = experiment
116
        self.verbose = verbose
117
118
        super().__init__()
119
120
    def _get_gfo_class(self):
121
        """Get the GFO class to use.
122
123
        Returns
124
        -------
125
        class
126
            The GFO class to use. One of the concrete GFO classes
127
        """
128
        from gradient_free_optimizers import RepulsingHillClimbingOptimizer
129
130
        return RepulsingHillClimbingOptimizer
131
132
    @classmethod
133
    def get_test_params(cls, parameter_set="default"):
134
        """Get the test parameters for the optimizer.
135
136
        Returns
137
        -------
138
        dict with str keys
139
            The test parameters dictionary.
140
        """
141
        params = super().get_test_params()
142
        experiment = params[0]["experiment"]
143
        more_params = {
144
            "experiment": experiment,
145
            "repulsion_factor": 7,
146
            "search_space": {
147
                "C": [0.01, 0.1, 1, 10],
148
                "gamma": [0.0001, 0.01, 0.1, 1, 10],
149
            },
150
            "n_iter": 100,
151
        }
152
        params.append(more_params)
153
        return params
154

src/hyperactive/opt/gfo/_random_restart_hill_climbing.py 1 location

@@ 4-141 (lines=138) @@
1
from hyperactive.opt._adapters._gfo import _BaseGFOadapter
2
3
4
class RandomRestartHillClimbing(_BaseGFOadapter):
5
    """Random restart hill climbing optimizer.
6
7
    Parameters
8
    ----------
9
    search_space : dict[str, list]
10
        The search space to explore. A dictionary with parameter
11
        names as keys and a numpy array as values.
12
    initialize : dict[str, int]
13
        The method to generate initial positions. A dictionary with
14
        the following key literals and the corresponding value type:
15
        {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]}
16
    constraints : list[callable]
17
        A list of constraints, where each constraint is a callable.
18
        The callable returns `True` or `False` dependent on the input parameters.
19
    random_state : None, int
20
        If None, create a new random state. If int, create a new random state
21
        seeded with the value.
22
    rand_rest_p : float
23
        The probability of a random iteration during the search process.
24
    epsilon : float
25
        The step-size for the climbing.
26
    distribution : str
27
        The type of distribution to sample from.
28
    n_neighbours : int
29
        The number of neighbours to sample and evaluate before moving to the best
30
        of those neighbours.
31
    n_iter_restart : int
32
        The number of iterations after which to restart at a random position.
33
34
    Examples
35
    --------
36
    Basic usage of RandomRestartHillClimbing with a scikit-learn experiment:
37
38
    1. defining the experiment to optimize:
39
    >>> from hyperactive.experiment.integrations import SklearnCvExperiment
40
    >>> from sklearn.datasets import load_iris
41
    >>> from sklearn.svm import SVC
42
    >>>
43
    >>> X, y = load_iris(return_X_y=True)
44
    >>>
45
    >>> sklearn_exp = SklearnCvExperiment(
46
    ...     estimator=SVC(),
47
    ...     X=X,
48
    ...     y=y,
49
    ... )
50
51
    2. setting up the RandomRestartHillClimbing optimizer:
52
    >>> from hyperactive.opt import RandomRestartHillClimbing
53
    >>> import numpy as np
54
    >>>
55
    >>> config = {
56
    ...     "search_space": {
57
    ...         "C": [0.01, 0.1, 1, 10],
58
    ...         "gamma": [0.0001, 0.01, 0.1, 1, 10],
59
    ...     },
60
    ...     "n_iter": 100,
61
    ... }
62
    >>> optimizer = RandomRestartHillClimbing(experiment=sklearn_exp, **config)
63
64
    3. running the optimization:
65
    >>> best_params = optimizer.solve()
66
67
    Best parameters can also be accessed via:
68
    >>> best_params = optimizer.best_params_
69
    """
70
71
    _tags = {
72
        "info:name": "Random Restart Hill Climbing",
73
        "info:local_vs_global": "local",
74
        "info:explore_vs_exploit": "mixed",
75
        "info:compute": "middle",
76
    }
77
78
    def __init__(
79
        self,
80
        search_space=None,
81
        initialize=None,
82
        constraints=None,
83
        random_state=None,
84
        rand_rest_p=0.1,
85
        epsilon=0.01,
86
        distribution="normal",
87
        n_neighbours=10,
88
        n_iter_restart=0.5,
89
        n_iter=100,
90
        verbose=False,
91
        experiment=None,
92
    ):
93
        self.random_state = random_state
94
        self.rand_rest_p = rand_rest_p
95
        self.epsilon = epsilon
96
        self.distribution = distribution
97
        self.n_neighbours = n_neighbours
98
        self.n_iter_restart = n_iter_restart
99
        self.search_space = search_space
100
        self.initialize = initialize
101
        self.constraints = constraints
102
        self.n_iter = n_iter
103
        self.experiment = experiment
104
        self.verbose = verbose
105
106
        super().__init__()
107
108
    def _get_gfo_class(self):
109
        """Get the GFO class to use.
110
111
        Returns
112
        -------
113
        class
114
            The GFO class to use. One of the concrete GFO classes
115
        """
116
        from gradient_free_optimizers import RandomRestartHillClimbingOptimizer
117
118
        return RandomRestartHillClimbingOptimizer
119
120
    @classmethod
121
    def get_test_params(cls, parameter_set="default"):
122
        """Get the test parameters for the optimizer.
123
124
        Returns
125
        -------
126
        dict with str keys
127
            The test parameters dictionary.
128
        """
129
        params = super().get_test_params()
130
        experiment = params[0]["experiment"]
131
        more_params = {
132
            "experiment": experiment,
133
            "n_iter_restart": 2,
134
            "search_space": {
135
                "C": [0.01, 0.1, 1, 10],
136
                "gamma": [0.0001, 0.01, 0.1, 1, 10],
137
            },
138
            "n_iter": 100,
139
        }
140
        params.append(more_params)
141
        return params
142

src/hyperactive/opt/gfo/_powells_method.py 1 location

@@ 4-140 (lines=137) @@
1
from hyperactive.opt._adapters._gfo import _BaseGFOadapter
2
3
4
class PowellsMethod(_BaseGFOadapter):
5
    """Powell's method optimizer.
6
7
    Parameters
8
    ----------
9
    search_space : dict[str, list]
10
        The search space to explore. A dictionary with parameter
11
        names as keys and a numpy array as values.
12
    initialize : dict[str, int]
13
        The method to generate initial positions. A dictionary with
14
        the following key literals and the corresponding value type:
15
        {"grid": int, "vertices": int, "random": int, "warm_start": list[dict]}
16
    constraints : list[callable]
17
        A list of constraints, where each constraint is a callable.
18
        The callable returns `True` or `False` dependent on the input parameters.
19
    random_state : None, int
20
        If None, create a new random state. If int, create a new random state
21
        seeded with the value.
22
    rand_rest_p : float
23
        The probability of a random iteration during the search process.
24
    epsilon : float
25
        The step-size for the climbing.
26
    distribution : str
27
        The type of distribution to sample from.
28
    n_neighbours : int
29
        The number of neighbours to sample and evaluate before moving to the best
30
        of those neighbours.
31
    n_iter : int, default=100
32
        The number of iterations to run the optimizer.
33
    verbose : bool, default=False
34
        If True, print the progress of the optimization process.
35
    experiment : BaseExperiment, optional
36
        The experiment to optimize parameters for.
37
        Optional, can be passed later via ``set_params``.
38
39
    Examples
40
    --------
41
    Basic usage of PowellsMethod with a scikit-learn experiment:
42
43
    1. defining the experiment to optimize:
44
    >>> from hyperactive.experiment.integrations import SklearnCvExperiment
45
    >>> from sklearn.datasets import load_iris
46
    >>> from sklearn.svm import SVC
47
    >>>
48
    >>> X, y = load_iris(return_X_y=True)
49
    >>>
50
    >>> sklearn_exp = SklearnCvExperiment(
51
    ...     estimator=SVC(),
52
    ...     X=X,
53
    ...     y=y,
54
    ... )
55
56
    2. setting up the PowellsMethod optimizer:
57
    >>> from hyperactive.opt import PowellsMethod
58
    >>> import numpy as np
59
    >>>
60
    >>> config = {
61
    ...     "search_space": {
62
    ...         "C": [0.01, 0.1, 1, 10],
63
    ...         "gamma": [0.0001, 0.01, 0.1, 1, 10],
64
    ...     },
65
    ...     "n_iter": 100,
66
    ... }
67
    >>> optimizer = PowellsMethod(experiment=sklearn_exp, **config)
68
69
    3. running the optimization:
70
    >>> best_params = optimizer.solve()
71
72
    Best parameters can also be accessed via:
73
    >>> best_params = optimizer.best_params_
74
    """
75
76
    _tags = {
77
        "info:name": "Powell's Method",
78
        "info:local_vs_global": "local",
79
        "info:explore_vs_exploit": "exploit",
80
        "info:compute": "low",
81
    }
82
83
    def __init__(
84
        self,
85
        search_space=None,
86
        initialize=None,
87
        constraints=None,
88
        random_state=None,
89
        rand_rest_p=0.1,
90
        iters_p_dim=10,
91
        n_iter=100,
92
        verbose=False,
93
        experiment=None,
94
    ):
95
        self.random_state = random_state
96
        self.rand_rest_p = rand_rest_p
97
        self.iters_p_dim = iters_p_dim
98
        self.search_space = search_space
99
        self.initialize = initialize
100
        self.constraints = constraints
101
        self.n_iter = n_iter
102
        self.experiment = experiment
103
        self.verbose = verbose
104
105
        super().__init__()
106
107
    def _get_gfo_class(self):
108
        """Get the GFO class to use.
109
110
        Returns
111
        -------
112
        class
113
            The GFO class to use. One of the concrete GFO classes
114
        """
115
        from gradient_free_optimizers import PowellsMethod
116
117
        return PowellsMethod
118
119
    @classmethod
120
    def get_test_params(cls, parameter_set="default"):
121
        """Get the test parameters for the optimizer.
122
123
        Returns
124
        -------
125
        dict with str keys
126
            The test parameters dictionary.
127
        """
128
        params = super().get_test_params()
129
        experiment = params[0]["experiment"]
130
        more_params = {
131
            "experiment": experiment,
132
            "iters_p_dim": 3,
133
            "search_space": {
134
                "C": [0.01, 0.1, 1, 10],
135
                "gamma": [0.0001, 0.01, 0.1, 1, 10],
136
            },
137
            "n_iter": 100,
138
        }
139
        params.append(more_params)
140
        return params
141