Total Complexity | 3
Total Lines | 33
Duplicated Lines | 0 %
Changes | 0
# Author: Simon Blanke
# Email: [email protected]
# License: MIT License


import numpy as np

from ..local_opt import StochasticHillClimbingOptimizer


class SimulatedAnnealingOptimizer(StochasticHillClimbingOptimizer):
    name = "Simulated Annealing"
    _name_ = "simulated_annealing"
    __name__ = "SimulatedAnnealingOptimizer"

    optimizer_type = "local"
    computationally_expensive = False

    def __init__(self, *args, annealing_rate=0.97, start_temp=1, **kwargs):
        super().__init__(*args, **kwargs)

        self.annealing_rate = annealing_rate
        self.start_temp = start_temp
        self.temp = start_temp

    def _p_accept_default(self):
        # the 'minus' is omitted because we maximize a score
        return np.exp(self._exponent)

    def evaluate(self, score_new):
        # delegate the acceptance decision to the stochastic hill climber,
        # then cool down the temperature (geometric annealing schedule)
        StochasticHillClimbingOptimizer.evaluate(self, score_new)
        self.temp *= self.annealing_rate
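For reference, below is a minimal standalone sketch of the rule this class encodes. It is not part of the library: the names accept, score_current, and rng are illustrative, and the exact normalization of _exponent inside the parent class may differ. The idea is that a worse candidate is accepted with Metropolis probability exp((score_new - score_current) / temp), with the usual minus sign dropped because scores are maximized, and the temperature decays geometrically after every evaluation, mirroring evaluate().

# Standalone sketch of the simulated-annealing acceptance rule (illustrative only)
import numpy as np

rng = np.random.default_rng(0)


def accept(score_current, score_new, temp):
    # Improvements are always accepted; worse candidates are accepted
    # with probability exp((score_new - score_current) / temp).
    if score_new >= score_current:
        return True
    return rng.random() < np.exp((score_new - score_current) / temp)


temp = 1.0             # plays the role of start_temp
annealing_rate = 0.97  # same default as the class above
score_current = -5.0

for score_new in [-6.0, -4.5, -7.0, -4.0]:
    if accept(score_current, score_new, temp):
        score_current = score_new
    temp *= annealing_rate  # cooling step, as in evaluate()

Subclassing StochasticHillClimbingOptimizer keeps the class small: only the acceptance probability and the cooling step differ from the parent.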