Passed
Push — master ( 626f23...be3b1e )
by Simon
01:48
created

gradient_free_optimizers.optimizers.local_opt.stochastic_hill_climbing   A

Complexity

Total Complexity 16

Size/Duplication

Total Lines 78
Duplicated Lines 0 %

Importance

Changes 0
Metric Value
eloc 52
dl 0
loc 78
rs 10
c 0
b 0
f 0
wmc 16

8 Methods

Rating   Name   Duplication   Size   Complexity  
A StochasticHillClimbingOptimizer._transition() 0 4 2
A StochasticHillClimbingOptimizer._consider() 0 5 2
A StochasticHillClimbingOptimizer._accept_adapt() 0 2 1
A StochasticHillClimbingOptimizer._accept_default() 0 2 1
A StochasticHillClimbingOptimizer.__init__() 0 16 2
A StochasticHillClimbingOptimizer.evaluate() 0 5 1
A StochasticHillClimbingOptimizer._score_norm_default() 0 9 3
A StochasticHillClimbingOptimizer._score_norm_adapt() 0 13 4
1
# Author: Simon Blanke
2
# Email: [email protected]
3
# License: MIT License
4
5
import random
6
import numpy as np
7
8
from . import HillClimbingOptimizer
9
from ...search import Search
10
11
12
class StochasticHillClimbingOptimizer(HillClimbingOptimizer, Search):
    """Hill climbing that may keep a worse candidate with some probability.

    Whenever the new score does not improve on the current one, a normalized
    score difference is turned into an acceptance probability and compared
    against a random draw; on acceptance the new position still replaces the
    current one.

    Parameters
    ----------
    p_accept : float, default 0.1
        Upper bound of the uniform draw used in the acceptance test.
    norm_factor : float or "adaptive", default 1
        Scaling of the score-difference normalization. The string
        ``"adaptive"`` switches to a scheme based on the largest score
        difference seen so far.
    """

    name = "Stochastic Hill Climbing"

    def __init__(
        self,
        *args,
        p_accept=0.1,
        norm_factor=1,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.p_accept = p_accept
        self.norm_factor = norm_factor

        # Select the acceptance strategy once, up front. The adaptive
        # variant tracks the largest score gap observed (diff_max); the
        # attribute only exists on that path.
        if self.norm_factor == "adaptive":
            self._accept = self._accept_adapt
            self.diff_max = 0
        else:
            self._accept = self._accept_default

    def _consider(self, p_accept):
        # Accept the worse candidate when a draw from [0, self.p_accept]
        # exceeds the computed probability (upstream's acceptance rule:
        # the draw is scaled by the p_accept hyperparameter).
        if random.uniform(0, self.p_accept) > p_accept:
            self._new2current()

    def _score_norm_default(self):
        # Normalize the score gap by the summed scores; guard the two
        # degenerate denominators before dividing.
        total = self.score_current + self.score_new
        if total == 0:
            return 1
        if abs(total) == np.inf:
            return 0
        return self.norm_factor * (self.score_current - self.score_new) / total

    def _score_norm_adapt(self):
        # Track the largest gap seen so far and normalize against it.
        gap = abs(self.score_current - self.score_new)
        self.diff_max = max(self.diff_max, gap)

        total = self.diff_max + gap
        if total == 0:
            return 1
        if abs(total) == np.inf:
            return 0
        return abs(self.diff_max - gap) / total

    def _accept_default(self):
        # Boltzmann-style acceptance: larger normalized gap -> lower probability.
        return np.exp(-self._score_norm_default())

    def _accept_adapt(self):
        # Adaptive variant uses the normalized gap directly as probability.
        return self._score_norm_adapt()

    def _transition(self, score_new):
        # Only non-improving candidates go through the stochastic test;
        # improvements are handled by the parent's evaluate().
        if score_new <= self.score_current:
            self._consider(self._accept())

    def evaluate(self, score_new):
        # Run the ordinary hill-climbing bookkeeping first, then apply
        # the stochastic transition on top of it.
        HillClimbingOptimizer.evaluate(self, score_new)
        self._transition(score_new)
79