Passed — Push to master (d39371...69bf6f) by Simon, created 03:38

gradient_free_optimizers.optimizers.local.stochastic_hill_climbing — Rating: A

Complexity        Total Complexity 16
Size/Duplication  Total Lines 74 | Duplicated Lines 0 %
Importance        Changes 0
Metric  Value
------  -----
eloc    48
dl      0
loc     74
rs      10
c       0
b       0
f       0
wmc     16

8 Methods

Rating  Name                                                    Duplication  Size  Complexity
A       StochasticHillClimbingOptimizer._accept_default()      0            2     1
A       StochasticHillClimbingOptimizer._transition()          0            4     2
A       StochasticHillClimbingOptimizer._accept_adapt()        0            2     1
A       StochasticHillClimbingOptimizer.evaluate()             0            6     1
A       StochasticHillClimbingOptimizer._score_norm_adapt()    0            13    4
A       StochasticHillClimbingOptimizer._consider()            0            8     2
A       StochasticHillClimbingOptimizer._score_norm_default()  0            9     3
A       StochasticHillClimbingOptimizer.__init__()             0            10    2
# Author: Simon Blanke
# Email: [email protected]
# License: MIT License

import random

import numpy as np

from . import HillClimbingOptimizer
from ...search import Search


class StochasticHillClimbingOptimizer(HillClimbingOptimizer, Search):
    def __init__(self, search_space, p_down=0.1, norm_factor=1):
        super().__init__(search_space)
        self.p_down = p_down
        self.norm_factor = norm_factor

        # Select the acceptance strategy: "adaptive" normalizes by the
        # largest score difference seen so far; otherwise a fixed
        # norm_factor scales the score difference.
        if self.norm_factor == "adaptive":
            self._accept = self._accept_adapt
            self.diff_max = 0
        else:
            self._accept = self._accept_default

    def _consider(self, p_accept):
        # Accept the worse position stochastically; p_down bounds the
        # random threshold and therefore the acceptance rate.
        rand = random.uniform(0, self.p_down)

        if p_accept > 1 - rand:
            self._new2current()

    def _score_norm_default(self):
        # Normalize the score difference by the sum of both scores,
        # scaled by norm_factor. Guard against zero and infinite sums.
        denom = self.score_current + self.score_new

        if denom == 0:
            return 1
        elif abs(denom) == np.inf:
            return 0
        else:
            return self.norm_factor * (self.score_current - self.score_new) / denom

    def _score_norm_adapt(self):
        # Normalize by the largest score difference observed so far, so
        # the acceptance probability adapts to the scale of the scores.
        diff = abs(self.score_current - self.score_new)
        if self.diff_max < diff:
            self.diff_max = diff

        denom = self.diff_max + diff

        if denom == 0:
            return 1
        elif abs(denom) == np.inf:
            return 0
        else:
            return abs(self.diff_max - diff) / denom

    def _accept_default(self):
        # Boltzmann-style acceptance: smaller normalized differences
        # yield acceptance probabilities closer to 1.
        return np.exp(-self._score_norm_default())

    def _accept_adapt(self):
        return self._score_norm_adapt()

    def _transition(self, score_new):
        # Apply the stochastic acceptance step only when the new score
        # does not improve on the current one.
        if score_new <= self.score_current:
            p_accept = self._accept()
            self._consider(p_accept)

    def evaluate(self, score_new):
        # Store the new score, update the current position, run the
        # stochastic transition, then update the best-so-far position.
        self.score_new = score_new

        self._evaluate_new2current(score_new)
        self._transition(score_new)
        self._evaluate_current2best()
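
As an illustration of the acceptance rule in _consider, here is a standalone sketch (not library code; p_accept_default and accepts are hypothetical helpers that mirror _score_norm_default, _accept_default, and _consider above) that estimates how often a worse candidate is accepted for a given p_down:

import random

import numpy as np


def p_accept_default(score_current, score_new, norm_factor=1):
    # Mirrors _score_norm_default + _accept_default from the class above.
    denom = score_current + score_new
    if denom == 0:
        score_norm = 1
    elif abs(denom) == np.inf:
        score_norm = 0
    else:
        score_norm = norm_factor * (score_current - score_new) / denom
    return np.exp(-score_norm)


def accepts(p_accept, p_down=0.1, n_trials=100_000):
    # Empirical rate at which the _consider rule fires:
    # accept when p_accept > 1 - rand, with rand ~ uniform(0, p_down).
    hits = sum(p_accept > 1 - random.uniform(0, p_down) for _ in range(n_trials))
    return hits / n_trials


print(accepts(p_accept_default(1.0, 0.99)))  # ~0.95: a barely worse score passes almost always
print(accepts(p_accept_default(1.0, 0.5)))   # 0.0: here 1 - p_accept exceeds p_down

In this form, p_down acts as a cap: once 1 - p_accept grows past p_down, a worse candidate can no longer be accepted at all.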