<?php

declare(strict_types=1);

namespace Phpml\Classification\Linear;

use Phpml\Helper\Predictable;
use Phpml\Helper\Trainable;
use Phpml\Classification\Classifier;
use Phpml\Preprocessing\Normalizer;

class Adaline extends Perceptron
{
    /**
     * Batch training is the default Adaline training algorithm
     */
    const BATCH_TRAINING = 1;

    /**
     * Online training: stochastic gradient descent learning
     */
    const ONLINE_TRAINING = 2;

    /**
     * The function whose result will be used to calculate the network error
     * for each instance
     *
     * @var string
     */
    protected static $errorFunction = 'output';

    /**
     * Training type may be either batch or online learning
     *
     * @var int
     */
    protected $trainingType;

    /**
     * @var Normalizer
     */
    private $normalizer;

    /**
     * Initialize an Adaline (ADAptive LInear NEuron) classifier with the given learning rate
     * and maximum number of iterations used while training the classifier.
     *
     * The learning rate should be a float value between 0.0 (exclusive) and 1.0 (inclusive).
     * The maximum number of iterations can be any integer value greater than 0.
     * If normalizeInputs is set to true, every input given to the algorithm will be standardized
     * using the mean and standard deviation of the training data.
     *
     * @param float $learningRate
     * @param int   $maxIterations
     * @param bool  $normalizeInputs
     * @param int   $trainingType
     */
    public function __construct(float $learningRate = 0.001, int $maxIterations = 1000,
        bool $normalizeInputs = true, int $trainingType = self::BATCH_TRAINING)
    {
        if ($normalizeInputs) {
            $this->normalizer = new Normalizer(Normalizer::NORM_STD);
        }

        if (! in_array($trainingType, [self::BATCH_TRAINING, self::ONLINE_TRAINING])) {
            throw new \Exception("Adaline can only be trained with the batch or the online/stochastic gradient descent algorithm");
        }

        $this->trainingType = $trainingType;

        parent::__construct($learningRate, $maxIterations);
    }
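
    // A minimal usage sketch (hypothetical sample data; assumes the train()/predict()
    // API that php-ml exposes through this class and the Predictable helper):
    //
    //   $classifier = new Adaline(0.001, 1000, true, Adaline::ONLINE_TRAINING);
    //   $classifier->train([[1.0, 3.0], [2.0, 5.0], [6.0, 2.0], [7.0, 1.0]], [1, 1, 0, 0]);
    //   $label = $classifier->predict([3.0, 4.0]);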

    /**
     * @param array $samples
     * @param array $targets
     */
    public function train(array $samples, array $targets)
    {
        if ($this->normalizer) {
            $this->normalizer->transform($samples);
        }

        parent::train($samples, $targets);
    }

    /**
     * Adapts the weights with respect to the given samples and targets
     * by use of the gradient descent learning rule
     */
    protected function runTraining()
    {
        // If online training is chosen, the parent's runTraining method
        // is executed with the 'output' method as the error function
        if ($this->trainingType == self::ONLINE_TRAINING) {
            return parent::runTraining();
        }

        // Batch learning is executed:
        $currIter = 0;
        while ($this->maxIterations > $currIter++) {
            $outputs = array_map([$this, 'output'], $this->samples);
            $updates = array_map([$this, 'gradient'], $this->targets, $outputs);
            $sum = array_sum($updates);

            // Update all weights at once
            for ($i = 0; $i <= $this->featureCount; $i++) {
                if ($i == 0) {
                    // Weight 0 is the bias term; its input is the constant 1,
                    // so its update is simply the sum of the per-sample errors
                    $this->weights[0] += $this->learningRate * $sum;
                } else {
                    // Accumulate the error contribution of feature $i - 1 over all samples
                    $col = array_column($this->samples, $i - 1);
                    $error = 0;
                    foreach ($col as $index => $val) {
                        $error += $val * $updates[$index];
                    }

                    $this->weights[$i] += $this->learningRate * $error;
                }
            }
        }
    }
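
    // The batch branch above implements the Widrow-Hoff (least mean squares) update:
    // each weight w_i moves by learningRate * sum over all samples of (target - output) * x_i,
    // with x_0 taken as the constant bias input 1.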

    /**
     * Returns the direction of the gradient given the desired and actual outputs
     *
     * @param int   $desired
     * @param float $output
     * @return float
     */
    protected function gradient($desired, $output)
    {
        return $desired - $output;
    }

    /**
     * @param array $sample
     * @return mixed
     */
    public function predictSample(array $sample)
    {
        if ($this->normalizer) {
            $samples = [$sample];
            $this->normalizer->transform($samples);
            $sample = $samples[0];
        }

        return parent::predictSample($sample);
    }
}

This check looks for assignments to scalar types that may be of the wrong type; here it concerns the `$this->trainingType = $trainingType;` assignment in the constructor. To ensure the code behaves as expected, it may be a good idea to add an explicit type cast.
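
A minimal sketch of the suggested cast (hypothetical; since the constructor parameter is already type-hinted as `int` and `declare(strict_types=1)` is in effect, the assigned value is guaranteed to be an int, so the cast is purely defensive):

    // Explicit cast, as the check suggests; redundant given the int type hint,
    // but it makes the intended scalar type unambiguous:
    $this->trainingType = (int) $trainingType;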