Adaline::__construct()   A

Complexity
    Conditions: 2
    Paths: 2

Size
    Total Lines: 13
    Code Lines: 4

Duplication
    Lines: 0
    Ratio: 0 %

Importance
    Changes: 0

Metric   Value
eloc     4
dl       0
loc      13
rs       10
c        0
b        0
f        0
cc       2
nc       2
nop      4
<?php

declare(strict_types=1);

namespace Phpml\Classification\Linear;

use Phpml\Exception\InvalidArgumentException;

class Adaline extends Perceptron
{
    /**
     * Batch training is the default Adaline training algorithm
     */
    public const BATCH_TRAINING = 1;

    /**
     * Online training: stochastic gradient descent learning
     */
    public const ONLINE_TRAINING = 2;

    /**
     * Training type may be either 'Batch' or 'Online' learning
     *
     * @var int
     */
    protected $trainingType;

    /**
     * Initialize an Adaline (ADAptive LInear NEuron) classifier with the given learning rate and maximum
     * number of iterations used while training the classifier <br>
     *
     * Learning rate should be a float value between 0.0 (exclusive) and 1.0 (inclusive) <br>
     * Maximum number of iterations can be an integer value greater than 0 <br>
     * If normalizeInputs is set to true, then every input given to the algorithm will be standardized
     * using the mean and standard deviation of the inputs
     *
     * @throws InvalidArgumentException
     */
    public function __construct(
        float $learningRate = 0.001,
        int $maxIterations = 1000,
        bool $normalizeInputs = true,
        int $trainingType = self::BATCH_TRAINING
    ) {
        if (!in_array($trainingType, [self::BATCH_TRAINING, self::ONLINE_TRAINING], true)) {
            throw new InvalidArgumentException('Adaline can only be trained with batch or online/stochastic gradient descent algorithms');
        }

        $this->trainingType = $trainingType;

        parent::__construct($learningRate, $maxIterations, $normalizeInputs);
    }

    /**
     * Adapts the weights with respect to the given samples and targets
     * by use of the gradient descent learning rule
     */
    protected function runTraining(array $samples, array $targets): void
    {
        // The cost function is the sum of squared errors
        $callback = function ($weights, $sample, $target): array {
            $this->weights = $weights;

            $output = $this->output($sample);
            $gradient = $output - $target;
            $error = $gradient ** 2;

            return [$error, $gradient];
        };

        $isBatch = $this->trainingType === self::BATCH_TRAINING;

        parent::runGradientDescent($samples, $targets, $callback, $isBatch);
    }
}
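
A note on the training callback above: for each sample it returns the squared error (o - t)^2 as the cost and the residual o - t as the gradient term, where o is the value returned by output($sample) and t is the target. Assuming the parent's runGradientDescent() applies the usual least-mean-squares step (a plausible reading of the Perceptron base class, not something visible in this file), each weight is then adjusted roughly as

    w_j := w_j - learningRate * (o - t) * x_j

With batch training these steps are accumulated over all samples before the weights change; with online (stochastic) training the weights are updated after every sample.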
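
For completeness, a minimal usage sketch of the constructor analyzed above. The training data is made up, and train()/predict() are assumed to be the standard estimator methods inherited through the php-ml class hierarchy rather than anything defined in this file.

    <?php

    declare(strict_types=1);

    use Phpml\Classification\Linear\Adaline;

    // Toy, made-up training data: two numeric features per sample, binary labels.
    $samples = [[0, 0], [0, 1], [1, 0], [1, 1]];
    $targets = [0, 0, 0, 1];

    // Learning rate 0.01, up to 500 iterations, inputs standardized,
    // trained with online/stochastic gradient descent instead of the default batch mode.
    $classifier = new Adaline(0.01, 500, true, Adaline::ONLINE_TRAINING);
    $classifier->train($samples, $targets);

    $label = $classifier->predict([1, 1]); // classify a new sample

Passing anything other than Adaline::BATCH_TRAINING or Adaline::ONLINE_TRAINING as the fourth constructor argument throws the InvalidArgumentException shown in the constructor.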