1 | <?php |
||
class LogisticRegression extends Adaline
{
    /**
     * Batch training: gradient descent computed over the whole training set
     * for each weight update (see runTraining()).
     */
    public const BATCH_TRAINING = 1;

    /**
     * Online training: stochastic gradient descent, updating weights
     * sample by sample (see runTraining()).
     */
    public const ONLINE_TRAINING = 2;

    /**
     * Conjugate batch: conjugate gradient algorithm
     * (the constructor's default training type).
     */
    public const CONJUGATE_GRAD_TRAINING = 3;

    /**
     * Cost function to optimize; 'log' and 'sse' are supported:
     *  - 'log' : log likelihood
     *  - 'sse' : sum of squared errors
     *
     * @var string
     */
    protected $costFunction = 'log';

    /**
     * Regularization term: only 'L2' is supported.
     * An empty string disables the penalty term (see the constructor).
     *
     * @var string
     */
    protected $penalty = 'L2';

    /**
     * Lambda (λ) parameter of the regularization term. If λ is set to 0,
     * then the regularization term is cancelled (see setLambda()).
     *
     * @var float
     */
    protected $lambda = 0.5;
51 | |||
52 | /** |
||
53 | * Initalize a Logistic Regression classifier with maximum number of iterations |
||
54 | * and learning rule to be applied <br> |
||
55 | * |
||
56 | * Maximum number of iterations can be an integer value greater than 0 <br> |
||
57 | * If normalizeInputs is set to true, then every input given to the algorithm will be standardized |
||
58 | * by use of standard deviation and mean calculation <br> |
||
59 | * |
||
60 | * Cost function can be 'log' for log-likelihood and 'sse' for sum of squared errors <br> |
||
61 | * |
||
62 | * Penalty (Regularization term) can be 'L2' or empty string to cancel penalty term |
||
63 | * |
||
64 | * @throws \Exception |
||
65 | */ |
||
66 | public function __construct( |
||
67 | int $maxIterations = 500, |
||
68 | bool $normalizeInputs = true, |
||
69 | int $trainingType = self::CONJUGATE_GRAD_TRAINING, |
||
70 | string $cost = 'log', |
||
71 | string $penalty = 'L2' |
||
72 | ) { |
||
73 | $trainingTypes = range(self::BATCH_TRAINING, self::CONJUGATE_GRAD_TRAINING); |
||
74 | if (!in_array($trainingType, $trainingTypes)) { |
||
75 | throw new Exception('Logistic regression can only be trained with '. |
||
76 | 'batch (gradient descent), online (stochastic gradient descent) '. |
||
77 | 'or conjugate batch (conjugate gradients) algorithms'); |
||
78 | } |
||
79 | |||
80 | if (!in_array($cost, ['log', 'sse'])) { |
||
81 | throw new Exception("Logistic regression cost function can be one of the following: \n". |
||
82 | "'log' for log-likelihood and 'sse' for sum of squared errors"); |
||
83 | } |
||
84 | |||
85 | if ($penalty != '' && strtoupper($penalty) !== 'L2') { |
||
86 | throw new Exception("Logistic regression supports only 'L2' regularization"); |
||
87 | } |
||
88 | |||
89 | $this->learningRate = 0.001; |
||
90 | |||
91 | parent::__construct($this->learningRate, $maxIterations, $normalizeInputs); |
||
92 | |||
93 | $this->trainingType = $trainingType; |
||
94 | $this->costFunction = $cost; |
||
95 | $this->penalty = $penalty; |
||
96 | } |
||
97 | |||
98 | /** |
||
99 | * Sets the learning rate if gradient descent algorithm is |
||
100 | * selected for training |
||
101 | */ |
||
102 | public function setLearningRate(float $learningRate): void |
||
103 | { |
||
104 | $this->learningRate = $learningRate; |
||
105 | } |
||
106 | |||
107 | /** |
||
108 | * Lambda (λ) parameter of regularization term. If 0 is given, |
||
109 | * then the regularization term is cancelled |
||
110 | */ |
||
111 | public function setLambda(float $lambda): void |
||
112 | { |
||
113 | $this->lambda = $lambda; |
||
114 | } |
||
115 | |||
116 | /** |
||
117 | * Adapts the weights with respect to given samples and targets |
||
118 | * by use of selected solver |
||
119 | * |
||
120 | * @throws \Exception |
||
121 | */ |
||
122 | protected function runTraining(array $samples, array $targets): void |
||
123 | { |
||
124 | $callback = $this->getCostFunction(); |
||
125 | |||
126 | switch ($this->trainingType) { |
||
127 | case self::BATCH_TRAINING: |
||
128 | $this->runGradientDescent($samples, $targets, $callback, true); |
||
129 | |||
130 | return; |
||
131 | |||
132 | case self::ONLINE_TRAINING: |
||
133 | $this->runGradientDescent($samples, $targets, $callback, false); |
||
134 | |||
135 | return; |
||
136 | |||
137 | case self::CONJUGATE_GRAD_TRAINING: |
||
138 | $this->runConjugateGradient($samples, $targets, $callback); |
||
139 | |||
140 | return; |
||
141 | |||
142 | default: |
||
143 | throw new Exception('Logistic regression has invalid training type: %s.', $this->trainingType); |
||
144 | } |
||
145 | } |
||
146 | |||
147 | /** |
||
148 | * Executes Conjugate Gradient method to optimize the weights of the LogReg model |
||
149 | */ |
||
150 | protected function runConjugateGradient(array $samples, array $targets, Closure $gradientFunc): void |
||
160 | |||
161 | /** |
||
162 | * Returns the appropriate callback function for the selected cost function |
||
163 | * |
||
164 | * @throws \Exception |
||
165 | */ |
||
166 | protected function getCostFunction(): Closure |
||
238 | |||
239 | /** |
||
240 | * Returns the output of the network, a float value between 0.0 and 1.0 |
||
241 | */ |
||
242 | protected function output(array $sample): float |
||
248 | |||
249 | /** |
||
250 | * Returns the class value (either -1 or 1) for the given input |
||
251 | */ |
||
252 | protected function outputClass(array $sample): int |
||
262 | |||
263 | /** |
||
264 | * Returns the probability of the sample of belonging to the given label. |
||
265 | * |
||
266 | * The probability is simply taken as the distance of the sample |
||
267 | * to the decision plane. |
||
268 | * |
||
269 | * @param mixed $label |
||
270 | */ |
||
271 | protected function predictProbability(array $sample, $label): float |
||
282 | } |
||
283 |