<?php

// ... (namespace declaration and other use statements elided in this listing) ...
use Closure; // required by the Closure type hints further below

class LogisticRegression extends Adaline
{
    /**
     * Batch training: Gradient descent algorithm (default)
     */
    public const BATCH_TRAINING = 1;

    /**
     * Online training: Stochastic gradient descent learning
     */
    public const ONLINE_TRAINING = 2;

    /**
     * Conjugate Batch: Conjugate Gradient algorithm
     */
    public const CONJUGATE_GRAD_TRAINING = 3;

    /**
     * Cost function to optimize: 'log' and 'sse' are supported <br>
     * - 'log' : log likelihood <br>
     * - 'sse' : sum of squared errors <br>
     *
     * @var string
     */
    protected $costFunction = 'log';
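
    // Added note (not from the original source): with the sigmoid hypothesis
    // h = 1 / (1 + exp(-w·x)) and target y in {0, 1}, the two supported cost
    // functions are conventionally
    //
    //   'log' : J(w) = -Σ [ y·log(h) + (1 - y)·log(1 - h) ]   (negative log likelihood)
    //   'sse' : J(w) =  Σ (y - h)²                            (sum of squared errors)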

    /**
     * Regularization term: only 'L2' is supported
     *
     * @var string
     */
    protected $penalty = 'L2';

    /**
     * Lambda (λ) parameter of the regularization term. If λ is set to 0,
     * the regularization term is cancelled.
     *
     * @var float
     */
    protected $lambda = 0.5;
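
    // Added note (not from the original source): with an L2 penalty the cost
    // being optimized conventionally becomes J(w) + λ·Σ wⱼ² (exact scaling
    // conventions vary), so a larger λ shrinks the weights more aggressively
    // and λ = 0 disables regularization, as documented above.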

    /**
     * Initialize a Logistic Regression classifier with maximum number of iterations
     * and learning rule to be applied <br>
     *
     * Maximum number of iterations can be an integer value greater than 0 <br>
     * If normalizeInputs is set to true, then every input given to the algorithm will be standardized
     * by use of standard deviation and mean calculation <br>
     *
     * Cost function can be 'log' for log-likelihood and 'sse' for sum of squared errors <br>
     *
     * Penalty (regularization term) can be 'L2' or an empty string to cancel the penalty term
     *
     * @throws \Exception
     */
    public function __construct(/* ... parameter list elided in this listing ... */)
    {
        // ...
    }

    /**
     * Sets the learning rate if the gradient descent algorithm is
     * selected for training
     */
    public function setLearningRate(float $learningRate): void
    {
        // ...
    }

    /**
     * Sets the lambda (λ) parameter of the regularization term. If 0 is given,
     * the regularization term is cancelled
     */
    public function setLambda(float $lambda): void
    {
        // ...
    }

    /**
     * Adapts the weights with respect to the given samples and targets
     * by use of the selected solver
     *
     * @throws \Exception
     */
    protected function runTraining(array $samples, array $targets)
    {
        // ...
    }
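
    // Sketch (assumed structure; the actual body is elided in this listing):
    // the solver is presumably dispatched on the training-type constants above,
    // along the lines of
    //
    //   switch ($this->trainingType) {          // property assumed to be inherited
    //       case self::BATCH_TRAINING:          // batch gradient descent
    //       case self::ONLINE_TRAINING:         // stochastic gradient descent
    //       case self::CONJUGATE_GRAD_TRAINING: // conjugate gradient
    //   }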

    /**
     * Executes Conjugate Gradient method to optimize the weights of the LogReg model
     */
    protected function runConjugateGradient(array $samples, array $targets, Closure $gradientFunc): void
    {
        // ...
    }

    /**
     * Returns the appropriate callback function for the selected cost function
     *
     * @throws \Exception
     */
    protected function getCostFunction(): Closure
    {
        // ...
    }

    /**
     * Returns the output of the network, a float value between 0.0 and 1.0
     */
    protected function output(array $sample): float
    {
        // ...
    }
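
    // Sketch (assumed; the body is elided in this listing): a logistic output
    // is conventionally the sigmoid of the net input, which squashes any real
    // weighted sum into the (0.0, 1.0) range, e.g.
    //
    //   $net = $this->netInput($sample); // hypothetical helper name
    //   return 1.0 / (1.0 + exp(-$net));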

    /**
     * Returns the class value (either -1 or 1) for the given input
     */
    protected function outputClass(array $sample): int
    {
        // ...
    }
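
    // Sketch (assumed): thresholding the sigmoid output at 0.5 maps it onto
    // the two documented class values, e.g.
    //
    //   return $this->output($sample) > 0.5 ? 1 : -1;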

    /**
     * Returns the probability of the sample belonging to the given label.
     *
     * The probability is simply taken as the distance of the sample
     * to the decision plane.
     *
     * @param mixed $label
     */
    protected function predictProbability(array $sample, $label): float
    {
        // ...
    }
}
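
// Usage sketch (illustrative addition, not part of the original file). It
// assumes construction with the defaults documented above and a train()/predict()
// API inherited from the parent classifier hierarchy; the samples and labels
// below are made up for the example.
//
//   $classifier = new LogisticRegression();
//   $classifier->setLambda(0.0); // disable the L2 penalty
//   $classifier->train([[1.0, 2.0], [3.0, 4.0], [5.0, 5.0]], [-1, -1, 1]);
//   $prediction = $classifier->predict([4.0, 4.5]); // -1 or 1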