import numpy as np
from scipy.optimize import fmin_l_bfgs_b

from Orange.regression import Learner, Model
from Orange.preprocess import Normalize, Continuize, Impute, RemoveNaNColumns

__all__ = ["LinearRegressionLearner"]


class LinearRegressionLearner(Learner):
    '''L2 regularized linear regression (a.k.a Ridge regression)

    This model uses the L-BFGS algorithm to minimize the linear least
    squares penalty with L2 regularization (the exact objective is
    written out below). When using this model you should:

    - Choose a suitable regularization parameter lambda_
    - Consider appending a column of ones to the dataset (intercept term)
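
    The objective that is minimized (written out here for reference; it is
    exactly what the cost_grad method below computes) is::

        (||X theta - y||^2 + lambda_ ||theta||^2) / (2 n_samples)

    where n_samples is the number of rows of X.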

    Parameters
    ----------
    lambda\_ : float, optional (default=1.0)
        Regularization parameter. It controls the trade-off between fitting
        the data and keeping the parameters small. Higher values of lambda\_
        force the parameters to be smaller.

    preprocessors : list, optional (default=[Normalize(), Continuize(), Impute(), RemoveNaNColumns()])
        Preprocessors are applied to data before training or testing; an
        example of overriding them is shown after the list below. The
        default preprocessors

        - transform the dataset so that the columns are on a similar scale,
        - continuize all discrete attributes,
        - remove columns with all values as NaN,
        - replace NaN values with suitable values.
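
        For example, LinearRegressionLearner(preprocessors=[Continuize()])
        would run only continuization, assuming (as __init__ below suggests)
        that a user-supplied list is passed on to the base Learner and
        replaces these defaults.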

    fmin_args : dict, optional
        Parameters for the L-BFGS algorithm.
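
        For example, LinearRegressionLearner(lambda_=1.0, pgtol=1e-6,
        maxiter=500) forwards pgtol and maxiter to
        scipy.optimize.fmin_l_bfgs_b; those two keyword names come from
        scipy and are used here only as an illustration.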

    Examples
    --------

        import numpy as np
        from Orange.data import Table
        from Orange.regression.linear_bfgs import LinearRegressionLearner

        data = Table('housing')
        data.X = np.hstack((data.X, np.ones((data.X.shape[0], 1))))  # append ones
        m = LinearRegressionLearner(lambda_=1.0)
        c = m(data)     # fit
        print(c(data))  # predict
    '''
    name = 'linear_bfgs'
    preprocessors = [Normalize(),
                     Continuize(),
                     Impute(),
                     RemoveNaNColumns()]

    def __init__(self, lambda_=1.0, preprocessors=None, **fmin_args):
        super().__init__(preprocessors=preprocessors)
        self.lambda_ = lambda_
        self.fmin_args = fmin_args

    def cost_grad(self, theta, X, y):
        # residuals of the current fit
        t = X.dot(theta) - y

        # ridge cost: (||X theta - y||^2 + lambda_ ||theta||^2) / (2 n)
        cost = t.dot(t)
        cost += self.lambda_ * theta.dot(theta)
        cost /= 2.0 * X.shape[0]

        # its gradient: (X^T (X theta - y) + lambda_ theta) / n
        grad = X.T.dot(t)
        grad += self.lambda_ * theta
        grad /= X.shape[0]

        return cost, grad

    def fit(self, X, Y, W):
        if len(Y.shape) > 1 and Y.shape[1] > 1:
            raise ValueError('Linear regression does not support '
                             'multi-target regression')

        if np.isnan(np.sum(X)) or np.isnan(np.sum(Y)):
            raise ValueError('Linear regression does not support '
                             'unknown values')

        theta = np.zeros(X.shape[1])
        # fmin_l_bfgs_b returns the minimizer, the final cost and an info dict
        theta, cost, ret = fmin_l_bfgs_b(self.cost_grad, theta,
                                         args=(X, Y.ravel()), **self.fmin_args)

        return LinearRegressionModel(theta)


class LinearRegressionModel(Model):
    def __init__(self, theta):
        self.theta = theta

    def predict(self, X):
        return X.dot(self.theta)


if __name__ == '__main__':
    import Orange.data
    import sklearn.model_selection as skl_model_selection

    np.random.seed(42)

    def numerical_grad(f, params, e=1e-4):
        # central-difference approximation of the gradient of f at params
        grad = np.zeros_like(params)
        perturb = np.zeros_like(params)
        for i in range(params.size):
            perturb[i] = e
            j1 = f(params - perturb)
            j2 = f(params + perturb)
            grad[i] = (j2 - j1) / (2.0 * e)
            perturb[i] = 0
        return grad

    d = Orange.data.Table('housing')
    d.X = np.hstack((d.X, np.ones((d.X.shape[0], 1))))
    d.shuffle()

    # m = LinearRegressionLearner(lambda_=1.0)
    # print(m(d)(d))

    # # gradient check
    # m = LinearRegressionLearner(lambda_=1.0)
    # theta = np.random.randn(d.X.shape[1])
    #
    # ga = m.cost_grad(theta, d.X, d.Y.ravel())[1]
    # gm = numerical_grad(lambda t: m.cost_grad(t, d.X, d.Y.ravel())[0], theta)
    #
    # print(np.sum((ga - gm)**2))

    for lambda_ in (0.01, 0.03, 0.1, 0.3, 1, 3):
        m = LinearRegressionLearner(lambda_=lambda_)
        scores = []
        # 3-fold cross-validation over the rows of d
        for tr_ind, te_ind in skl_model_selection.KFold(n_splits=3).split(d.X):
            s = np.mean((m(d[tr_ind])(d[te_ind]) - d[te_ind].Y.ravel())**2)
            scores.append(s)
        print('{:5.2f} {}'.format(lambda_, np.mean(scores)))

    m = LinearRegressionLearner(lambda_=0)
    print('test data', np.mean((m(d)(d) - d.Y.ravel())**2))
    print('majority', np.mean((np.mean(d.Y.ravel()) - d.Y.ravel())**2))