l1_contr = self.l1_ratio * np.linalg.norm(w)
l2_contr = (1 - self.l1_ratio) * 0.5 * w.T.dot(w)
return self.alpha * (l1_contr + l2_contr)
def grad(self, w):
    """ Gradient of the combined l1/l2 (elastic-net) regularization term w.r.t. w. """
    # Subgradient of the l1 term; np.sign(0) == 0 is a valid subgradient choice at w = 0
    l1_contr = self.l1_ratio * np.sign(w)
    # Gradient of the 0.5 * w.T.dot(w) l2 term
    l2_contr = (1 - self.l1_ratio) * w
    # alpha scales the overall regularization strength
    return self.alpha * (l1_contr + l2_contr)
class Regression(object):
    """ Base regression model. Models the relationship between a scalar dependent variable y and the independent
    variables X.
    Parameters:
    -----------
    n_iterations: float
        The number of training iterations the algorithm will tune the weights for.
    learning_rate: float
        The step length that will be used when updating the weights.
    """
    def __init__(self, n_iterations, learning_rate):
        self.n_iterations = n_iterations
        self.learning_rate = learning_rate

    def initialize_weights(self, n_features):
        """ Initialize weights randomly [-1/N, 1/N] """
        limit = 1 / math.sqrt(n_features)
        self.w = np.random.uniform(-limit, limit, (n_features, ))
def fit(self, X, y):
# Insert constant ones for bias weights
X = np.insert(X, 0, 1, axis=1)
self.training_errors = []
self.initialize_weights(n_features=X.shape[1])
# Do gradient descent for n_iterations
for i in range(self.n_iterations):
y_pred = X.dot(self.w)
# Calculate l2 loss
mse = np.mean(0.5 * (y - y_pred)**2 + self.regularization(self.w))
self.training_errors.append(mse)
# Gradient of l2 loss w.r.t w
grad_w = -(y - y_pred).dot(X) + self.regularization.grad(self.w)