线性回归
机器学习——单层神经网络线性回归从零实现。上篇博客使用小批量随机梯度下降法对loss函数进行优化,这篇博客将从解析解角度(即直接求解闭式解)对算法进行优化。
算法实现
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import sklearn.datasets as load_diabetes
class linear_regression():
    """Ridge (L2-regularized) linear regression solved in closed form.

    Solves (X^T X + k*I) w = X^T y, where a bias column of ones is
    prepended to X so that w[0] is the intercept. With k=0 this reduces
    to ordinary least squares.
    """

    def __init__(self):
        # Learned weight vector (intercept first); set by fit().
        self.w = None

    def fit(self, X, y, k=0.0):
        """Fit the weight vector from training data.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Training features (without a bias column).
        y : ndarray of shape (n_samples,)
            Training targets.
        k : float, optional
            Regularization strength; k=0 gives ordinary least squares.
        """
        # Prepend a column of ones for the intercept term.
        X = np.insert(X, 0, 1, axis=1)
        # Solve the normal equations directly: numerically more stable
        # than forming the explicit inverse with np.linalg.inv.
        A = X.T.dot(X) + k * np.eye(X.shape[1])
        self.w = np.linalg.solve(A, X.T.dot(y))

    def predict(self, X):
        """Return predictions X_aug @ w, with the bias column prepended."""
        X = np.insert(X, 0, 1, axis=1)
        return X.dot(self.w)
def mean_squared_error(y, y_predict):
    """Return the mean of the squared residuals between y and y_predict."""
    residuals = y - y_predict
    return np.mean(residuals ** 2)
def main():
    """Run the ridge-regression demo on the sklearn diabetes dataset.

    Reads the regularization strength k from stdin, fits on all but the
    last 20 samples, prints the test-set MSE, and shows a 3-D scatter of
    the first two test features against the target.
    """
    k = float(input())  # regularization strength, read from stdin
    diabetes = load_diabetes.load_diabetes()
    print(diabetes.keys())
    print(diabetes['feature_names'])
    X = diabetes.data
    # Hold out the last 20 samples as a test set.
    X_train, X_test = X[:-20], X[-20:]
    y_train, y_test = diabetes.target[:-20], diabetes.target[-20:]
    clf = linear_regression()
    clf.fit(X_train, y_train, k)
    y_predict = clf.predict(X_test)
    print(mean_squared_error(y_test, y_predict))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X_test[:, 0], X_test[:, 1], y_test, c='black')
    plt.show()
# Guard the entry point so importing this module does not trigger
# stdin reads, dataset loading, or plotting as a side effect.
if __name__ == "__main__":
    main()