Linear Regression: LinearRegression
The Error Term
- The difference that inevitably exists between the true value and the predicted value is denoted by ε; the smaller the error term, the better.
$$y^i=\theta^Tx^i+\epsilon^i \tag{1}$$
- The errors ε^i are independent and identically distributed, following a Gaussian distribution with mean 0 and variance σ² (samples are independent and do not affect one another, so the errors ε^i are independent as well).
$$p(\epsilon^i)=\frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{(\epsilon^i)^2}{2\sigma^2}\right) \tag{2}$$
- Substituting (1) into (2):
$$p(y^i|x^i;\theta)=\frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{(y^i-\theta^Tx^i)^2}{2\sigma^2}\right)$$
- Likelihood function (interpretation: which parameters, combined with our data, are most likely to have produced the observed true values):
$$L(\theta)=\prod^m_{i=1}p(y^i|x^i;\theta)=\prod^m_{i=1}\frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{(y^i-\theta^Tx^i)^2}{2\sigma^2}\right)$$
Note that this is a product of probabilities.
- Log-likelihood (turns the product into a sum):
$$\log L(\theta)=\log\prod^m_{i=1}\frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{(y^i-\theta^Tx^i)^2}{2\sigma^2}\right)$$
What matters is not the value of the extremum, i.e., we do not care what L(θ) or log L(θ) equals, only where the extremum is attained.
$$\log L(\theta)=\sum^m_{i=1}\log\frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{(y^i-\theta^Tx^i)^2}{2\sigma^2}\right)=m\log\frac{1}{\sqrt{2\pi}\sigma}-\frac{1}{\sigma^2}\cdot\frac{1}{2}\sum^m_{i=1}(y^i-\theta^Tx^i)^2$$
The first term is a constant that does not depend on θ, and the subtracted sum is nonnegative, so to make log L(θ) as large as possible we can only make the second part, J(θ), as small as possible.
- Goal: make the likelihood (log-likelihood) as large as possible, i.e., make J(θ) as small as possible. Minimizing J(θ) is the method of least squares:
$$J(\theta)=\frac{1}{2}\sum^m_{i=1}(y^i-\theta^Tx^i)^2$$
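Before moving on to gradient descent, note that this J(θ) also has a closed-form minimizer, the normal equation θ = (XᵀX)⁻¹Xᵀy. Here is a minimal sketch on toy data (all names and values are illustrative, not part of the original code) that can serve as a cross-check for the iterative solution below:

```python
import numpy as np

# Toy data consistent with the model y = θᵀx + ε: y ≈ 1 + 2x plus Gaussian noise
rng = np.random.default_rng(0)
x = rng.uniform(0, 1, size=(100, 1))
y = 1 + 2 * x + rng.normal(0, 0.1, size=(100, 1))

# Prepend a ones column so theta[0] acts as the intercept
X = np.hstack((np.ones((100, 1)), x))

# Normal equation: solve (XᵀX)θ = Xᵀy, which minimizes J(θ) exactly
theta = np.linalg.solve(X.T @ X, X.T @ y)
print(theta.ravel())  # ≈ [1. 2.]
```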
Gradient Descent
- Objective (cost) function:
$$J(\theta)=\frac{1}{2}\sum^m_{i=1}\left(y^i-h_\theta(x^i)\right)^2$$
- Batch gradient descent (easily reaches the optimum, but slow, since every step considers all samples); the update uses the learning rate α introduced below:
$$\frac{\partial J(\theta)}{\partial\theta_j}=-\frac{1}{m}\sum^m_{i=1}\left(y^i-h_\theta(x^i)\right)x^i_j$$
$$\theta_j=\theta_j+\alpha\frac{1}{m}\sum^m_{i=1}\left(y^i-h_\theta(x^i)\right)x^i_j$$
- Stochastic gradient descent (uses one sample per step, so iterations are fast, but a step does not always move toward convergence):
$$\theta_j=\theta_j+\alpha\left(y^i-h_\theta(x^i)\right)x^i_j$$
- Mini-batch gradient descent (the most practical method; the formula below uses a batch of 10 samples as an example), with common batch sizes of 64, 128, 256:
$$\theta_j:=\theta_j-\alpha\frac{1}{10}\sum^{i+9}_{k=i}\left(h_\theta(x^k)-y^k\right)x^k_j$$
- Learning rate α, i.e., the step size; common values are 0.01 and 0.001, usually tuned from larger to smaller. A single update step for each variant is sketched below.
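To make the three update rules concrete, here is a minimal sketch of one parameter update per variant (the function names, X, y, theta, and alpha are illustrative assumptions; X is assumed to already contain the bias column):

```python
import numpy as np

def batch_step(theta, X, y, alpha):
    # Batch GD: average the gradient over all m samples
    m = X.shape[0]
    return theta + alpha / m * X.T @ (y - X @ theta)

def sgd_step(theta, X, y, alpha, i):
    # SGD: use only sample i for this update
    xi, yi = X[i:i + 1], y[i:i + 1]
    return theta + alpha * xi.T @ (yi - xi @ theta)

def minibatch_step(theta, X, y, alpha, batch_idx):
    # Mini-batch GD: average the gradient over a small subset of samples
    xb, yb = X[batch_idx], y[batch_idx]
    return theta + alpha / len(batch_idx) * xb.T @ (yb - xb @ theta)
```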
Code Implementation
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
Standardization
```python
def normalize(features):
    features_normalized = np.copy(features).astype(float)
    features_mean = np.mean(features, 0)
    features_deviation = np.std(features, 0)
    # Apply standardization: subtract the column mean
    if features.shape[0] > 1:
        features_normalized -= features_mean
    # Guard against division by zero
    features_deviation[features_deviation == 0] = 1
    features_normalized /= features_deviation
    return features_normalized, features_mean, features_deviation
```
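As a quick sanity check of `normalize` (the toy array below is an illustrative assumption), each column should come out with mean ≈ 0 and standard deviation ≈ 1:

```python
# Toy input (illustrative): 3 samples, 2 features
X_demo = np.array([[1.0, 10.0],
                   [2.0, 20.0],
                   [3.0, 30.0]])
X_norm, mu, sigma = normalize(X_demo)
print(X_norm.mean(axis=0))  # ≈ [0. 0.]
print(X_norm.std(axis=0))   # ≈ [1. 1.]
```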
Data Preprocessing
```python
def prepare_for_training(data, polynomial_degree=0, sinusoid_degree=0, normalize_data=True):
    num_examples = data.shape[0]
    data_processed = np.copy(data)
    # Preprocessing
    features_mean = 0
    features_deviation = 0
    data_normalized = data_processed
    if normalize_data:
        (
            data_normalized,
            features_mean,
            features_deviation
        ) = normalize(data_processed)
        data_processed = data_normalized
    '''
    (generate_sinusoids / generate_polynomials are not defined in this post,
    so this block stays commented out)
    # Feature transform: sinusoidal
    if sinusoid_degree > 0:
        sinusoids = generate_sinusoids(data_normalized, sinusoid_degree)
        data_processed = np.concatenate((data_processed, sinusoids), axis=1)
    # Feature transform: polynomial
    if polynomial_degree > 0:
        polynomials = generate_polynomials(data_normalized, polynomial_degree)
        data_processed = np.concatenate((data_processed, polynomials), axis=1)
    '''
    # Prepend a column of ones to simplify the matrix computation
    data_processed = np.hstack((np.ones((num_examples, 1)), data_processed))
    return data_processed, features_mean, features_deviation
```
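For example (with the same illustrative toy input as above), `prepare_for_training` standardizes the features and prepends the bias column, so a (3, 2) input comes back as a (3, 3) matrix whose first column is all ones:

```python
X_demo = np.array([[1.0, 10.0],
                   [2.0, 20.0],
                   [3.0, 30.0]])
X_prep, mu, sigma = prepare_for_training(X_demo)
print(X_prep.shape)  # (3, 3): bias column plus 2 standardized features
print(X_prep[:, 0])  # [1. 1. 1.]
```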
```python
class LinearRegression:
    def __init__(self, data, labels, polynomial_degree=0, sinusoid_degree=0, normalize_data=True):
        """
        1. Preprocess the data
        2. Get the number of features
        3. Initialize the parameter matrix
        """
        (data_processed,
         features_mean,
         features_deviation) = prepare_for_training(data, polynomial_degree, sinusoid_degree,
                                                    normalize_data=normalize_data)
        self.data = data_processed
        self.labels = labels
        self.features_mean = features_mean
        self.features_deviation = features_deviation
        self.polynomial_degree = polynomial_degree
        self.sinusoid_degree = sinusoid_degree
        self.normalize_data = normalize_data
        num_features = self.data.shape[1]
        self.theta = np.zeros((num_features, 1))

    def train(self, alpha, num_iterations=500):
        """
        Training module: runs gradient descent
        """
        cost_history = self.gradient_descent(alpha, num_iterations)
        return self.theta, cost_history

    def gradient_descent(self, alpha, num_iterations):
        """
        Iteration module: runs num_iterations updates
        """
        cost_history = []
        for _ in range(num_iterations):
            self.gradient_step(alpha)
            cost_history.append(self.cost_function(self.data, self.labels))
        return cost_history

    def gradient_step(self, alpha):
        """
        Gradient descent parameter update; note this is a matrix computation
        """
        num_examples = self.data.shape[0]
        prediction = LinearRegression.hypothesis(self.data, self.theta)
        delta = prediction - self.labels
        theta = self.theta
        theta = theta - alpha * (1 / num_examples) * (np.dot(delta.T, self.data)).T
        self.theta = theta

    def cost_function(self, data, labels):
        """
        Cost computation
        """
        num_examples = data.shape[0]
        delta = LinearRegression.hypothesis(data, self.theta) - labels
        cost = (1 / 2) * np.dot(delta.T, delta) / num_examples
        return cost[0][0]

    @staticmethod
    def hypothesis(data, theta):
        predictions = np.dot(data, theta)
        return predictions

    # Extra methods: report cost on new data and produce predictions
    def get_cost(self, data, labels):
        # Note: the data is re-standardized with its own statistics here,
        # not with the training set's mean and std
        data_processed = prepare_for_training(data,
                                              self.polynomial_degree,
                                              self.sinusoid_degree,
                                              self.normalize_data
                                              )[0]
        return self.cost_function(data_processed, labels)

    def predict(self, data):
        """
        Use the trained parameters to produce regression predictions
        """
        data_processed = prepare_for_training(data,
                                              self.polynomial_degree,
                                              self.sinusoid_degree,
                                              self.normalize_data
                                              )[0]
        predictions = LinearRegression.hypothesis(data_processed, self.theta)
        return predictions
```
```python
data = pd.read_csv('Desktop\\数据分析\\唐宇迪\\2-线性回归代码实现\\线性回归-代码实现\\data\\world-happiness-report-2017.csv')
train_data = data.sample(frac=0.8)  # split off the training set
test_data = data.drop(train_data.index)
input_param_names = 'Economy..GDP.per.Capita.'
output_param_names = 'Happiness.Score'
x_train = train_data[[input_param_names]].values
y_train = train_data[[output_param_names]].values  # keep shape (m, 1) to match theta
x_test = test_data[[input_param_names]].values
y_test = test_data[[output_param_names]].values
plt.scatter(x_train, y_train, label='Train data')
plt.scatter(x_test, y_test, label='Test data')
plt.xlabel(input_param_names)
plt.ylabel(output_param_names)
plt.title('regression')
plt.legend()
plt.show()
num_maxiterations = 500
learning_rate = 0.01
linear_regression = LinearRegression(x_train, y_train)
(theta, cost_history) = linear_regression.train(learning_rate, num_maxiterations)
print('Initial cost:', cost_history[0])
print('Final cost:', cost_history[-1])
```
Initial cost: 9.02313385879938
Final cost: 0.00039744918297887616
```python
plt.plot(range(num_maxiterations), cost_history)
plt.xlabel('iter')
plt.ylabel('cost')
plt.title('linear_regression')
plt.show()
```
Multivariate Linear Regression
```python
import plotly
import plotly.graph_objs as go

plotly.offline.init_notebook_mode()
train_data = data.sample(frac=0.8)
test_data = data.drop(train_data.index)
input_param_name_1 = 'Economy..GDP.per.Capita.'
input_param_name_2 = 'Freedom'
output_param_name = 'Happiness.Score'
x_train = train_data[[input_param_name_1, input_param_name_2]].values
y_train = train_data[[output_param_name]].values
x_test = test_data[[input_param_name_1, input_param_name_2]].values
y_test = test_data[[output_param_name]].values

# Configure the plot with the training dataset.
plot_training_trace = go.Scatter3d(
    x=x_train[:, 0].flatten(),
    y=x_train[:, 1].flatten(),
    z=y_train.flatten(),
    name='Training Set',
    mode='markers',
    marker={
        'size': 10,
        'opacity': 1,
        'line': {
            'color': 'rgb(255, 255, 255)',
            'width': 1
        },
    }
)
plot_test_trace = go.Scatter3d(
    x=x_test[:, 0].flatten(),
    y=x_test[:, 1].flatten(),
    z=y_test.flatten(),
    name='Test Set',
    mode='markers',
    marker={
        'size': 10,
        'opacity': 1,
        'line': {
            'color': 'rgb(255, 255, 255)',
            'width': 1
        },
    }
)
plot_layout = go.Layout(
    title='Data Sets',
    scene={
        'xaxis': {'title': input_param_name_1},
        'yaxis': {'title': input_param_name_2},
        'zaxis': {'title': output_param_name}
    },
    margin={'l': 0, 'r': 0, 'b': 0, 't': 0}
)
plot_data = [plot_training_trace, plot_test_trace]
plot_figure = go.Figure(data=plot_data, layout=plot_layout)
plotly.offline.iplot(plot_figure)
```
```python
num_iterations = 500
learning_rate = 0.01
polynomial_degree = 0
sinusoid_degree = 0
linear_regression = LinearRegression(x_train, y_train, polynomial_degree, sinusoid_degree)
(theta, cost_history) = linear_regression.train(
    learning_rate,
    num_iterations
)
print('Initial cost:', cost_history[0])
print('Final cost:', cost_history[-1])
```
Initial cost: 14.585671364454466
Final cost: 0.16945358563485072
```python
plt.plot(range(num_iterations), cost_history)
plt.xlabel('Iterations')
plt.ylabel('Cost')
plt.title('Gradient Descent Progress')
plt.show()
```
```python
# Build a 10x10 grid over the training range and predict z on it
predictions_num = 10
x_min = x_train[:, 0].min()
x_max = x_train[:, 0].max()
y_min = x_train[:, 1].min()
y_max = x_train[:, 1].max()
x_axis = np.linspace(x_min, x_max, predictions_num)
y_axis = np.linspace(y_min, y_max, predictions_num)
x_predictions = np.zeros((predictions_num * predictions_num, 1))
y_predictions = np.zeros((predictions_num * predictions_num, 1))
x_y_index = 0
for x_index, x_value in enumerate(x_axis):
    for y_index, y_value in enumerate(y_axis):
        x_predictions[x_y_index] = x_value
        y_predictions[x_y_index] = y_value
        x_y_index += 1
z_predictions = linear_regression.predict(np.hstack((x_predictions, y_predictions)))

plot_predictions_trace = go.Scatter3d(
    x=x_predictions.flatten(),
    y=y_predictions.flatten(),
    z=z_predictions.flatten(),
    name='Prediction Plane',
    mode='markers',
    marker={
        'size': 1,
    },
    opacity=0.8,
    surfaceaxis=2,
)
plot_data = [plot_training_trace, plot_test_trace, plot_predictions_trace]
plot_figure = go.Figure(data=plot_data, layout=plot_layout)
plotly.offline.iplot(plot_figure)
```
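As a final sanity check (assuming scikit-learn is available in the environment), the same multivariate fit can be compared against sklearn.linear_model.LinearRegression, which solves the least-squares problem in closed form:

```python
from sklearn.linear_model import LinearRegression as SklearnLR

sk_model = SklearnLR()
sk_model.fit(x_train, y_train)
print(sk_model.intercept_, sk_model.coef_)  # closed-form least-squares parameters
print(sk_model.score(x_test, y_test))       # R^2 on the held-out test split
```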