Linear Regression in Practice
I. Linear Regression
1. Boston housing price prediction
import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn import datasets
# Boston housing dataset
boston = datasets.load_boston()
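# note: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2,
# so this cell requires an older scikit-learn version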
X = np.linspace(0,10,50).reshape(-1,1)
# y is a random integer slope (2..7) times X, with no intercept
y = np.random.randint(2,8,size = 1)*X
y/X  # each ratio equals the chosen slope (X[0] = 0 gives nan)
lr = LinearRegression()
lr.fit(X,y)
# coef_ : the coefficient(s), i.e. the slope
# w ----> weight
lr.coef_
Least squares
# matrix operations from linear algebra: the normal equation
np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
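The line above is the closed-form least-squares solution, the normal equation

$\hat{w} = (X^T X)^{-1} X^T y$

which is what LinearRegression solves when fit_intercept=False; its result should match lr.coef_ above.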
X = boston['data']
y = boston['target']
X.shape
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2)
lr = LinearRegression(fit_intercept=False)
lr.fit(X_train,y_train)
# the number of slopes is determined by the number of features
display(lr.coef_,lr.intercept_)
# predictions from the model
lr.predict(X_test).round(2)[:25]
w = lr.coef_
w
# the 'true' house prices, for comparison
y_test[:25]
lr = LinearRegression(fit_intercept=True)
lr.fit(X_train,y_train)
display(lr.coef_,lr.intercept_)
lr.predict(X_test).round(2)[:15]
# build the equation from the slopes and intercept and evaluate it by hand
(X_test.dot(lr.coef_) + lr.intercept_).round(2)[:15]
2. 2020 Tmall Double 11 sales
# assume sales and year are related by a function: quadratic or cubic in one variable
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
years = np.arange(2009,2020)
years
sales = np.array([0.5,9.36,52,191,352,571,912,1207,1682.69,2135,2684])
sales
plt.scatter(years,sales,c = 'red',marker='*',s = 80)
X = (years - 2008).reshape(-1,1)  # shift the years so 2009 -> 1, ..., 2019 -> 11
X
from sklearn.linear_model import LinearRegression
lr = LinearRegression(fit_intercept=True)
lr.fit(X,sales)
# weight
w = lr.coef_[0]
# bias
b = lr.intercept_
display(w,b)
plt.scatter(years -2008,sales,c = 'red',marker='*',s = 80)
x = np.linspace(1,12,50)
plt.plot(x,w*x + b,c = 'green')
X2 = np.concatenate([X**2,X],axis= 1)
X2.shape
# assume a quadratic function f(x) = w1*x**2 + w2*x + b
lr = LinearRegression(fit_intercept=True)
X2 = np.concatenate([X**2,X],axis= 1)
lr.fit(X2,sales)
# weights
w1,w2 = lr.coef_
# bias
b = lr.intercept_
display(w1,w2,b)
plt.scatter(years -2008,sales,c = 'red',marker='*',s = 80)
x = np.linspace(1,12,50)
f = lambda x :w1*x**2 + w2*x + b
plt.plot(x,f(x),c = 'green')
# 2009 -----1
# 2010 -----2
# 2020 -----12
print('Predicted 2020 Tmall Double 11 sales:',np.round(f(12),1))
# assume a cubic function f(x) = w1*x**3 + w2*x**2 + w3*x + b
lr = LinearRegression(fit_intercept=True)
X3 = np.concatenate([X**3,X**2,X],axis= 1)
lr.fit(X3,sales)
# weights
w1,w2,w3 = lr.coef_
# bias
b = lr.intercept_
plt.scatter(years -2008,sales,c = 'red',marker='*',s = 80)
x = np.linspace(1,12,50)
f = lambda x :w1*x**3 + w2*x**2 + w3*x + b
plt.plot(x,f(x),c = 'green')
# 2009 -----1
# 2010 -----2
# 2020 -----12
print('Predicted 2020 Tmall Double 11 sales:',np.round(f(12),1))
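As an aside, the X2/X3 feature matrices built above by hand with np.concatenate can also be generated with sklearn's PolynomialFeatures; a minimal sketch (note it returns the columns in ascending-power order [x, x**2, x**3]):

from sklearn.preprocessing import PolynomialFeatures
# degree=3 without the bias column yields the features x, x**2, x**3
X3_alt = PolynomialFeatures(degree = 3,include_bias = False).fit_transform(X)
X3_alt.shape  # (11, 3)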
3. Implementing linear regression from scratch (univariate)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.linear_model import LinearRegression
# create sample data
X = np.linspace(2,10,20).reshape(-1,1)
# f(x) = wx + b
y = np.random.randint(1,6,size = 1)*X + np.random.randint(-5,5,size = 1)
# add random noise
y += np.random.randn(20,1)*0.8
plt.scatter(X,y,color = 'red')
# first fit with the built-in linear regression, for reference
lr = LinearRegression()
lr.fit(X,y)
w = lr.coef_[0,0]
b = lr.intercept_[0]
print(w,b)
plt.scatter(X,y)
x = np.linspace(1,11,50)
plt.plot(x,w*x + b,color = 'green')
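For reference, the gradients used by the loss() method in the class below come from differentiating the per-sample squared error:

$L(w,b) = (y - (wx + b))^2,\qquad \frac{\partial L}{\partial w} = -2\,(y - (wx+b))\,x,\qquad \frac{\partial L}{\partial b} = -2\,(y - (wx+b))$

fit() averages these over all samples before each gradient-descent update.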
# implement linear regression from scratch (simplified)
# use gradient descent to solve the univariate problem for w, b
class LinearModel(object):
    def __init__(self):
        self.w = np.random.randn(1)[0]
        self.b = np.random.randn(1)[0]

    # mathematical model: express the relationship between X and the target
    def model(self,x):  # f(x) = wx + b
        return self.w*x + self.b

    def loss(self,x,y):  # least squares
        cost = (y - self.model(x))**2
        # the gradient is the partial derivative with respect to each unknown: w, b
        gradient_w = 2*(y - self.model(x))*(-x)
        gradient_b = 2*(y - self.model(x))*(-1)
        return cost,gradient_w,gradient_b

    # gradient descent step
    def gradient_descent(self,gradient_w,gradient_b,learning_rate = 0.1):
        # update w, b
        self.w -= gradient_w*learning_rate
        self.b -= gradient_b*learning_rate

    # training
    def fit(self,X,y):
        count = 0  # stop after 3000 optimization steps
        tol = 0.0001
        last_w = self.w + 0.1
        last_b = self.b + 0.1
        length = len(X)
        while True:
            if count > 3000:  # iteration budget exhausted
                break
            # stop once the slope and intercept have converged to the required precision
            if (abs(last_w - self.w) < tol) and (abs(last_b - self.b) < tol):
                break
            cost = 0
            gradient_w = 0
            gradient_b = 0
            for i in range(length):
                cost_,gradient_w_,gradient_b_ = self.loss(X[i,0],y[i,0])
                cost += cost_/length
                gradient_w += gradient_w_/length
                gradient_b += gradient_b_/length
            # print('--------------------- iteration: %d, loss: %0.2f'%(count,cost))
            last_w = self.w
            last_b = self.b
            # update the intercept and slope
            self.gradient_descent(gradient_w,gradient_b,0.01)
            count += 1

    def result(self):
        return self.w,self.b
# now fit with our own linear regression implementation
lm = LinearModel()
lm.fit(X,y)
w_,b_ = lm.result()
plt.scatter(X,y,c = 'red')
plt.plot(x,w_*x + b_,color = 'green')  # custom implementation
plt.plot(x,w*x + b,color = 'blue')     # sklearn fit
plt.title('Fit from the custom implementation vs. sklearn')
4. Implementing linear regression from scratch (multivariate)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.linear_model import LinearRegression
# quadratic in one variable:
# f(x) = w1*x**2 + w2*x + b
# equivalently, linear in two variables:
# f(x1,x2) = w1*x1 + w2*x2 + b
X = np.linspace(0,10,num = 500).reshape(-1,1)
X = np.concatenate([X**2,X],axis = 1)
X.shape
w = np.random.randint(1,10,size = 2)
b = np.random.randint(-5,5,size = 1)
# matrix multiplication
y = X.dot(w) + b
plt.plot(X[:,1],y,color = 'r')
plt.title('w1:%d, w2:%d, b:%d'%(w[0],w[1],b[0]))
# predict with sklearn's built-in algorithm
lr = LinearRegression()
lr.fit(X,y)
print(lr.coef_,lr.intercept_)
plt.scatter(X[:,1],y,marker = '*')
x = np.linspace(-2,12,100)
plt.plot(x,lr.coef_[0]*x**2 + lr.coef_[1]*x + lr.intercept_,color = 'green')
# hand-written linear regression for multiple features / multivariate equations
# epoch: how many gradient-descent training iterations to run
def gradient_descent(X,y,lr,epoch,w,b):
    # batch size: number of samples
    batch = len(X)
    for i in range(epoch):
        # d_loss: gradient of the loss
        d_loss = 0
        # gradients of the weights
        dw = [0 for _ in range(len(w))]
        # gradient of the intercept
        db = 0
        for j in range(batch):
            y_ = 0  # predicted value: y_ = f(x) = w1*x1 + w2*x2 + b
            for n in range(len(w)):
                y_ += X[j][n]*w[n]
            y_ += b
            # (y - y_)**2 -----> derivative 2*(y - y_)*(-1)
            # (y_ - y)**2 -----> derivative 2*(y_ - y)*(1)
            d_loss = -(y[j] - y_)
            for n in range(len(w)):
                dw[n] += X[j][n]*d_loss/float(batch)
            db += 1*d_loss/float(batch)
        # gradient descent: update the coefficients and intercept
        for n in range(len(w)):
            w[n] -= dw[n]*lr[n]
        b -= db*lr[0]
    return w,b
lr = [0.0001,0.0001]
w = np.random.randn(2)
b = np.random.randn(1)[0]
w_,b_ = gradient_descent(X,y,lr,5000,w,b)
print(w_,b_)
plt.scatter(X[:,1],y,marker = '*')
x = np.linspace(-2,12,100)
f = lambda x:w_[0]*x**2 + w_[1]*x + b_
plt.plot(x,f(x),color = 'green')
II. Other Regression Models
1. Diabetes prediction
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error,r2_score
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes['data']
y = diabetes['target']
from sklearn.linear_model import LinearRegression,RidgeCV
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2)
lr = LinearRegression()
lr.fit(X_train,y_train)
print(lr.score(X_test,y_test))
y_ = lr.predict(X_test)
mean_squared_error(y_test,y_)
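For reference, both lr.score and r2_score report the coefficient of determination

$R^2 = 1 - \dfrac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2}$

i.e. one minus the ratio of the model's squared error to that of simply predicting the mean.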
ridgeCV = RidgeCV(alphas=np.logspace(-5,1,50),scoring='r2',cv = 6)
ridgeCV.fit(X_train,y_train)
y_ = ridgeCV.predict(X_test)
r2_score(y_test,y_)
ridgeCV = RidgeCV(alphas=np.linspace(0.01,5,50),scoring='r2',cv = 6)
ridgeCV.fit(X_train,y_train)
y_ = ridgeCV.predict(X_test)
r2_score(y_test,y_)
2. Overfitting and regularization in linear regression
(1) The overfitting problem
In machine learning, a model with too many parameters, i.e. one that is overly complex, tends to overfit: it performs very well on the training samples but poorly on real test samples, lacking the ability to generalize. The most common way to avoid overfitting is regularization, for example L1 and L2 regularization.
In the first figure the equation has few coefficients and is too simple (underfitting); in the third the equation has many large coefficients and is too complex (overfitting); the second figure is about right.
(2) Regularization
a. L1 regularization:
The loss becomes the original loss function plus λ times the mean absolute value of all the weights, where λ > 0.
In the update expression, sgn(w) denotes the sign of w.
η is the learning rate, i.e. the step size. Compared with the original update rule there is an extra term η·λ·sgn(w)/n. When w is positive the update makes w smaller; when w is negative it makes w larger, so the effect is to pull w toward 0, keeping the weights as close to 0 as possible. This reduces the model's complexity and prevents overfitting.
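Written out (a reconstruction of the expression the text refers to, since the original figure is not included), with $C_0$ the original loss and $n$ the number of weights:

$C = C_0 + \dfrac{\lambda}{n}\sum_w |w|,\qquad \dfrac{\partial C}{\partial w} = \dfrac{\partial C_0}{\partial w} + \dfrac{\lambda}{n}\,\mathrm{sgn}(w)$

so the update rule becomes $w \leftarrow w - \eta\dfrac{\partial C_0}{\partial w} - \dfrac{\eta\lambda}{n}\,\mathrm{sgn}(w)$.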
b. L2 regularization:
The loss becomes the original loss function plus λ/2 times the mean of the squared weights, where λ ≥ 0. It prevents overfitting by the same principle as L1 regularization.
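In the same notation, a sketch of the L2 case:

$C = C_0 + \dfrac{\lambda}{2n}\sum_w w^2,\qquad \dfrac{\partial C}{\partial w} = \dfrac{\partial C_0}{\partial w} + \dfrac{\lambda}{n}\,w$

so each update first scales $w$ by $(1 - \eta\lambda/n) < 1$ before the usual gradient step, which is why L2 is also called weight decay: it shrinks weights toward 0 without forcing them to be exactly 0.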
3. Ridge regression and Lasso regression
# LinearRegression,Ridge,Lasso
import numpy as np
from sklearn.linear_model import LinearRegression,Ridge,Lasso,RidgeCV,LassoCV
import matplotlib.pyplot as plt
%matplotlib inline
# 50 samples, 200 features
# underdetermined: infinitely many exact solutions
X = np.random.randn(50,200)
w = np.random.randn(200)
w
# set 190 of the weights to zero
index = np.arange(0,200)
np.random.shuffle(index)
index
w[index[:190]] = 0
w
y = X.dot(w)
y
import warnings
warnings.filterwarnings('ignore')
linear = LinearRegression(fit_intercept=False)
ridge = RidgeCV(alphas = [0.001,0.01,0.1,1,2,5,10,20,50,100],cv = 5,fit_intercept=False)
lasso = LassoCV(alphas=[0.001,0.01,0.1,1,2,5,10],cv = 3,fit_intercept=False)
linear.fit(X,y)
ridge.fit(X,y)
lasso.fit(X,y)
linear_w = linear.coef_
ridge_w = ridge.coef_
lasso_w = lasso.coef_
plt.figure(figsize=(12,9))
axes = plt.subplot(2,2,1)
axes.plot(w)
axes = plt.subplot(2,2,2)
axes.plot(linear_w)
axes.set_title('Linear')
axes = plt.subplot(2,2,3)
axes.plot(ridge_w)
axes.set_title('Ridge')
axes = plt.subplot(2,2,4)
axes.plot(lasso_w)
axes.set_title('Lasso')
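In the resulting panels, plain LinearRegression spreads weight across all 200 features of this underdetermined system and Ridge merely shrinks the coefficients, while Lasso's L1 penalty drives most coefficients to exactly zero and typically comes closest to recovering the sparse ground-truth w.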
4. Ridge regression: tuning α
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.linear_model import Ridge
X = 1/(np.arange(1,11) + np.arange(0,10).reshape(-1,1))
X
y = np.ones(10)
y
ridge = Ridge(fit_intercept=False)
alphas = np.logspace(start = -10,stop = -2,num = 200)
coefs = []
for a in alphas:
ridge.set_params(alpha = a)
ridge.fit(X,y)
coefs.append(ridge.coef_)
# conclusion: pick alpha from the region where the curves are smooth/stable (as a guideline)
_ = plt.plot(alphas,coefs)
plt.xscale('log')
plt.ylabel('coef',fontsize = 25,color = 'red',rotation = 0)
plt.xlabel('alpha',fontsize = 25)
5. Logistic regression
# logistic regression is a classification algorithm, despite the name
import numpy as np
from sklearn.linear_model import LogisticRegression,LogisticRegressionCV
from sklearn import datasets
from sklearn.model_selection import train_test_split
# candidate values for the inverse regularization strength C
np.logspace(-4,4,100)
LogisticRegressionCV(Cs = [0.001,0.01,0.1,1,5,10,100])
X,y = datasets.load_iris(return_X_y = True)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2)
lr = LogisticRegression()
lr.fit(X_train,y_train)
# classification accuracy, about 96.6% on this split
lr.score(X_test,y_test)
# there are 3 classes
np.unique(y)
# each sample has 4 features
X.shape
lr.coef_
lr.intercept_
# classification outputs are probabilities
lr.predict(X_test)
proba_ = lr.predict_proba(X_test)
proba_
proba_.argmax(axis = 1)
w = lr.coef_
w
b = lr.intercept_
b
X_test.shape
proba_[:10]
y_ = X_test.dot(w.T) + b
# the logistic function, i.e. the sigmoid function
prob = 1 /(1 + np.e**(- y_))
# normalize: this matches the algorithm's predicted probabilities exactly
prob/prob.sum(axis = 1).reshape(-1,1)
6. The principle of logistic regression
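(The figures that originally illustrated this section are not included. In brief: logistic regression pushes a linear score through the sigmoid function,

$\sigma(z) = \dfrac{1}{1 + e^{-z}},\qquad P(y=1\mid x) = \sigma(w^T x + b)$

and for more than two classes the scores go through softmax instead, $P(y=k\mid x) = \dfrac{e^{h_k}}{\sum_j e^{h_j}}$, which is exactly what the code in the next section computes by hand.)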
7. Computing probabilities in logistic regression
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
X,y = datasets.load_iris(return_X_y = True)
cond = y!=2
X = X[cond]
y = y[cond]
from sklearn.model_selection import train_test_split
result = train_test_split(X,y,test_size = 0.2)
result
lr = LogisticRegression()
lr.fit(result[0],result[2])
w = lr.coef_
b = lr.intercept_
print(w,b)
# X_test = result[1]
proba_ = lr.predict_proba(result[1])
proba_
# compute the probabilities by hand
h = result[1].dot(w[0].T) + b
# p is the probability of class 1; the other class has probability 1-p
# the probability comes from the sigmoid function
p = 1/(1 + np.e**(-h))
np.c_[1-p,p]
# multiclass probability computation
X,y = datasets.load_iris(return_X_y = True)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2)
'''{'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'},
optional (default='liblinear')'''
lr = LogisticRegression(multi_class = 'multinomial',solver = 'saga')
lr.fit(X_train,y_train)
proba_ = lr.predict_proba(X_test)
proba_
x = np.array([1,3,-1,10])
# softmax: turns raw scores into probabilities that can be compared
p = np.e**(x)/((np.e**(x)).sum())
p
# 3 classes -> 3 equations, each with 4 coefficients
w = lr.coef_
w
b = lr.intercept_
b
h = X_test.dot(w.T) + b
# softmax
# compute the class probabilities with the softmax formula
p = np.e**h/((np.e**h).sum(axis = 1).reshape(-1,1))
p[:10]
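As a quick sanity check (assuming the cells above have run), the hand-computed softmax should reproduce predict_proba for the multinomial model:

np.allclose(p,proba_)  # expected True: softmax of X_test.dot(w.T) + b matches predict_proba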