【Linear Regression】

Simple linear regression (single factor)

Load the data

import pandas as pd
# load the generated sample data
data = pd.read_csv('generated_data.csv')
x = data.loc[:, 'x']
y = data.loc[:, 'y']
print(x, y)
from matplotlib import pyplot as plt
# scatter plot of the raw data
plt.scatter(x, y)
plt.show()

Build the model

from sklearn.linear_model import LinearRegression
lr_model = LinearRegression()
import numpy as np
# sklearn expects 2-D arrays, so reshape x (and y) into column vectors
x = np.array(x)
x = x.reshape(-1, 1)
y = np.array(y)
y = y.reshape(-1, 1)
print(type(x), x.shape, type(y), y.shape)
# train the model
lr_model.fit(x, y)
# predict on the training data
y_predict = lr_model.predict(x)
print(y_predict)
# predict for a single new point x = 3.5
y_3 = lr_model.predict([[3.5]])
print(y_3)
print(y)
# slope (coefficient) and intercept of the fitted line
a = lr_model.coef_
b = lr_model.intercept_
print(a, b)
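
As a quick sanity check, the fitted coefficients can reproduce a prediction by hand. A minimal sketch using the variables defined above (a is a 2-D array and b a 1-D array because y was reshaped to a column vector):

# manual prediction for x = 3.5 using y = a*x + b; should match lr_model.predict
x_new = 3.5
manual = a[0][0] * x_new + b[0]
print(manual, lr_model.predict([[x_new]])[0][0])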

Evaluate MSE and R2

from sklearn.metrics import mean_squared_error, r2_score
MSE = mean_squared_error(y, y_predict)
R2 = r2_score(y, y_predict)
print(MSE, R2)
# predicted vs. actual values
plt.scatter(y, y_predict)
plt.show()
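
For reference, these are the standard definitions of the two metrics (n is the number of samples, \hat{y}_i the prediction for sample i, and \bar{y} the mean of y):

MSE = \frac{1}{n}\sum_{i=1}^{n} (y_i - \hat{y}_i)^2

R^2 = 1 - \frac{\sum_{i=1}^{n} (y_i - \hat{y}_i)^2}{\sum_{i=1}^{n} (y_i - \bar{y})^2}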

Multiple linear regression (multi-factor)

Load the data

import pandas as pd
import numpy as np
# load the housing price data and preview the first rows
data = pd.read_csv('usa_housing_price.csv')
data.head()

from matplotlib import pyplot as plt
# scatter plots of Price against each candidate feature
fig = plt.figure(figsize=(10,10))

fig1 = plt.subplot(231)
plt.scatter(data.loc[:,'Avg. Area Income'], data.loc[:,'Price'])
plt.title('Price VS Income')

fig2 = plt.subplot(232)
plt.scatter(data.loc[:,'Avg. Area House Age'], data.loc[:,'Price'])
plt.title('Price VS House Age')

fig3 = plt.subplot(233)
plt.scatter(data.loc[:,'Avg. Area Number of Rooms'], data.loc[:,'Price'])
plt.title('Price VS Number of Rooms')

fig4 = plt.subplot(234)
plt.scatter(data.loc[:,'Area Population'], data.loc[:,'Price'])
plt.title('Price VS Area Population')

fig5 = plt.subplot(235)
plt.scatter(data.loc[:,'size'], data.loc[:,'Price'])
plt.title('Price VS size')
plt.show()

# start with a single feature: size
X = data.loc[:,'size']
y = data.loc[:,'Price']
y.head()
X = np.array(X).reshape(-1, 1)
print(X.shape)

Build the model

from sklearn.linear_model import LinearRegression
LR1 = LinearRegression()
#train the model
LR1.fit(X,y)

y_predict_1 = LR1.predict(X)
print(y_predict_1)

from sklearn.metrics import mean_squared_error,r2_score
mean_squared_error_1 = mean_squared_error(y,y_predict_1)
r2_score_1 = r2_score(y,y_predict_1)
print(mean_squared_error_1,r2_score_1)

fig6 = plt.figure(figsize=(8,5))
plt.scatter(X, y)
# fitted regression line over the raw data
plt.plot(X, y_predict_1, 'r')
plt.show()

# use all columns except Price as features for the multi-factor model
X_multi = data.drop(['Price'], axis=1)
X_multi
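
Before fitting, it can help to confirm which columns (and in what order) the multi-factor model will see; any hand-built test sample later has to follow this same order. A small check using the variables above:

print(X_multi.columns.tolist())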

#set up 2nd linear model
LR_multi = LinearRegression()
#train the model
LR_multi.fit(X_multi,y)

#make prediction
y_predict_multi = LR_multi.predict(X_multi)
print(y_predict_multi)

mean_squared_error_multi = mean_squared_error(y,y_predict_multi)
r2_score_multi = r2_score(y,y_predict_multi)
print(mean_squared_error_multi,r2_score_multi)

print(mean_squared_error_1)
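
For an easier side-by-side comparison, both models' metrics can be printed together (a small convenience using the variables already defined; the actual values depend on the dataset):

print('single factor:', mean_squared_error_1, r2_score_1)
print('multi factor:', mean_squared_error_multi, r2_score_multi)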

fig7 = plt.figure(figsize=(8,5))
# predicted vs. actual prices for the multi-factor model
plt.scatter(y, y_predict_multi)
plt.show()

fig8 = plt.figure(figsize=(8,5))
# predicted vs. actual prices for the single-factor model
plt.scatter(y, y_predict_1)
plt.show()

# new sample; the values must follow the same column order as X_multi
X_test = [65000, 5, 5, 30000, 200]
X_test = np.array(X_test).reshape(1, -1)
print(X_test)

y_test_predict = LR_multi.predict(X_test)
print(y_test_predict)
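
If the column order is easy to get wrong, one option is to build the new sample as a DataFrame that reuses X_multi's column names; this also avoids sklearn's feature-name warning. A minimal sketch, assuming the five values above are already listed in X_multi's column order:

# hypothetical sketch: attach explicit column names to the new sample
X_test_df = pd.DataFrame([[65000, 5, 5, 30000, 200]], columns=X_multi.columns)
print(LR_multi.predict(X_test_df))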
