# Boston housing price regression (波士顿房价回归)

# 查看当前挂载的数据集目录, 该目录下的变更重启环境后会自动还原
# View dataset directory. This directory will be recovered automatically after resetting environment. 
!ls /home/aistudio/data
# 查看工作区文件, 该目录下的变更将会持久保存. 请及时清理不必要的文件, 避免加载过慢.
# View personal work directory. All changes under this directory will be kept even after reset. Please clean unnecessary files in time to speed up environment loading.
!ls /home/aistudio/work
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import numpy as np

# Data preprocessing: load the Boston housing dataset.
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2 — on newer versions the data must be fetched another way; confirm
# the pinned sklearn version before running.
dataset = load_boston()
x_data = dataset.data               # feature matrix
y_data = dataset.target             # target: house prices
name_data = dataset.feature_names   # feature name strings
print(name_data)

# Visualize each feature against the target and print its correlation
# matrix with the price.
# BUG FIX: plt.show() was inside the loop, so every iteration displayed
# and discarded the figure and the 7x2 subplot grid never accumulated;
# build the whole grid first, then show once.  range(13) is also
# generalized to the actual number of features.
plt.figure(figsize=(12, 18))
for i in range(len(name_data)):
    plt.subplot(7, 2, i + 1)                 # 7 rows x 2 cols, panel i+1
    plt.scatter(x_data[:, i], y_data, s=10)  # x/y coordinates, point size
    plt.title(name_data[i])
    print(name_data[i], np.corrcoef(x_data[:, i], y_data))
plt.tight_layout()
plt.show()

# Plot every target value against its sample index in a single vectorized
# scatter call (the original issued one scatter() call per point).
plt.scatter(np.arange(len(y_data)), y_data, s=10)
plt.show()

# Remove samples whose price equals 50 — the author treats these as
# outliers (the dataset's prices appear capped at that value).
# IDIOM: np.where replaces the manual index-collection loop.
outlier_idx = np.where(y_data == 50)[0]          # indices of price == 50
x_data = np.delete(x_data, outlier_idx, axis=0)  # drop outlier rows
y_data = np.delete(y_data, outlier_idx, axis=0)  # drop matching labels
name_data = dataset.feature_names

# Keep only the three main features 'RM', 'PTRATIO', 'LSTAT'; collect the
# column indices of every other (minor) feature and delete them.
# GENERALIZED: iterates the real feature list instead of hard-coding 13.
keep = {'RM', 'PTRATIO', 'LSTAT'}
drop_idx = [j for j, name in enumerate(name_data) if name not in keep]
x_data = np.delete(x_data, drop_idx, axis=1)
print(np.shape(y_data))
print(np.shape(x_data))

from sklearn.model_selection import train_test_split

# Hold out 20% of the samples for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    x_data, y_data, random_state=0, test_size=0.20)

# Report the size of each split.
for split in (X_train, X_test, y_train, y_test):
    print(len(split))

from sklearn import preprocessing

# Min-max scale the features to [0, 1].
# BUG FIX (data leakage): the original called fit_transform on the test
# split as well, re-fitting the scaler on test statistics and scaling
# train/test inconsistently.  Fit on the training data only and reuse
# that fit to transform the test data.
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
X_test = min_max_scaler.transform(X_test)

# Scale the labels too — the author reports this improved the score by
# roughly 0.1.  A separate scaler is used for y so the feature scaler's
# fitted statistics are not overwritten (the original reused one object).
y_scaler = preprocessing.MinMaxScaler()
y_train = y_scaler.fit_transform(y_train.reshape(-1, 1))  # to column vector
y_test = y_scaler.transform(y_test.reshape(-1, 1))

from sklearn import linear_model

# Ordinary least-squares linear regression on the scaled training data;
# lr_y_predict holds the predictions for the test split.
lr = linear_model.LinearRegression()
lr_y_predict = lr.fit(X_train, y_train).predict(X_test)


from sklearn.metrics import r2_score

# Coefficient of determination (R^2) of the linear model on the test set.
score_lr = r2_score(y_test, lr_y_predict)

# Ridge regression (L2-regularized least squares) with alpha = 0.1.
rr = linear_model.Ridge(alpha=0.1)
rr_y_predict = rr.fit(X_train, y_train).predict(X_test)

score_rr = r2_score(y_test, rr_y_predict)
score_rr  # notebook-style echo of the ridge R^2 score

# Lasso regression (L1-regularized) with a very small alpha.
lassr = linear_model.Lasso(alpha=1e-4)
lassr_y_predict = lassr.fit(X_train, y_train).predict(X_test)

score_lassr = r2_score(y_test, lassr_y_predict)
print(score_lassr)

from sklearn.svm import SVR

# Support-vector regression with three kernels.
# (Comment fixed: the original labeled the poly kernel "radial basis".)
svr_rbf = SVR(kernel='rbf', C=100, gamma=0.1, epsilon=.1)   # RBF (Gaussian) kernel
svr_lin = SVR(kernel='linear', C=100, gamma='auto')         # linear kernel
svr_poly = SVR(kernel='poly', C=100, gamma='auto', degree=3, epsilon=.1,
               coef0=1)                                     # polynomial kernel

# BUG FIX: y_train is an (n, 1) column after MinMaxScaler, but SVR.fit
# expects a 1-D target and emits a DataConversionWarning; ravel() gives
# the 1-D view it would coerce to anyway.
y_train_1d = y_train.ravel()
svr_rbf_y_predict = svr_rbf.fit(X_train, y_train_1d).predict(X_test)
score_svr_rbf = r2_score(y_test, svr_rbf_y_predict)
svr_lin_y_predict = svr_lin.fit(X_train, y_train_1d).predict(X_test)
score_svr_lin = r2_score(y_test, svr_lin_y_predict)
svr_poly_y_predict = svr_poly.fit(X_train, y_train_1d).predict(X_test)
score_svr_poly = r2_score(y_test, svr_poly_y_predict)
 
              
# fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 10), sharey=True)
# lw = 2

# svrs = [svr_rbf, svr_lin, svr_poly]
# kernel_label = ['RBF', 'Linear', 'Polynomial']
# model_color = ['m', 'c', 'g']

# for ix, svr in enumerate(svrs):
#     print("ix:",ix)
#     print("svr:",svr)
#     axes[ix].plot(X_train, svr.fit(X_train, y_train).predict(X_train), color=model_color[ix], lw=lw,
#                   label='{} model'.format(kernel_label[ix]))
#     axes[ix].scatter(X_train[svr.support_], y_train[svr.support_], facecolor="none",
#                      edgecolor=model_color[ix], s=50,
#                      label='{} support vectors'.format(kernel_label[ix]))
#     axes[ix].scatter(X_train[np.setdiff1d(np.arange(len(X_train)), svr.support_)],
#                      y_train[np.setdiff1d(np.arange(len(y_train)), svr.support_)],
#                      facecolor="none", edgecolor="k", s=50,
#                      label='other training data')
#     axes[ix].legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),
#                     ncol=1, fancybox=True, shadow=True)

# fig.text(0.5, 0.04, 'data', ha='center', va='center')
# fig.text(0.06, 0.5, 'target', ha='center', va='center', rotation='vertical')
# fig.suptitle("Support Vector Regression", fontsize=14)
# plt.show()


def draw_infer_result(groud_truths, infer_results):
    """Scatter model predictions against ground-truth values.

    Points on the y = x diagonal are perfect predictions.
    NOTE(review): 'groud_truths' keeps the original, typo'd parameter
    name so keyword callers are not broken.
    """
    plt.title('Boston', fontsize=24)
    diag = np.arange(-0.2, 2)  # reference diagonal y = x
    plt.plot(diag, diag)
    plt.xlabel('ground truth', fontsize=14)
    plt.ylabel('infer result', fontsize=14)
    plt.scatter(groud_truths, infer_results, color='green', label='training cost')
    plt.grid()
    plt.show()

# Plot predicted-vs-true for every model, then report each test R^2 score.
for preds in (lr_y_predict, rr_y_predict, lassr_y_predict,
              svr_rbf_y_predict, svr_lin_y_predict, svr_poly_y_predict):
    draw_infer_result(y_test, preds)

for label, score in (("lr", score_lr), ("rr", score_rr),
                     ("lassr", score_lassr), ("svr_rbf", score_svr_rbf),
                     ("svr_lin", score_svr_lin), ("svr_poly", score_svr_poly)):
    print("score of " + label + ":", score)






  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值