A Machine Learning Regression Project Example in Python

import numpy as np
from numpy import arange
from matplotlib import pyplot
from pandas import read_csv
from pandas import set_option
from pandas.plotting import scatter_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
#Load the data
filename = 'housing.csv'
names = ['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS',
         'RAD','TAX','PTRATIO','B','LSTAT','MEDV']
data = read_csv(filename, names=names, sep=r'\s+') #the file is whitespace-delimited, not comma-separated

# #Understand the data
# #Dataset dimensions
# print(data.shape)
# #Data type of each attribute
# print(data.dtypes)
# #Peek at the first 10 rows
# set_option('display.width', 120)  #widen the output so all columns fit on one line
# print(data.head(10))
# #Descriptive statistics
# set_option('display.precision', 1)
# print(data.describe())
# #Pairwise correlations
# set_option('display.precision', 2)
# print(data.corr(method='pearson')) #a Pearson correlation above 0.7 or below -0.7 indicates a strong relationship
#
# #Visualize the data
# #Histogram of each attribute
# data.hist(sharex=False, sharey=False, xlabelsize=1, ylabelsize=1)
# pyplot.show()
# #Density plots
# data.plot(kind='density', subplots=True, layout=(4, 4), sharex=False, fontsize=1) #layout=(4, 4) arranges the plots in a 4x4 grid
# pyplot.show()
# #Box plots
# data.plot(kind='box', subplots=True, layout=(4, 4), sharey=False, fontsize=8)
# pyplot.show()
# #Scatter matrix
# scatter_matrix(data)
# pyplot.show()
# #Correlation matrix plot
# '''Strongly correlated attribute pairs are candidates for removal, which can improve accuracy.'''
# fig = pyplot.figure() #create a new figure
# ax = fig.add_subplot(111) #a 1x1 subplot grid, first (and only) cell
# cax = ax.matshow(data.corr(), vmin=-1, vmax=1, interpolation='none')
# fig.colorbar(cax)
# ticks = np.arange(0, 14, 1)
# ax.set_xticks(ticks)
# ax.set_yticks(ticks)
# ax.set_xticklabels(names)
# ax.set_yticklabels(names)
# pyplot.show()
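# #An optional sketch (an addition, not in the original): list the attribute
# #pairs whose absolute Pearson correlation exceeds the 0.7 rule of thumb
# #quoted above, as candidates for removal.
# corr = data.corr(method='pearson')
# for i in range(len(names)):
#     for j in range(i + 1, len(names)):
#         if abs(corr.iloc[i, j]) > 0.7:
#             print('%s vs %s: %.2f' % (names[i], names[j], corr.iloc[i, j]))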

#Split off a hold-out validation set
array = data.values
X = array[:, 0:13]
Y = array[:, 13]
test_size = 0.2 #hold out 20% of the data for final validation
seed = 7
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)

#Evaluate algorithms
'''Use 10-fold cross-validation and compare algorithms by mean squared error:
   the closer the MSE is to 0, the more accurate the model. scikit-learn
   reports it as neg_mean_squared_error, i.e. negated so that larger is better.'''
num_folds = 10
seed = 7
scoring = 'neg_mean_squared_error'
#Compare three linear and three nonlinear algorithms
models = {}
models['LR'] = LinearRegression() #linear regression
models['LASSO'] = Lasso() #lasso regression
models['EN'] = ElasticNet() #elastic net regression
models['KNN'] = KNeighborsRegressor() #k-nearest neighbors
models['CART'] = DecisionTreeRegressor() #classification and regression tree
models['SVM'] = SVR() #support vector regression
results = []
for key in models:
    kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed) #shuffle is required when random_state is set
    cv_result = cross_val_score(models[key], X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_result)
    print('%s: %f (%f)' % (key, cv_result.mean(), cv_result.std()))
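#An aside (an addition, not in the original): the 'neg_mean_squared_error'
#scorer negates the MSE, so the means printed above are negative. A more
#readable figure is the RMSE in MEDV's units, recovered from the same fold scores:
for key, cv_result in zip(models.keys(), results):
    print('%s RMSE: %f' % (key, np.sqrt(-cv_result).mean()))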
# #Box plot of the 10-fold cross-validation results for each algorithm
# fig = pyplot.figure()
# fig.suptitle('Algorithm Comparison')
# ax = fig.add_subplot(111)
# pyplot.boxplot(results)
# ax.set_xticklabels(models.keys())
# pyplot.show()
#Standardize the data
'''The runs above used the raw data; next, standardize it to reduce the effect
   of differing feature scales and distributions, which can improve accuracy.'''
#Use a Pipeline so each feature is rescaled to mean 0 and standard deviation 1;
#the Pipeline also keeps the scaler from seeing the held-out fold during cross-validation
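# #An optional check (an addition, not in the original): after StandardScaler,
# #each training column should have mean ~0 and standard deviation ~1.
# rescaled = StandardScaler().fit_transform(X_train)
# print(rescaled.mean(axis=0).round(2), rescaled.std(axis=0).round(2))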
pipelines = {}
pipelines['ScalerLR'] = Pipeline([('Scaler',StandardScaler()),('LR',LinearRegression())])
pipelines['ScalerLASSO'] = Pipeline([('Scaler',StandardScaler()),('LASSO',Lasso())])
pipelines['ScalerEN'] = Pipeline([('Scaler',StandardScaler()),('EN',ElasticNet())])
pipelines['ScalerKNN'] = Pipeline([('Scaler',StandardScaler()),('KNN',KNeighborsRegressor())])
pipelines['ScalerCART'] = Pipeline([('Scaler',StandardScaler()),('CART',DecisionTreeRegressor())])
pipelines['ScalerSVM'] = Pipeline([('Scaler',StandardScaler()),('SVM',SVR())])
results = []
for key in pipelines:
    kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
    cv_result = cross_val_score(pipelines[key], X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_result)
    print('%s: %f (%f)' % (key, cv_result.mean(), cv_result.std()))
# #Box plot of the 10-fold cross-validation results for each pipeline
# fig = pyplot.figure()
# fig.suptitle('Algorithm Comparison')
# ax = fig.add_subplot(111)
# pyplot.boxplot(results)
# ax.set_xticklabels(pipelines.keys())
# pyplot.show()

# Improve the results with tuning - KNN
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
param_grid = {'n_neighbors': [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]}
model = KNeighborsRegressor()
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
grid_result = grid.fit(X=rescaledX, y=Y_train)
print('Best: %s using %s' % (grid_result.best_score_, grid_result.best_params_))
cv_results = zip(grid_result.cv_results_['mean_test_score'],
                 grid_result.cv_results_['std_test_score'],
                 grid_result.cv_results_['params'])
for mean, std, param in cv_results:
    print('%f (%f) with %r' % (mean, std, param))
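# #A short usage sketch (an addition, not in the original): with refit=True
# #(the default), grid_result.best_estimator_ is the KNN refit on all training
# #data with the winning n_neighbors, so it can score the hold-out set directly.
# rescaledX_test = scaler.transform(X_test)
# print(mean_squared_error(Y_test, grid_result.best_estimator_.predict(rescaledX_test)))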

# Ensemble methods
ensembles = {}
ensembles['ScaledAB'] = Pipeline([('Scaler', StandardScaler()), ('AB', AdaBoostRegressor())])
ensembles['ScaledAB-KNN'] = Pipeline([('Scaler', StandardScaler()), ('ABKNN', AdaBoostRegressor(estimator=KNeighborsRegressor(n_neighbors=3)))])
ensembles['ScaledAB-LR'] = Pipeline([('Scaler', StandardScaler()), ('ABLR', AdaBoostRegressor(estimator=LinearRegression()))])
ensembles['ScaledRFR'] = Pipeline([('Scaler', StandardScaler()), ('RFR', RandomForestRegressor())])
ensembles['ScaledETR'] = Pipeline([('Scaler', StandardScaler()), ('ETR', ExtraTreesRegressor())])
ensembles['ScaledGBR'] = Pipeline([('Scaler', StandardScaler()), ('GBR', GradientBoostingRegressor())])

results = []
for key in ensembles:
    kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
    cv_result = cross_val_score(ensembles[key], X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_result)
    print('%s: %f (%f)' % (key, cv_result.mean(), cv_result.std()))
fig = pyplot.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(ensembles.keys())
pyplot.show()

# Tune the ensemble - GBM
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
param_grid = {'n_estimators': [10, 50, 100, 200, 300, 400, 500, 600, 700, 800, 900]}
model = GradientBoostingRegressor()
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
grid_result = grid.fit(X=rescaledX, y=Y_train)
print('Best: %s using %s' % (grid_result.best_score_, grid_result.best_params_))

# Tune the ensemble - ET
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
param_grid = {'n_estimators': [5, 10, 20, 30, 40, 50, 60, 70, 80]}
model = ExtraTreesRegressor()
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
grid_result = grid.fit(X=rescaledX, y=Y_train)
print('Best: %s using %s' % (grid_result.best_score_, grid_result.best_params_))
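#A small sketch (an addition, not in the original): the tuned value can be
#read straight from the search result instead of being hard-coded in the
#final model below.
best_n = grid_result.best_params_['n_estimators']
print('tuned n_estimators: %d' % best_n)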

#Train the final model
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
model = ExtraTreesRegressor(n_estimators=80) #extra trees with the tuned n_estimators
model.fit(X=rescaledX, y=Y_train)
#Evaluate the model on the hold-out set
rescaledX_test = scaler.transform(X_test)
predictions = model.predict(rescaledX_test)
print(mean_squared_error(Y_test, predictions))
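#A final aside (an addition, not in the original): the square root puts the
#error in MEDV's own units (median home value in $1000s), which is easier to
#interpret than raw MSE.
print('RMSE: %f' % np.sqrt(mean_squared_error(Y_test, predictions)))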
