# Model validation processing

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import warnings 
warnings.filterwarnings("ignore")

from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import SGDRegressor

# Paths to the steam ("zhengqi") competition data sets (tab-separated text).
train_data_file = "./data/zhengqi_train.txt"
test_data_file = "./data/zhengqi_test.txt"

# BUG FIX: the original script referenced train_data / test_data without ever
# loading them, so every later line raised NameError. Read the files here.
train_data = pd.read_csv(train_data_file, sep='\t', encoding='utf-8')
test_data = pd.read_csv(test_data_file, sep='\t', encoding='utf-8')

# Min-max normalization: fit the scaler on the training features only, then
# apply the identical transform to both train and test (no test-set leakage).
from sklearn import preprocessing
features_columns = [col for col in train_data.columns if col not in ['target']]
min_max_scaler = preprocessing.MinMaxScaler()
min_max_scaler = min_max_scaler.fit(train_data[features_columns])
train_data_scaler = min_max_scaler.transform(train_data[features_columns])
test_data_scaler = min_max_scaler.transform(test_data[features_columns])
train_data_scaler = pd.DataFrame(train_data_scaler, columns=features_columns)
test_data_scaler = pd.DataFrame(test_data_scaler, columns=features_columns)
# Carry the regression target alongside the scaled training features.
train_data_scaler['target'] = train_data['target']

# Dimensionality reduction: project the scaled features onto their first
# 16 principal components (fit on the train set, reuse for the test set).
from sklearn.decomposition import PCA
pca = PCA(n_components=16)
train_components = pca.fit_transform(train_data_scaler.iloc[:, :-1])
test_components = pca.transform(test_data_scaler)
new_train_pca_16 = pd.DataFrame(train_components)
new_test_pca_16 = pd.DataFrame(test_components)
# Re-attach the target column to the reduced training frame.
new_train_pca_16['target'] = train_data_scaler['target']

# Split the PCA-reduced data: 80% train / 20% held-out test (fixed seed).
new_train_pca_16 = new_train_pca_16.fillna(0)
target = new_train_pca_16['target']
# Select only the component columns shared with the test frame.
train = new_train_pca_16[new_test_pca_16.columns]
train_data, test_data, train_target, test_target = train_test_split(
    train, target, test_size=0.2, random_state=0)

# Simulate under-fitting: few iterations and a loose tolerance keep the
# linear SGD model from converging well.
clf = SGDRegressor(max_iter=500, tol=1e-2)
clf.fit(train_data, train_target)
score_train = mean_squared_error(train_target, clf.predict(train_data))
score_test = mean_squared_error(test_target, clf.predict(test_data))
print('SGDRegressor train MSE : ', score_train)
print('SGDRegressor test MSE : ', score_test)

# Simulate over-fitting: a degree-5 polynomial feature expansion gives the
# linear model far more capacity than the data supports.
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(5)
train_data_poly = poly.fit_transform(train_data)
test_data_poly = poly.transform(test_data)
clf = SGDRegressor(max_iter=1000, tol=1e-3)
clf.fit(train_data_poly, train_target)
predicted_train = clf.predict(train_data_poly)
predicted_test = clf.predict(test_data_poly)
score_train = mean_squared_error(train_target, predicted_train)
score_test = mean_squared_error(test_target, predicted_test)
print('SGDRegressor train MSE : ', score_train)
print('SGDRegressor test MSE : ', score_test)

# A reasonable fit: a degree-3 polynomial expansion balances bias and
# variance for this data set.
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(3)
train_data_poly = poly.fit_transform(train_data)
test_data_poly = poly.transform(test_data)
clf = SGDRegressor(max_iter=1000, tol=1e-3)
clf.fit(train_data_poly, train_target)
predicted_train = clf.predict(train_data_poly)
predicted_test = clf.predict(test_data_poly)
score_train = mean_squared_error(train_target, predicted_train)
score_test = mean_squared_error(test_target, predicted_test)
print('SGDRegressor train MSE : ', score_train)
print('SGDRegressor test MSE : ', score_test)

# Ridge-style regularization: degree-3 polynomial features with an L2
# penalty on the SGD regressor.
poly = PolynomialFeatures(3)
train_data_poly = poly.fit_transform(train_data)
test_data_poly = poly.transform(test_data)
# BUG FIX: scikit-learn requires the penalty name in lower case;
# penalty='L2' raises ValueError ("The penalty L2 is not supported").
clf = SGDRegressor(max_iter=1000, tol=1e-3, penalty='l2', alpha=0.0001)
clf.fit(train_data_poly, train_target)
score_train = mean_squared_error(train_target, clf.predict(train_data_poly))
score_test = mean_squared_error(test_target, clf.predict(test_data_poly))
print('SGDRegressor train MSE : ', score_train)
print('SGDRegressor test MSE : ', score_test)

# Lasso-style regularization: degree-3 polynomial features with an L1
# penalty on the SGD regressor (encourages sparse coefficients).
poly = PolynomialFeatures(3)
train_data_poly = poly.fit_transform(train_data)
test_data_poly = poly.transform(test_data)
# BUG FIX: scikit-learn requires the penalty name in lower case;
# penalty='L1' raises ValueError ("The penalty L1 is not supported").
clf = SGDRegressor(max_iter=1000, tol=1e-3, penalty='l1', alpha=0.0001)
clf.fit(train_data_poly, train_target)
score_train = mean_squared_error(train_target, clf.predict(train_data_poly))
score_test = mean_squared_error(test_target, clf.predict(test_data_poly))
print('SGDRegressor train MSE : ', score_train)
print('SGDRegressor test MSE : ', score_test)

# ElasticNet regularization: mixes L1 and L2 penalties (l1_ratio=0.9
# leans heavily toward L1) on degree-3 polynomial features.
poly = PolynomialFeatures(3)
train_data_poly = poly.fit_transform(train_data)
test_data_poly = poly.transform(test_data)
clf = SGDRegressor(max_iter=1000, tol=1e-3, penalty='elasticnet',
                   l1_ratio=0.9, alpha=0.00001)
clf.fit(train_data_poly, train_target)
predicted_train = clf.predict(train_data_poly)
predicted_test = clf.predict(test_data_poly)
score_train = mean_squared_error(train_target, predicted_train)
score_test = mean_squared_error(test_target, predicted_test)
print('SGDRegressor train MSE : ', score_train)
print('SGDRegressor test MSE : ', score_test)

# Simple hold-out validation: one 80/20 split, train once, score both sides.
from sklearn.model_selection import train_test_split
train_data, test_data, train_target, test_target = train_test_split(
    train, target, test_size=0.2, random_state=0)
clf = SGDRegressor(max_iter=1000, tol=1e-3)
clf.fit(train_data, train_target)
score_train = mean_squared_error(train_target, clf.predict(train_data))
score_test = mean_squared_error(test_target, clf.predict(test_data))
print('SGDRegressor train MSE:', score_train)
print('SGDRegressor test MSE : ', score_test)

# K-fold cross-validation: 5 folds, reporting train/test MSE per fold.
from sklearn.model_selection import KFold
kf = KFold(n_splits=5)
for k, (train_index, test_index) in enumerate(kf.split(train)):
    train_data = train.values[train_index]
    test_data = train.values[test_index]
    train_target = target[train_index]
    test_target = target[test_index]
    clf = SGDRegressor(max_iter=1000, tol=1e-3)
    clf.fit(train_data, train_target)
    score_train = mean_squared_error(train_target, clf.predict(train_data))
    score_test = mean_squared_error(test_target, clf.predict(test_data))
    print(k, 'SGDRegressor train MSE:', score_train)
    print(k, 'SGDRegressor test MSE : ', score_test)

# Leave-one-out cross-validation: each split holds out a single sample.
# Only the first 10 splits are run (the full LOO loop would be one fit
# per sample and far too slow for a demo).
from sklearn.model_selection import LeaveOneOut
loo = LeaveOneOut()
# FIX: removed unused `num = 100` left over from an earlier draft.
for k, (train_index, test_index) in enumerate(loo.split(train)):
	train_data, test_data = train.values[train_index], train.values[test_index]
	train_target, test_target = target[train_index], target[test_index]
	clf = SGDRegressor(max_iter=1000, tol=1e-3)
	clf.fit(train_data, train_target)
	score_train = mean_squared_error(train_target, clf.predict(train_data))
	score_test = mean_squared_error(test_target, clf.predict(test_data))
	print(k, 'SGDRegressor train MSE:', score_train)
	print(k, 'SGDRegressor test MSE : ', score_test)
	if k >= 9:  # stop after 10 splits
		break

# Leave-P-out cross-validation: each split holds out p samples.
# Only the first 10 splits are run (the number of combinations explodes).
from sklearn.model_selection import LeavePOut
# BUG FIX: LeavePOut requires the `p` argument; LeavePOut() raises
# TypeError. p=10 matches the "10个" label in the prints below.
lpo = LeavePOut(p=10)
for k, (train_index, test_index) in enumerate(lpo.split(train)):
	train_data, test_data = train.values[train_index], train.values[test_index]
	train_target, test_target = target[train_index], target[test_index]
	clf = SGDRegressor(max_iter=1000, tol=1e-3)
	clf.fit(train_data, train_target)
	score_train = mean_squared_error(train_target, clf.predict(train_data))
	score_test = mean_squared_error(test_target, clf.predict(test_data))
	print(k, '10个 SGDRegressor train MSE:', score_train)
	print(k, '10个 SGDRegressor test MSE : ', score_test)
	if k >= 9:  # stop after 10 splits
		break

# Exhaustive grid search: try every combination of the parameter grid
# with 5-fold CV on a random forest, then score the refit best model.
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

train_data, test_data, train_target, test_target = train_test_split(
    train, target, test_size=0.2, random_state=0)
randomForestRegressor = RandomForestRegressor()
parameters = {'n_estimators': [50, 100, 200], 'max_depth': [1, 2, 3]}
clf = GridSearchCV(randomForestRegressor, parameters, cv=5)
clf.fit(train_data, train_target)
score_test = mean_squared_error(test_target, clf.predict(test_data))
# FIX: corrected "GridearchCV" typo in the report string; also removed a
# bare `sorted(clf.cv_results_.keys())` whose result was discarded.
print("RandomForestRegressor GridSearchCV test MSE : ", score_test)
print(clf.best_params_)

# Randomized parameter search: sample parameter combinations (with 5-fold
# CV) instead of enumerating the full grid.
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

train_data, test_data, train_target, test_target = train_test_split(
    train, target, test_size=0.2, random_state=0)
parameters = {'n_estimators': [50, 100, 200], 'max_depth': [1, 2, 3]}
# BUG FIX: the original `clf.RandomizedSearchCV(...)` is an AttributeError;
# the searcher must be constructed and assigned. n_iter is capped at 9
# because the grid only has 3*3 = 9 combinations.
clf = RandomizedSearchCV(randomForestRegressor, parameters, cv=5, n_iter=9)
clf.fit(train_data, train_target)
score = mean_squared_error(test_target, clf.predict(test_data))
# FIX: corrected "RandomizeSearchCV" typo in the report string.
print("RandomForestRegressor RandomizedSearchCV test MSE : ", score)

# LightGBM tuning: small grid over learning rate and number of trees,
# evaluated with 5-fold cross-validation.
lgbm_model = lgb.LGBMRegressor(num_leaves=31)
lgb_grid = {'learning_rate': [0.01, 0.1, 1], 'n_estimators': [20, 40]}
clf = GridSearchCV(lgbm_model, lgb_grid, cv=5)
clf.fit(train_data, train_target)
print("Best parameters found by grid search : ", clf.best_params_)
score_test = mean_squared_error(test_target, clf.predict(test_data))
print('LGBMRegressor GridSearchCV test MSE : ', score_test)


  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值