Kaggle Machine Learning Example 2: Bicycle (Bike Sharing Demand)

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import linear_model
from sklearn import svm
from sklearn.ensemble import RandomForestRegressor
# cross_validation, grid_search and learning_curve now live in sklearn.model_selection
from sklearn.model_selection import ShuffleSplit, train_test_split, GridSearchCV, learning_curve
from sklearn.metrics import explained_variance_score

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # display the minus sign correctly

def all_model(df_train_data, df_train_target):
    cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
    print(cv)

    # Run a handful of models over the same random splits
    print("Ridge regression")
    for train, test in cv.split(df_train_data):
        reg = linear_model.Ridge().fit(df_train_data[train], df_train_target[train])
        print("train score: {0:.3f}, test score: {1:.3f}\n".format(
            reg.score(df_train_data[train], df_train_target[train]),
            reg.score(df_train_data[test], df_train_target[test])))

    print("Support vector regression / SVR(kernel='rbf', C=10, gamma=.001)")
    for train, test in cv.split(df_train_data):
        reg = svm.SVR(kernel='rbf', C=10, gamma=.001).fit(df_train_data[train], df_train_target[train])
        print("train score: {0:.3f}, test score: {1:.3f}\n".format(
            reg.score(df_train_data[train], df_train_target[train]),
            reg.score(df_train_data[test], df_train_target[test])))

    print("Random forest regression / RandomForestRegressor(n_estimators=100)")
    for train, test in cv.split(df_train_data):
        reg = RandomForestRegressor(n_estimators=100).fit(df_train_data[train], df_train_target[train])
        print("train score: {0:.3f}, test score: {1:.3f}\n".format(
            reg.score(df_train_data[train], df_train_target[train]),
            reg.score(df_train_data[test], df_train_target[test])))
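ShuffleSplit here draws 5 independent random 80/20 partitions rather than disjoint folds, so every loop iteration above fits on a fresh split. A minimal standalone sketch of what cv.split yields (toy array, not the competition data):

import numpy as np
from sklearn.model_selection import ShuffleSplit

X_toy = np.arange(20).reshape(10, 2)  # 10 samples, 2 features
cv_toy = ShuffleSplit(n_splits=3, test_size=0.2, random_state=0)
for train_idx, test_idx in cv_toy.split(X_toy):
    # each iteration yields integer index arrays: here 8 train rows, 2 test rows
    print("train:", train_idx, "test:", test_idx)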

def search_best_parameter(df_train_data, df_train_target):
    X = df_train_data
    y = df_train_target
    print("Searching for the best parameters...")
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

    tuned_parameters = [{'n_estimators': [10, 100, 500]}]

    clf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring='r2')
    clf.fit(X_train, y_train)

    print("Best parameters:")
    # best_estimator_ returns the best estimator chosen by the search
    print(clf.best_estimator_)
    print("Scores per parameter setting:")

    # cv_results_ stores, for each parameter setting:
    #    * the parameter dict itself ('params')
    #    * the mean test score over the cross-validation folds
    #    * the standard deviation of those fold scores
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))

def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1,
                        train_sizes=np.linspace(.1, 1.0, 5)):
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    return plt

def data_preprocessing():
    df_train = pd.read_csv("C:/Users/13369/Desktop/kaggle/kaggle/Kaggle-Bicycle-Example/kaggle_bike_competition_train.csv")
    # Pull the month, day of week, and hour out of the timestamp into 3 new columns
    df_train['month'] = pd.DatetimeIndex(df_train.datetime).month
    df_train['day'] = pd.DatetimeIndex(df_train.datetime).dayofweek
    df_train['hour'] = pd.DatetimeIndex(df_train.datetime).hour
    df_train_origin = df_train  # keep the full frame around, just to be safe
    df_train = df_train.drop(['datetime', 'casual', 'registered'], axis=1)  # drop the fields we don't need
    df_train_target = df_train['count'].values
    df_train_data = df_train.drop(['count'], axis=1).values
    return df_train, df_train_data, df_train_target, df_train_origin
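To make the feature extraction concrete: pd.DatetimeIndex exposes the timestamp's components directly, so one timestamp expands into the three new columns like this (hypothetical timestamp, not a row from the competition file):

import pandas as pd

sample = pd.DataFrame({'datetime': ['2011-01-01 09:00:00']})
idx = pd.DatetimeIndex(sample.datetime)
print(idx.month.values, idx.dayofweek.values, idx.hour.values)
# [1] [5] [9]  -> January, Saturday (Monday=0), 9 o'clock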
if __name__ == "__main__":
    df_train, df_train_data, df_train_target, df_train_origin = data_preprocessing()
    search_best_parameter(df_train_data, df_train_target)

    # Plot the learning curves
    title = "Learning Curves (Random Forest, n_estimators = 100)"
    cv_0 = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
    estimator = RandomForestRegressor(n_estimators=100)
    plot_learning_curve(estimator, title, df_train_data, df_train_target,
                        (0.0, 1.01), cv=cv_0, n_jobs=4)
    plt.show()

    # Try to ease the overfitting -- this may or may not help
    print("Random forest regression / RandomForestRegressor(n_estimators=200, max_features=0.6, max_depth=15)")
    for train, test in cv_0.split(df_train_data):
        reg = RandomForestRegressor(n_estimators=200, max_features=0.6, max_depth=15).fit(
            df_train_data[train], df_train_target[train])
        print("train score: {0:.3f}, test score: {1:.3f}\n".format(
            reg.score(df_train_data[train], df_train_target[train]),
            reg.score(df_train_data[test], df_train_target[test])))