# Random forest: an ensemble of many decision trees.
import pandas as pd
# Import from the public API, not the private `_validation` submodule,
# which can move or disappear between sklearn versions.
from sklearn.model_selection import cross_val_score  # cross-validation, reports per-fold accuracy
import numpy as np
filename = "dicision_trees_sample.csv"
# Parse the "Date" column as datetime instead of leaving it as strings.
dataset = pd.read_csv(filename, parse_dates=["Date"])
# Define the header, i.e. name the attribute columns.
# (Column names, including the "VistorTeam" spelling, are referenced
# verbatim elsewhere in this file — do not rename them in isolation.)
dataset.columns = ["Date","StartTime","VistorTeam","VisitorPTS","HomeTeam","HomePTS","ScoreType","OT?","Notes"]
# New feature: did the home team win? (True = home win, False = home loss.)
# This is the classification target for the models below.
dataset["HomeWin"] = dataset["VisitorPTS"] < dataset["HomePTS"]
x_c = dataset["HomeWin"].values
from collections import defaultdict
# Tracks whether each team won its most recent game; defaultdict(int)
# means every team is treated as having lost before its first appearance.
won_last = defaultdict(int)
# Pre-create the feature columns so per-row assignment has a target.
# Feature idea: use each team's previous result as the predictor — if
# team A won its last game, predict A wins the next one.
dataset["HomeLastWin"] = 0    # default: every home team "lost" its first game
dataset["VistorLastWin"] = 0  # default: every visiting team "lost" its first game
# Iterate in date order, because "previous game" is defined chronologically.
# (On very old pandas, sort_values may need to be spelled .sort("Date").)
for index, row in dataset.sort_values("Date").iterrows():
    home_team = row["HomeTeam"]
    visitor_team = row["VistorTeam"]
    # Write through .loc: the .ix indexer was deprecated in pandas 0.20
    # and removed in 1.0, and mutating `row` (a copy) would not update
    # `dataset` by itself.
    dataset.loc[index, "HomeLastWin"] = won_last[home_team]
    dataset.loc[index, "VistorLastWin"] = won_last[visitor_team]
    # Record this game's outcome for both teams' next appearance.
    won_last[home_team] = row["HomeWin"]
    won_last[visitor_team] = not row["HomeWin"]
# New feature to improve accuracy: whether the home team is ranked higher,
# using the season standings file as the ranking source.
dataset["HomeTeamRanksHiger"] = 0
standings_filename = "leagues_NBA_2013_standings_expanded-standings.csv"
standings = pd.read_csv(standings_filename)
for index, row in dataset.sort_values("Date").iterrows():
    home_team = row["HomeTeam"]
    visitor_team = row["VistorTeam"]
    # Handle the franchise rename: the standings file still uses the old
    # name "New Orleans Hornets". Only one side of a game can be the
    # Pelicans, so the elif is sufficient.
    if home_team == "New Orleans Pelicans":
        home_team = "New Orleans Hornets"
    elif visitor_team == "New Orleans Pelicans":
        visitor_team = "New Orleans Hornets"
    # Filter standings down to each team's row and take its rank ("Rk").
    # (IndexError here means a team name is missing from the standings file.)
    home_rank = standings[standings["Team"] == home_team]["Rk"].values[0]
    visitor_rank = standings[standings["Team"] == visitor_team]["Rk"].values[0]
    # NOTE(review): if a lower Rk value means a better ranking, "home ranks
    # higher" would arguably be home_rank < visitor_rank; the original
    # comparison is preserved here — confirm the intended semantics.
    # Use .loc instead of the removed .ix indexer.
    dataset.loc[index, "HomeTeamRanksHiger"] = int(home_rank > visitor_rank)
x_homehigher = dataset[["HomeLastWin", "VistorLastWin","HomeTeamRanksHiger"]].values
# Random forest with default hyper-parameters as the baseline.
from sklearn.ensemble import RandomForestClassifier
# Import from the public API, not the private `_search` submodule.
from sklearn.model_selection import GridSearchCV
x_d = x_homehigher
clf = RandomForestClassifier(random_state=14)
scores = cross_val_score(clf, x_d, x_c, scoring="accuracy")
print("默认参数随机森林,Accuracy: {0:.1f}%".format(np.mean(scores) * 100))
# Use GridSearchCV to search parameter_space for the best random-forest
# hyper-parameters.
parameter_space = {
    # x_d has only 3 feature columns, so numeric values are capped at 3;
    # "auto" keeps the library default. NOTE(review): "auto" is deprecated
    # and later removed in newer scikit-learn — use "sqrt" there.
    "max_features": [1, 2, 3, 'auto'],
    "n_estimators": [10, 100],
    "criterion": ["gini", "entropy"],
    "min_samples_leaf": [2, 4, 6],
}
grid = GridSearchCV(clf, parameter_space)
grid.fit(x_d, x_c)
print("从参数空间搜索最佳参数随机森林,Accuracy: {0:.1f}%".format(grid.best_score_ * 100))
print(grid.best_estimator_)
# Run-result screenshot (运行结果截图) omitted — stray prose line commented out so the script stays runnable.