from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=0)
print("Size of training set:{} size of testing set:{}".format(X_train.shape[0], X_test.shape[0]))
#### grid search start
best_score = 0
for gamma in [0.001, 0.01, 0.1, 1, 10, 100]:
    for C in [0.001, 0.01, 0.1, 1, 10, 100]:
        svm = SVC(gamma=gamma, C=C)  # train one model for each possible parameter combination
        svm.fit(X_train, y_train)
        score = svm.score(X_test, y_test)
        if score > best_score:  # keep the best-performing parameters
            best_score = score
            best_parameters = {'gamma': gamma, 'C': C}
#### grid search end
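As a small follow-up (these print statements are not in the original snippet, just added for illustration), the result of this naive search can be inspected directly; note that it selects parameters on the test set itself, which the validation-set version below avoids:

# Not part of the original snippet: print what the naive grid search above found.
print("Best score on test set:{:.2f}".format(best_score))
print("Best parameters:{}".format(best_parameters))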
X_trainval,X_test,y_trainval,y_test = train_test_split(iris.data,iris.target,random_state=0)
X_train,X_val,y_train,y_val = train_test_split(X_trainval,y_trainval,random_state=1)
print("Size of training set:{} size of validation set:{} size of teseting set:{}".format(X_train.shape[0],X_val.shape[0],X_test.shape[0]))
best_score = 0.0
for gamma in [0.001, 0.01, 0.1, 1, 10, 100]:
    for C in [0.001, 0.01, 0.1, 1, 10, 100]:
        svm = SVC(gamma=gamma, C=C)
        svm.fit(X_train, y_train)
        score = svm.score(X_val, y_val)  # evaluate on the validation set, not the test set
        if score > best_score:
            best_score = score
            best_parameters = {'gamma': gamma, 'C': C}
svm = SVC(**best_parameters)  # build a new model with the best parameters
svm.fit(X_trainval, y_trainval)  # retrain on training + validation data; more data usually gives better performance
test_score = svm.score(X_test, y_test)  # final model evaluation
print("Best score on validation set:{:.2f}".format(best_score))
print("Best parameters:{}".format(best_parameters))
print("Best score on test set:{:.2f}".format(test_score))
Output:
Size of training set:84 size of validation set:28 size of testing set:38
Best score on validation set:0.96
Best parameters:{'gamma': 0.001, 'C': 10}
Best score on test set:0.92
from sklearn.model_selection import cross_val_score
best_score = 0.0
for gamma in [0.001, 0.01, 0.1, 1, 10, 100]:
    for C in [0.001, 0.01, 0.1, 1, 10, 100]:
        svm = SVC(gamma=gamma, C=C)
        scores = cross_val_score(svm, X_trainval, y_trainval, cv=5)  # 5-fold cross-validation
        score = scores.mean()  # average score across the folds
        if score > best_score:
            best_score = score
            best_parameters = {"gamma": gamma, "C": C}
svm = SVC(**best_parameters)
svm.fit(X_trainval, y_trainval)
test_score = svm.score(X_test, y_test)
print("Best score on validation set:{:.2f}".format(best_score))
print("Best parameters:{}".format(best_parameters))
print("Score on testing set:{:.2f}".format(test_score))
Output:
Best score on validation set:0.97
Best parameters:{'gamma': 0.01, 'C': 100}
Score on testing set:0.97
Cross-validation is often combined with grid search as a way to evaluate parameters; this approach is called grid search with cross-validation. sklearn therefore provides a class for exactly this, GridSearchCV, which implements fit, predict, score and other methods and can be used like any other estimator. Calling its fit method (1) searches for the best parameters and (2) instantiates an estimator configured with those best parameters.
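As a sketch, the manual double loop above can be expressed with GridSearchCV using the same parameter grid and 5-fold cross-validation (the exact scores may differ slightly from the manual loop depending on how the folds are split):

from sklearn.model_selection import GridSearchCV

param_grid = {'gamma': [0.001, 0.01, 0.1, 1, 10, 100],
              'C': [0.001, 0.01, 0.1, 1, 10, 100]}
grid_search = GridSearchCV(SVC(), param_grid, cv=5)  # estimator + parameter grid + 5-fold CV
grid_search.fit(X_trainval, y_trainval)  # searches all combinations, then refits on the best parameters
print("Best cross-validation score:{:.2f}".format(grid_search.best_score_))
print("Best parameters:{}".format(grid_search.best_params_))
print("Score on testing set:{:.2f}".format(grid_search.score(X_test, y_test)))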