RandomizedSearchCV
# NOTE: sklearn.grid_search was deprecated in scikit-learn 0.18 and removed in
# 0.20; RandomizedSearchCV now lives in sklearn.model_selection (this matches
# the official documentation link cited later in this article).
from sklearn.model_selection import RandomizedSearchCV
代码(链接):
# Randomized hyper-parameter search: draw 300 candidate settings from
# param_dist, score each with 3-fold CV using negative log-loss, and use
# every available CPU core (n_jobs=-1).
# NOTE(review): clf1, param_dist, traindata and trainlabel are defined
# elsewhere in the article — presumably a classifier, a parameter
# distribution dict, and pandas objects; confirm against the full listing.
search = RandomizedSearchCV(clf1, param_dist, cv=3, scoring='neg_log_loss', n_iter=300, n_jobs=-1)
# Fit the search on the training set
search.fit(traindata.values, np.ravel(trainlabel.values))
# Retrieve the estimator refit with the best parameters found
best_model = search.best_estimator_
print(best_model)
# Best cross-validated score achieved during the search
print(search.best_score_)
1. 官方说明文档:sklearn.model_selection.RandomizedSearchCV
2. 指定评估指标scoring:The scoring parameter: defining model evaluation rules
3. 指标中F1分数的一些解释:sklearn中 F1-micro 与 F1-macro区别和计算原理
GridSearchCV
# NOTE: sklearn.grid_search was deprecated in scikit-learn 0.18 and removed in
# 0.20; GridSearchCV now lives in sklearn.model_selection.
from sklearn.model_selection import GridSearchCV
代码(链接):
# Grid-search demo: tune an RBF-kernel SVM on a 1000-sample MNIST subset.
# Fixes vs. the original listing:
#   1. sklearn.grid_search was removed in scikit-learn 0.20 — import
#      GridSearchCV from sklearn.model_selection instead.
#   2. numpy was used (np.mean) but never imported.
#   3. The loop body printing the best parameters was not indented.
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
import numpy as np
import tflearn
import tflearn.datasets.mnist as mnist

# Load MNIST with integer class labels (one_hot=False)
x_train, y_train, x_test, y_test = mnist.load_data(one_hot=False)

# Baseline model: RBF kernel with probability estimates, trained on the
# first 1000 samples only to keep the demo fast.
clf = SVC()
clf.set_params(kernel='rbf', probability=True).fit(x_train[:1000, :], y_train[:1000])
preds1 = clf.predict(x_test[:1000, :])
print("基准测试集验证得分:" + str(np.mean(preds1 == y_test[:1000])))

# Parameter grid to explore: 8 values of C x 2 values of gamma
param_grid = {'C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000], 'gamma': [0.001, 0.0001]}

# Build the search container; n_jobs > 1 would run candidate fits in
# parallel processes, verbose=10 logs progress for every fit.
grid_search = GridSearchCV(clf, param_grid, n_jobs=1, verbose=10)
grid_search.fit(x_train[:1000, :], y_train[:1000])

# Report every parameter of the best estimator found by the search
best_parameters = grid_search.best_estimator_.get_params()
for para, val in list(best_parameters.items()):
    print(para, val)

# Refit with the best C/gamma and evaluate on the held-out subset
clf = SVC(kernel='rbf', C=best_parameters['C'], gamma=best_parameters['gamma'], probability=True).fit(x_train[:1000, :], y_train[:1000])
preds1 = clf.predict(x_test[:1000, :])
print("最优测试集验证得分:" + str(np.mean(preds1 == y_test[:1000])))