源码:
# Grid-search the regularization strength C for an L2-penalized logistic
# regression, scoring each candidate by mean recall over 5 CV folds.
# Assumes X_train / y_train (pandas objects) and LR (LogisticRegression)
# are defined earlier in the file.
from sklearn.model_selection import KFold
from sklearn.metrics import recall_score

fold = KFold(5, shuffle=False)
c_param_range = [0.01, 0.1, 1, 10, 100]

results_table = pd.DataFrame(columns=['C值', '平均召回率得分'])
results_table['C值'] = c_param_range

# Reset indices once, up front (not inside the fold loop as before), so the
# positional KFold indices line up with .iloc.
X_train = X_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)

for j, c_param in enumerate(c_param_range):
    print('C值:', c_param)
    recall_accs = []
    for iteration, (train_idx, test_idx) in enumerate(fold.split(X_train)):
        # BUG FIX: penalty was '12' (the digits one-two), which raised
        # "Logistic Regression supports only penalties in ['l1', 'l2', ...]".
        # It must be the string 'l2' (lowercase L). solver='liblinear' is the
        # old sklearn default and supports both l1 and l2 penalties.
        lr = LR(C=c_param, penalty='l2', solver='liblinear')
        lr.fit(X_train.iloc[train_idx, :], y_train.iloc[train_idx].values.ravel())
        y_pred = lr.predict(X_train.iloc[test_idx, :].values)
        recall_acc = recall_score(y_train.iloc[test_idx].values, y_pred)
        recall_accs.append(recall_acc)
        print('迭代次数', iteration, ':召回率得分=', recall_acc)
    # .ix was deprecated and removed from pandas; .loc is the replacement.
    results_table.loc[j, '平均召回率得分'] = np.mean(recall_accs)
    print('平均召回率得分:', np.mean(recall_accs))

# Cell-by-cell assignment leaves the column with object dtype, on which
# idxmax() fails in newer pandas — cast to float first.
results_table['平均召回率得分'] = results_table['平均召回率得分'].astype('float64')
best_c = results_table.loc[results_table['平均召回率得分'].idxmax()]['C值']
print('交叉验证最好的C值是', best_c)
错误提示:ValueError Traceback (most recent call last)
in ()
14 X_train = X_train.reset_index(drop=True)
15 y_train = y_train.reset_index(drop=True)
---> 16 lr.fit(X_train.iloc[indices[0],:], y_train.iloc[indices[0]].values.ravel())
17 y_pred = lr.predict(X_train.iloc[indices[1],:].values)
18 recall_acc = recall_score(y_train.iloc[indices[1]].values, y_pred)
D:\Anaconda3\lib\site-packages\sklearn\linear_model\logistic.py in fit(self, X, y, sample_weight)
1491 The SAGA solver supports both float64 and float32 bit arrays.
1492 """
-> 1493 solver = _check_solver(self.solver, self.penalty, self.dual)
1494
1495 if not isinstance(self.C, numbers.Number) or self.C < 0:
D:\Anaconda3\lib\site-packages\sklearn\linear_model\logistic.py in _check_solver(solver, penalty, dual)
440 if penalty not in all_penalties:
441 raise ValueError("Logistic Regression supports only penalties in %s,"
--> 442 " got %s." % (all_penalties, penalty))
443
444 if solver not in ['liblinear', 'saga'] and penalty not in ('l2', 'none'):
ValueError: Logistic Regression supports only penalties in ['l1', 'l2', 'elasticnet', 'none'], got 12.
求大神帮忙。