超参数:
在运行机器学习算法之前需要人为设定的参数(模型无法从数据中学到);通常所说的“调参”,调的就是超参数
怎样寻找比较好的超参数:
领域知识,经验数值(比如算法中的默认值),实验搜索
例1:手写数字数据集在knn算法下寻找更好的k值
# -*- coding:utf-8 -*-
# Example 1: search for the best k (n_neighbors) for kNN on the digits dataset.
import matplotlib
import matplotlib.pyplot as plt
# Dataset library
from sklearn import datasets
# Train/test splitting
from sklearn.model_selection import train_test_split
# k-nearest-neighbors classifier
from sklearn.neighbors import KNeighborsClassifier

# Load the handwritten-digits dataset
digits = datasets.load_digits()
X = digits.data    # feature matrix
y = digits.target  # class labels

# Split into training and test sets; the test set is 20% of the data.
# random_state fixes the shuffle so results are reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)

knn_clf = KNeighborsClassifier(n_neighbors=3)  # n_neighbors is the hyperparameter
knn_clf.fit(X_train, y_train)
# Predicted labels for the test set
predict = knn_clf.predict(X_test)
# Accuracy on the test set
Score = knn_clf.score(X_test, y_test)
print(Score)

# Grid-search for the best k: try k = 1..10 and keep the highest accuracy.
# NOTE: the original loop body had lost its indentation and would not run.
best_score = 0.0
best_n_neighbors = 0
for k in range(1, 11):
    knn_clf = KNeighborsClassifier(n_neighbors=k)  # n_neighbors is the hyperparameter
    knn_clf.fit(X_train, y_train)
    Score = knn_clf.score(X_test, y_test)
    if Score > best_score:
        best_n_neighbors = k
        best_score = Score
print("最好的k值:", best_n_neighbors)
print("预测正确率为:", best_score)
输出:(此处原文省略了运行结果;运行后会依次打印 k=3 时的准确率、最优 k 值及其对应的预测正确率)
例2:手写数字数据集在knn算法下寻找明可夫斯基距离相应的p值
# -*- coding:utf-8 -*-
# Example 2: jointly search for the best k (n_neighbors) and the best
# Minkowski-distance exponent p for kNN on the digits dataset.
import matplotlib
import matplotlib.pyplot as plt
# Dataset library
from sklearn import datasets
# Train/test splitting
from sklearn.model_selection import train_test_split
# k-nearest-neighbors classifier
from sklearn.neighbors import KNeighborsClassifier

# Load the handwritten-digits dataset
digits = datasets.load_digits()
X = digits.data    # feature matrix
y = digits.target  # class labels

# Split into training and test sets; the test set is 20% of the data.
# random_state fixes the shuffle so results are reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)

knn_clf = KNeighborsClassifier(n_neighbors=3)  # n_neighbors is the hyperparameter
knn_clf.fit(X_train, y_train)
# Predicted labels for the test set
predict = knn_clf.predict(X_test)
# Accuracy on the test set
Score = knn_clf.score(X_test, y_test)
print(Score)

# Grid-search over both hyperparameters: k = 1..10 and p = 1..5.
# weights="distance" is required for p to matter (it enables the
# Minkowski-distance weighting of neighbors).
# NOTE: the original loop bodies had lost their indentation and would not run;
# a commented-out duplicate of Example 1's k-search was removed here.
best_p = -1
best_score = 0.0
best_n_neighbors = 0
for k in range(1, 11):
    for p in range(1, 6):
        knn_clf = KNeighborsClassifier(n_neighbors=k, weights="distance", p=p)
        knn_clf.fit(X_train, y_train)
        Score = knn_clf.score(X_test, y_test)
        if Score > best_score:
            best_n_neighbors = k
            best_score = Score
            best_p = p
print("最好的k值:", best_n_neighbors)
print("预测正确率为:", best_score)
# BUG FIX: the original printed the loop variable `p` (always 5 after the
# loops finish) instead of the recorded best value `best_p`.
print("最好的p值:", best_p)