超参数

超参数:

超参数是在运行机器学习算法之前就需要确定的参数;通常所说的"调参",调整的正是这些超参数

怎样寻找比较好的超参数:

领域知识,经验数值(比如算法中的默认值),实验搜索

例1:手写数字数据集在knn算法下寻找更好的k值
# -*- coding:utf-8  -*-
import matplotlib
import matplotlib.pyplot as plt
# dataset loaders
from sklearn import datasets
# train/test splitting helper
from sklearn.model_selection import train_test_split
# k-nearest-neighbors classifier
from sklearn.neighbors import KNeighborsClassifier

# Load the handwritten-digits dataset.
digits = datasets.load_digits()
X = digits.data    # feature matrix
y = digits.target  # class labels

# Hold out 20% of the samples as a test set; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)

# Baseline model with the hyperparameter fixed at k=3.
knn_clf = KNeighborsClassifier(n_neighbors=3)  # n_neighbors is the hyperparameter
knn_clf.fit(X_train, y_train)
# Predictions on the held-out set (kept for illustration).
predict = knn_clf.predict(X_test)
# Accuracy of the baseline model.
Score = knn_clf.score(X_test, y_test)
print(Score)

# Search k = 1..10 for the value with the highest test accuracy.
best_score, best_n_neighbors = 0.0, 0
for candidate_k in range(1, 11):
    candidate_clf = KNeighborsClassifier(n_neighbors=candidate_k)
    candidate_clf.fit(X_train, y_train)
    candidate_score = candidate_clf.score(X_test, y_test)
    if candidate_score > best_score:
        best_score, best_n_neighbors = candidate_score, candidate_k
print("最好的k值:", best_n_neighbors)
print("预测正确率为:", best_score)

输出:
输出结果

例2:手写数字数据集在knn算法下寻找明可夫斯基距离相应的p值
# -*- coding:utf-8  -*-
import matplotlib
import matplotlib.pyplot as plt
# dataset loaders
from sklearn import datasets
# train/test splitting helper
from sklearn.model_selection import train_test_split
# k-nearest-neighbors classifier
from sklearn.neighbors import KNeighborsClassifier

# Load the handwritten-digits dataset.
digits = datasets.load_digits()
X = digits.data    # feature matrix
y = digits.target  # class labels

# Hold out 20% of the samples as a test set; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)

# Baseline model with the hyperparameter fixed at k=3.
knn_clf = KNeighborsClassifier(n_neighbors=3)  # n_neighbors is the hyperparameter
knn_clf.fit(X_train, y_train)
# Predictions on the held-out set (kept for illustration).
predict = knn_clf.predict(X_test)
# Accuracy of the baseline model.
Score = knn_clf.score(X_test, y_test)
print(Score)

# Grid-search k (number of neighbors) and p (Minkowski distance exponent).
# weights="distance" is required so that p actually influences the vote.
best_p = -1
best_score = 0.0
best_n_neighbors = 0
for k in range(1, 11):
    for p in range(1, 6):
        knn_clf = KNeighborsClassifier(n_neighbors=k, weights="distance", p=p)
        knn_clf.fit(X_train, y_train)
        Score = knn_clf.score(X_test, y_test)
        if Score > best_score:
            best_n_neighbors = k
            best_score = Score
            best_p = p
print("最好的k值:", best_n_neighbors)
print("预测正确率为:", best_score)
# BUG FIX: report the recorded best_p, not the loop variable p,
# which always equals 5 (the last value) once the loop finishes.
print("最好的p值:", best_p)

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值