Implementing KNN on MNIST with PyTorch, TensorFlow, and Keras (with annotated code)

K-nearest neighbors (KNN) is one of the most common supervised classification algorithms. The choice of K changes how the model behaves, but neither a larger nor a smaller K is inherently better; the best K depends on the dataset and is usually found with cross-validation. A small sketch of this is given below.

Related reading:
Python KNN classification algorithm (explained in detail)
A fast way to compute Euclidean distances
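
Before the framework-specific code, here is a minimal, illustrative sketch of picking K by cross-validation. It is not part of the original code; it assumes scikit-learn is installed and uses its small digits dataset as a stand-in for MNIST (KNeighborsClassifier and cross_val_score are standard scikit-learn APIs):

#******************************************************************
# Illustrative sketch: choose K by 5-fold cross-validation.
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

X, y = load_digits(return_X_y=True)          # small stand-in dataset
scores = {}
for k in range(1, 16, 2):                    # try odd K from 1 to 15
    clf = KNeighborsClassifier(n_neighbors=k)
    scores[k] = cross_val_score(clf, X, y, cv=5).mean()

best_k = max(scores, key=scores.get)
print("best K:", best_k, "cv accuracy:", scores[best_k])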

KNN implemented with PyTorch:

#******************************************************************
# Load the MNIST dataset and common preprocessing ops (transforms) from torchvision
from torchvision import datasets, transforms
# numpy for the voting step
import numpy as np
# accuracy_score as the evaluation metric
from sklearn.metrics import accuracy_score
import torch
# tqdm for a progress bar, time for simple timing
from tqdm import tqdm
import time

# Matrix-based KNN
def KNN(train_x, train_y, test_x, test_y, k):
    # record the start time
    since = time.time()
    # m = number of test samples, n = number of training samples
    m = test_x.size(0)
    n = train_x.size(0)

    # Compute the (squared) Euclidean distance matrix of shape m x n
    print("computing distance matrix")

    # test_x is m x 784 and train_x is n x 784; ** 2 squares every element,
    # sum(dim=1) sums each row, keepdim=True keeps a 2-D (m x 1) result,
    # and expand broadcasts it to m x n
    xx = (test_x ** 2).sum(dim=1, keepdim=True).expand(m, n)
    # same for the training set, then transpose to m x n
    yy = (train_x ** 2).sum(dim=1, keepdim=True).expand(n, m).transpose(0, 1)
    # squared distance: ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x.y (no sqrt needed for ranking)
    dist_mat = xx + yy - 2 * test_x.matmul(train_x.transpose(0, 1))
    # sort each row: indices of training samples from nearest to farthest
    mink_idxs = dist_mat.argsort(dim=-1)
    # collect one predicted label per test sample
    res = []
    for idxs in mink_idxs:
        # voting: majority label among the k nearest neighbours
        # (np.bincount is explained in the reference below)
        res.append(np.bincount(np.array([train_y[idx] for idx in idxs[:k]])).argmax())

    assert len(res) == len(test_y)
    print("acc", accuracy_score(test_y, res))
    # elapsed time
    time_elapsed = time.time() - since
    print('KNN mat training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))

# Euclidean distance between two vectors
def cal_distance(x, y):
    return torch.sum((x - y) ** 2) ** 0.5

# Loop-based KNN (one pair of samples at a time)
def KNN_by_iter(train_x, train_y, test_x, test_y, k):
    since = time.time()

    # compute distances sample by sample
    res = []
    for x in tqdm(test_x):
        dists = []
        for y in train_x:
            dists.append(cal_distance(x, y).view(1))
        # torch.cat concatenates the 1-element tensors into one vector
        idxs = torch.cat(dists).argsort()[:k]
        res.append(np.bincount(np.array([train_y[idx] for idx in idxs])).argmax())

    # print(res[:10])
    print("acc", accuracy_score(test_y, res))

    time_elapsed = time.time() - since
    print('KNN iter training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))


if __name__ == "__main__":
    # download and load the MNIST dataset
    train_dataset = datasets.MNIST(root="./data", download=True, transform=transforms.ToTensor(), train=True)
    test_dataset = datasets.MNIST(root="./data", download=True, transform=transforms.ToTensor(), train=False)

    # build the training and test sets (only a subset, to keep the run short)
    train_x = []
    train_y = []
    for i in range(len(train_dataset)):
        img, target = train_dataset[i]
        train_x.append(img.view(-1))   # flatten each 28x28 image to a 784-vector
        train_y.append(target)

        if i > 5000:                   # keep roughly the first 5000 training samples
            break

    # print(set(train_y))

    test_x = []
    test_y = []
    for i in range(len(test_dataset)):
        img, target = test_dataset[i]
        test_x.append(img.view(-1))
        test_y.append(target)

        if i > 200:                    # keep roughly the first 200 test samples
            break

    print("classes:", set(train_y))

    KNN(torch.stack(train_x), train_y, torch.stack(test_x), test_y, 7)
    KNN_by_iter(torch.stack(train_x), train_y, torch.stack(test_x), test_y, 7)

Output:

classes: {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
computing distance matrix
acc 0.9405940594059405
KNN mat training complete in 0m 0s
100%|██████████| 202/202 [00:26<00:00,  7.61it/s]
acc 0.9405940594059405
KNN iter training complete in 0m 27s

Process finished with exit code 0
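
The matrix version avoids explicit Python loops by using the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x.y. The following sanity-check sketch is illustrative only (not part of the original code); it compares the expansion against torch.cdist, a standard PyTorch function for pairwise distances:

#******************************************************************
# Sanity check for the distance expansion used in KNN() above.
import torch

test_x = torch.rand(5, 784)
train_x = torch.rand(8, 784)

xx = (test_x ** 2).sum(dim=1, keepdim=True)            # 5 x 1
yy = (train_x ** 2).sum(dim=1, keepdim=True).t()       # 1 x 8
dist_sq = xx + yy - 2 * test_x @ train_x.t()            # 5 x 8 squared distances

reference = torch.cdist(test_x, train_x) ** 2            # same quantity, computed directly
print(torch.allclose(dist_sq, reference, atol=1e-4))     # expected: True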

Reference blogs:
numpy.bincount explained
What is the difference between torch.cat and torch.stack in PyTorch?
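
Since the voting step relies on np.bincount and the data preparation relies on torch.stack, here is a tiny illustrative example of both (not from the original post):

#******************************************************************
# Tiny illustration of np.bincount voting and torch.stack vs. torch.cat.
import numpy as np
import torch

labels = np.array([3, 7, 3, 3, 1])
print(np.bincount(labels))            # counts per class id: [0 1 0 3 0 0 0 1]
print(np.bincount(labels).argmax())   # majority label: 3

a, b = torch.ones(4), torch.zeros(4)
print(torch.stack([a, b]).shape)      # stacks along a new dim -> torch.Size([2, 4])
print(torch.cat([a, b]).shape)        # concatenates along an existing dim -> torch.Size([8])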

KNN implemented with TensorFlow

#__author__ = 'HelloWorld怎么写'
#******************************************************************
# Imports; some of these APIs only exist in TF1, so run TF2 in compatibility mode
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Load the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
def loadMNIST():
    # read the data with one-hot labels
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    return mnist

# KNN (here effectively 1-NN: only the single nearest neighbour votes)
def KNN(mnist):
    # 10000 training samples, 500 test samples
    train_x, train_y = mnist.train.next_batch(10000)
    test_x, test_y = mnist.test.next_batch(500)
    # graph inputs: all training images (784 columns each) and one test image
    xtr = tf.placeholder(tf.float32, [None, 784])
    xte = tf.placeholder(tf.float32, [784])
    # Euclidean distance to every training image; tf.negative negates elementwise,
    # tf.add adds elementwise, and reduction_indices is the deprecated name of
    # the axis argument in tf.reduce_sum
    distance = tf.sqrt(tf.reduce_sum(tf.pow(tf.add(xtr, tf.negative(xte)), 2), reduction_indices=1))
    # index of the nearest training sample
    pred = tf.argmin(distance, 0)
    # initialise variables (there are none here, but it keeps the graph setup explicit)
    init = tf.global_variables_initializer()

    sess = tf.Session()
    sess.run(init)
    # measure accuracy over the 500 test samples
    right = 0
    for i in range(500):
        ansIndex = sess.run(pred, {xtr: train_x, xte: test_x[i, :]})
        print('prediction is ', np.argmax(train_y[ansIndex]))
        print('true value is ', np.argmax(test_y[i]))
        if np.argmax(test_y[i]) == np.argmax(train_y[ansIndex]):
            right += 1.0
    accuracy = right / 500.0
    print(accuracy)


if __name__ == "__main__":
    # load the data and run the classifier
    mnist = loadMNIST()
    KNN(mnist)

Output:

...
prediction is  7
true value is  7
prediction is  0
true value is  0
0.942
Process finished with exit code 0

Reference blogs:
TensorFlow: recognising MNIST with the nearest-neighbour algorithm
A K-nearest-neighbour (KNN) classifier in TensorFlow, using MNIST as an example
KNN for MNIST recognition in TensorFlow
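
The TensorFlow snippet above is strictly a 1-NN classifier, since tf.argmin picks a single neighbour. Below is a minimal, illustrative sketch of extending it to K neighbours with tf.nn.top_k and majority voting; it reuses loadMNIST() and the imports from the code above, and K = 7 is an arbitrary choice:

#******************************************************************
# Illustrative extension of the snippet above from 1-NN to K-NN (K = 7).
K = 7
mnist = loadMNIST()
train_x, train_y = mnist.train.next_batch(10000)
test_x, test_y = mnist.test.next_batch(500)

xtr = tf.placeholder(tf.float32, [None, 784])    # all training images
xte = tf.placeholder(tf.float32, [784])          # one test image
distance = tf.reduce_sum(tf.square(xtr - xte), axis=1)   # squared distances
_, knn_idx = tf.nn.top_k(tf.negative(distance), k=K)     # K smallest distances

with tf.Session() as sess:
    right = 0
    for i in range(len(test_x)):
        idx = sess.run(knn_idx, {xtr: train_x, xte: test_x[i]})
        votes = np.argmax(train_y[idx], axis=1)   # one-hot labels -> class ids
        if np.bincount(votes).argmax() == np.argmax(test_y[i]):
            right += 1
    print('K-NN accuracy:', right / len(test_x))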

Keras implementation (MNIST)

Keras provides no KNN estimator, so the code below trains a small fully connected network on the same data; it serves as a comparison baseline for the KNN results above (a true KNN baseline is sketched at the end of this section).

#__author__ = 'HelloWorld怎么写'
#******************************************************************
# Imports
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils import np_utils
from keras.datasets import mnist
import os
# select GPU 0
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Load and preprocess the MNIST data
def load_data():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # keep the first 10000 training images
    number = 10000
    x_train = x_train[0:number]
    y_train = y_train[0:number]
    # flatten each 28x28 image to a 784-vector
    x_train = x_train.reshape(number, 28 * 28)
    x_test = x_test.reshape(x_test.shape[0], 28 * 28)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    # np_utils.to_categorical() turns the integer labels into one-hot vectors
    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)
    # scale pixel values from 0-255 down to 0-1
    x_train = x_train / 255
    x_test = x_test / 255

    return (x_train, y_train), (x_test, y_test)


(x_train, y_train), (x_test, y_test) = load_data()

# Keras sequential model
model = Sequential()
# input layer: 784 features in, 689 units out, ReLU activation
model.add(Dense(input_dim=28 * 28, units=689, activation='relu'))
# dropout for regularisation
model.add(Dropout(0.2))
model.add(Dense(units=689, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=689, activation='relu'))
model.add(Dropout(0.2))
# output layer: 10 classes with softmax
model.add(Dense(units=10, activation='softmax'))
# loss, optimiser and evaluation metric
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# train
model.fit(x_train, y_train, batch_size=10000, epochs=20)
# evaluate on the training and test sets
res1 = model.evaluate(x_train, y_train, batch_size=10000)
print("\n Train Acc :", res1[1])
res2 = model.evaluate(x_test, y_test, batch_size=10000)
print("\n Test Acc :", res2[1])

Output:

...
10000/10000 [==============================] - 0s 17us/step - loss: 0.2658 - accuracy: 0.9210

10000/10000 [==============================] - 0s 12us/step

 Train Acc : 0.940500020980835

10000/10000 [==============================] - 0s 7us/step

 Test Acc : 0.9265000224113464

Process finished with exit code 0

Reference blogs:
The Keras MNIST handwritten-digit dataset
A very detailed beginner tutorial for MNIST handwritten-digit recognition with Keras
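
As promised above, here is a minimal sketch of an actual KNN baseline on the same Keras-loaded MNIST data. It is illustrative only; KNeighborsClassifier is a standard scikit-learn estimator, and the subset sizes (10000 train, 2000 test) are arbitrary choices to keep the run short:

#******************************************************************
# Illustrative KNN baseline on the Keras-loaded MNIST data (scikit-learn).
from keras.datasets import mnist
from sklearn.neighbors import KNeighborsClassifier

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train[:10000].reshape(-1, 28 * 28).astype('float32') / 255
x_test = x_test[:2000].reshape(-1, 28 * 28).astype('float32') / 255
y_train, y_test = y_train[:10000], y_test[:2000]    # integer labels, no one-hot needed

knn = KNeighborsClassifier(n_neighbors=7)
knn.fit(x_train, y_train)
print("sklearn KNN test accuracy:", knn.score(x_test, y_test))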
