2021-05-14

This post shows how to classify the MNIST dataset with the KNN algorithm. It first runs the classifier on the raw pixel data, implements both Manhattan and Euclidean distance metrics, and then repeats the experiment after mean-centering the data. With this preprocessing in place, accuracy improves markedly, from 36.8% to 95.1%.

Classifying MNIST with the KNN Algorithm

The code to download and load the MNIST dataset:

import torch
from torch.utils.data import DataLoader
import torchvision.datasets as dsets
import numpy as np
import operator
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'  # work around the "duplicate OpenMP runtime" error some PyTorch/matplotlib installs raise


batch_size = 100
# MNIST dataset
train_dataset = dsets.MNIST(root = '/ml/pymnist', # root directory for the data
                           train = True, # use the training split
                           transform = None, # no preprocessing applied here
                           download = True) # download the images if not present
test_dataset = dsets.MNIST(root = '/ml/pymnist', # root directory for the data
                           train = False, # use the test split
                           transform = None, # no preprocessing applied here
                           download = True) # download the images if not present
# load the data
train_loader = torch.utils.data.DataLoader(dataset = train_dataset,
                                           batch_size = batch_size,
                                           shuffle = True)  # shuffle the data
test_loader = torch.utils.data.DataLoader(dataset = test_dataset,
                                          batch_size = batch_size,
                                          shuffle = True)


Check the number of training samples, the number of test samples, and the image size:

# number of training samples and the size of each image
# (newer torchvision exposes these tensors as .data / .targets)
print('train_data:', train_dataset.train_data.size())
# length of the training labels
print('train_labels:', train_dataset.train_labels.size())
# number of test samples and the size of each image
print('test_data:', test_dataset.test_data.size())
# length of the test labels
print('test_labels:', test_dataset.test_labels.size())
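
With the standard MNIST split, this should print the familiar sizes:

train_data: torch.Size([60000, 28, 28])
train_labels: torch.Size([60000])
test_data: torch.Size([10000, 28, 28])
test_labels: torch.Size([10000])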


Display the image at index 25 of the training set, along with its label:

# show the training image at index 25 and print its label
digit = train_loader.dataset.train_data[25]
print(train_loader.dataset.train_labels[25])
plt.imshow(digit, cmap=plt.cm.binary)
plt.show()


Now for the KNN classification itself.

First, define the KNN classifier (Manhattan distance shown here; the Euclidean-distance version follows below):

# KNN classification of MNIST
def kNN_classify(k, dis, X_train, y_train, X_test):
    assert dis == 'E' or dis == 'M', "dis must be 'E' (Euclidean) or 'M' (Manhattan)"
    num_test = X_test.shape[0]  # number of test samples
    labellist = []

    # use the Manhattan distance as the metric
    if dis == 'M':
        for i in range(num_test):
            # Manhattan distance: sum of absolute pixel differences
            distances = np.sum(np.abs(X_train - np.tile(X_test[i], (X_train.shape[0], 1))), axis=1)
            nearest_k = np.argsort(distances)  # indices sorted by ascending distance
            topK = nearest_k[:k]  # keep the k nearest neighbors
            classCount = {}
            for j in topK:  # count the votes for each class
                classCount[y_train[j]] = classCount.get(y_train[j], 0) + 1
            sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
            labellist.append(sortedClassCount[0][0])
        return np.array(labellist)
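
To make the np.tile step concrete, here is a toy example (made-up values) in which the same line computes the Manhattan distance from one test vector to every training row:

import numpy as np

X = np.array([[1, 2],
              [4, 6]])  # two "training" rows
y = np.array([1, 1])    # one "test" vector
# tile y to X's shape, subtract, then take absolute row sums
print(np.sum(np.abs(X - np.tile(y, (X.shape[0], 1))), axis=1))  # -> [1 8]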

The Euclidean-distance version:

def kNN_classify(k, dis, X_train, y_train, X_test):
    assert dis == 'E' or dis == 'M', "dis must be 'E' (Euclidean) or 'M' (Manhattan)"
    num_test = X_test.shape[0]  # number of test samples
    labellist = []

    # use the Euclidean distance as the metric
    if dis == 'E':
        for i in range(num_test):
            # Euclidean distance: square root of the sum of squared differences
            distances = np.sqrt(np.sum((X_train - np.tile(X_test[i], (X_train.shape[0], 1))) ** 2, axis=1))
            nearest_k = np.argsort(distances)  # indices sorted by ascending distance
            topK = nearest_k[:k]  # keep the k nearest neighbors
            classCount = {}
            for j in topK:  # count the votes for each class
                classCount[y_train[j]] = classCount.get(y_train[j], 0) + 1
            sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
            labellist.append(sortedClassCount[0][0])
        return np.array(labellist)
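
As an aside, NumPy broadcasting makes the np.tile call unnecessary; a minimal sketch (same toy values as above) that computes identical Euclidean distances:

import numpy as np

X = np.array([[1.0, 2.0],
              [4.0, 6.0]])
y = np.array([1.0, 1.0])
# broadcasting subtracts y from every row of X directly
print(np.sqrt(np.sum((X - y) ** 2, axis=1)))  # -> [1.         5.83095189]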

Evaluate KNN on MNIST (Manhattan distance):

# evaluate KNN on MNIST
if __name__ == '__main__':
    X_train = train_loader.dataset.train_data.numpy()  # convert to a NumPy array
    X_train = X_train.reshape(X_train.shape[0], 28 * 28)  # flatten the images before feeding them to the classifier
    y_train = train_loader.dataset.train_labels.numpy()
    X_test = test_loader.dataset.test_data[:1000].numpy()
    X_test = X_test.reshape(X_test.shape[0], 28 * 28)
    y_test = test_loader.dataset.test_labels[:1000].numpy()
    num_test = y_test.shape[0]
    y_test_pred = kNN_classify(5, 'M', X_train, y_train, X_test)
    num_correct = np.sum(y_test_pred == y_test)
    accuracy = float(num_correct) / num_test
    print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))

The output:

Got 368 / 1000 correct => accuracy: 0.368000

36.8% accuracy.

The result is far from ideal. A likely culprit: the pixel arrays come back as uint8, so the subtraction inside the distance computation wraps around modulo 256 instead of going negative, which corrupts the distances.
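
A two-line demonstration of the wraparound (illustrative values):

import numpy as np

a = np.array([10], dtype=np.uint8)
b = np.array([20], dtype=np.uint8)
print(a - b)  # -> [246], not -10: uint8 arithmetic wraps modulo 256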

Next, preprocess the images by mean-centering (casting to float and subtracting the per-pixel mean image). The code:

First define two helper functions:

def getXmean(X_train):
    X_train = np.reshape(X_train, (X_train.shape[0], -1))  # flatten each image to a 1-D vector
    mean_image = np.mean(X_train, axis=0)  # per-column mean, i.e. the average of each pixel across all images
    return mean_image

def centralized(x_test, mean_image):
    x_test = np.reshape(x_test, (x_test.shape[0], -1))
    x_test = x_test.astype(float)  # np.float was removed in NumPy 1.24; the builtin float works
    x_test -= mean_image  # subtract the mean image, giving zero-mean data
    return x_test
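
A quick sanity check on toy data (illustrative only): after centering, every pixel column of the set used to compute the mean should average to roughly zero.

import numpy as np

X = np.random.randint(0, 256, size=(10, 28, 28))  # fake "images"
mean_image = getXmean(X)
Xc = centralized(X, mean_image)
print(np.allclose(Xc.mean(axis=0), 0))  # -> True
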
# evaluate KNN on MNIST, now with preprocessing (mean-centering)
if __name__ == '__main__':
    X_train = train_loader.dataset.train_data.numpy()
    mean_image = getXmean(X_train)
    X_train = centralized(X_train, mean_image)
    y_train = train_loader.dataset.train_labels.numpy()
    X_test = test_loader.dataset.test_data[:1000].numpy()
    X_test = centralized(X_test, mean_image)
    y_test = test_loader.dataset.test_labels[:1000].numpy()
    num_test = y_test.shape[0]
    y_test_pred = kNN_classify(5, 'M', X_train, y_train, X_test)
    num_correct = np.sum(y_test_pred == y_test)
    accuracy = float(num_correct) / num_test
    print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))

The output:

Got 951 / 1000 correct => accuracy: 0.951000

95.1% accuracy. One caveat worth noting: subtracting the same mean image from both the training and test vectors does not change their pairwise Manhattan distances, so most of this gain likely comes from the cast to float inside centralized, which eliminates the unsigned-integer wraparound.

The complete code:

import torch
from torch.utils.data import DataLoader
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import numpy as np
import operator
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'  # work around the "duplicate OpenMP runtime" error some PyTorch/matplotlib installs raise


batch_size = 100
# MNIST dataset
train_dataset = dsets.MNIST(root = '/ml/pymnist', # root directory for the data
                           train = True, # use the training split
                           transform = None, # no preprocessing applied here
                           download = True) # download the images if not present
test_dataset = dsets.MNIST(root = '/ml/pymnist', # root directory for the data
                           train = False, # use the test split
                           transform = None, # no preprocessing applied here
                           download = True) # download the images if not present
# load the data
train_loader = torch.utils.data.DataLoader(dataset = train_dataset,
                                           batch_size = batch_size,
                                           shuffle = True)  # shuffle the data
test_loader = torch.utils.data.DataLoader(dataset = test_dataset,
                                          batch_size = batch_size,
                                          shuffle = True)

# number of training samples and the size of each image
print('train_data:', train_dataset.train_data.size())
# length of the training labels
print('train_labels:', train_dataset.train_labels.size())
# number of test samples and the size of each image
print('test_data:', test_dataset.test_data.size())
# length of the test labels
print('test_labels:', test_dataset.test_labels.size())

# show the training image at index 25 and print its label
digit = train_loader.dataset.train_data[25]
print(train_loader.dataset.train_labels[25])
plt.imshow(digit, cmap=plt.cm.binary)
plt.show()


# KNN classification of MNIST
def kNN_classify(k, dis, X_train, y_train, X_test):
    assert dis == 'E' or dis == 'M', "dis must be 'E' (Euclidean) or 'M' (Manhattan)"
    num_test = X_test.shape[0]  # number of test samples
    labellist = []

    # use the Manhattan distance as the metric
    if dis == 'M':
        for i in range(num_test):
            # Manhattan distance: sum of absolute pixel differences
            distances = np.sum(np.abs(X_train - np.tile(X_test[i], (X_train.shape[0], 1))), axis=1)
            nearest_k = np.argsort(distances)  # indices sorted by ascending distance
            topK = nearest_k[:k]  # keep the k nearest neighbors
            classCount = {}
            for j in topK:  # count the votes for each class
                classCount[y_train[j]] = classCount.get(y_train[j], 0) + 1
            sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
            labellist.append(sortedClassCount[0][0])
        return np.array(labellist)

def getXmean(X_train):
    X_train = np.reshape(X_train, (X_train.shape[0], -1))  # flatten each image to a 1-D vector
    mean_image = np.mean(X_train, axis=0)  # per-column mean, i.e. the average of each pixel across all images
    return mean_image

def centralized(x_test, mean_image):
    x_test = np.reshape(x_test, (x_test.shape[0], -1))
    x_test = x_test.astype(float)  # np.float was removed in NumPy 1.24; the builtin float works
    x_test -= mean_image  # subtract the mean image, giving zero-mean data
    return x_test

# evaluate KNN on MNIST, now with preprocessing (mean-centering)
if __name__ == '__main__':
    X_train = train_loader.dataset.train_data.numpy()
    mean_image = getXmean(X_train)
    X_train = centralized(X_train, mean_image)
    y_train = train_loader.dataset.train_labels.numpy()
    X_test = test_loader.dataset.test_data[:1000].numpy()
    X_test = centralized(X_test, mean_image)
    y_test = test_loader.dataset.test_labels[:1000].numpy()
    num_test = y_test.shape[0]
    y_test_pred = kNN_classify(5, 'M', X_train, y_train, X_test)
    num_correct = np.sum(y_test_pred == y_test)
    accuracy = float(num_correct) / num_test
    print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
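
For comparison, here is a minimal sketch of the same experiment using scikit-learn's KNeighborsClassifier (assuming scikit-learn is installed; X_train, y_train, X_test, y_test are the arrays prepared above). Brute-force search over 60,000 training vectors is still slow, but the library handles the distance computation and voting for you:

from sklearn.neighbors import KNeighborsClassifier

clf = KNeighborsClassifier(n_neighbors=5, metric='manhattan')
clf.fit(X_train, y_train)
print('accuracy:', clf.score(X_test, y_test))  # fraction of correct predictions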

Let's keep improving together!
