KNN实现图像分类(JMU-机器学习作业)

KNN实现图像分类(JMU-机器学习作业)

KNN算法介绍

KNN(K-Nearest Neighbor)是最简单的机器学习算法之一,可以用于分类和回归,是一种监督学习算法。它的思路是这样,如果一个样本在特征空间中的K个最相似(即特征空间中最邻近)的样本中的大多数属于某一个类别,则该样本也属于这个类别。也就是说,该方法在定类决策上只依据最邻近的一个或者几个样本的类别来决定待分样本所属的类别。

KNN算法中距离的计算

从算法介绍中可看出KNN算法中对距离的计算直接影响着算法性能。本次任务为图像分类任务,我计算距离的方法是先将两个图像做减法,之后将获得的矩阵的channel通道加和后开方。可以获得一个二维矩阵,然后将矩阵中全部元素的加和当作两张图像的距离。

算法实现

数据集

数据集为随机采集集美大学中的汽车和电动车,下图为训练集的部分图像。图像涵盖了白天和黑夜两种光照条件,以及7种不同的车型。

在这里插入图片描述
在这里插入图片描述

由于时间关系,采集的数据并不是很完备,等之后作业时会再采一部分,拓展数据集的完整性。

数据集加载

数据集加载使用torch库中的torch.utils.data.DataLoader,代码如下

data_transform = {  # preprocessing pipelines for the two dataset splits
        "train": transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.RandomHorizontalFlip(),  
                                    transforms.ToTensor(),
                                    # ToTensor maps pixel values from 0-255 to 0.0-1.0
                                    # and reorders (H, W, C) to (C, H, W)
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
                                    # normalize the tensor with per-channel mean and std

        "val": transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

    root_path = os.getcwd()
    data_path = os.path.abspath(os.path.join(root_path, "data_set"))
    train_path = os.path.abspath(os.path.join(data_path, "train"))
    val_path = os.path.abspath(os.path.join(data_path, "val"))
    model_path = os.path.abspath(os.path.join(root_path, "model"))

    train_dataset = datasets.ImageFolder(root=train_path, transform=data_transform["train"])
    train_num = len(train_dataset)
    date_list = train_dataset.class_to_idx  # mapping of class name -> class index

    cla_dict = dict((val, key) for key, val in date_list.items())  # invert the mapping: index -> class name
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)  # encode cla_dict as a JSON string
    with open('class.json', 'w') as json_file:
        json_file.write(json_str)  # persist the index -> class-name mapping so prediction code can reload it

    batch_size = 1  # KNN compares whole images pairwise, so images are loaded one at a time
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=0)
    validata_dataset = datasets.ImageFolder(root=val_path, transform=data_transform["val"])
    val_num = len(validata_dataset)
    validata_loader = torch.utils.data.DataLoader(validata_dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=0)

KNN算法实现

import torch
from collections import Counter
from operator import itemgetter

def get_distance(data1, data2):
    """Return the distance between two image tensors as a scalar tensor.

    Squeezes away size-1 (batch) dimensions, squares the per-pixel
    difference, sums it over the channel axis, takes the element-wise
    square root, and sums the resulting (H, W) map into one scalar.
    """
    data1 = torch.squeeze(data1)
    data2 = torch.squeeze(data2)
    squared_diff = torch.pow(data1 - data2, 2)
    # Sum over the channel axis with Tensor.sum(dim=0) instead of the
    # Python builtin sum(), which loops over dim 0 one slice at a time.
    per_pixel = torch.sqrt(squared_diff.sum(dim=0))
    return per_pixel.sum()

def get_neighbours(training_set, train_label, test_instance, k):
    """Return the k (distance, label) pairs closest to test_instance."""
    # Pair each training image's distance-to-test with its label.
    scored = [(get_distance(image, test_instance), label)
              for image, label in zip(training_set, train_label)]
    # Ascending sort on the distance component, then keep the k nearest.
    scored.sort(key=lambda pair: pair[0])
    return scored[:k]

# Majority-vote helper for the KNN decision rule.
def get_majority_vote(neighbours):
    """Return the most frequent class label among (distance, label) pairs."""
    votes = Counter(label for _, label in neighbours)
    # most_common(1) yields [(label, count)] for the top label;
    # ties resolve to the earliest-seen label, as before.
    return votes.most_common(1)[0][0]

主要代码

# Cache every training image tensor and its integer label in memory;
# KNN has no training phase, so this is the whole "fit" step.
for step, train_data_ in enumerate(train_loader, start=0):
        train_images, train_labels = train_data_
        train_data.append(train_images)
        train_label.append(train_labels.item())
    # Sweep k from 1 to 29 and record validation accuracy for each value.
    # NOTE(review): this excerpt starts mid-function, so the indentation
    # reflects the enclosing main() of the full listing.
    for k in tqdm(range(1,30)):
        acc = 0.0
        for step, val_data_ in enumerate(validata_loader, start=0):
            val_images, val_labels = val_data_
            # Rank training images by distance and take the k nearest.
            neighbours = get_neighbours(train_data,train_label, val_images, k=k)
            majority_vote = get_majority_vote(neighbours)
            # batch_size is 1, so this adds 1.0 on a correct prediction.
            acc += (majority_vote==val_labels.to(device)).sum().item()
        val_accurate = acc / val_num
        val_accurate_.append(val_accurate) 
        K_temp.append(k)

完整代码

完整代码分为train.py和knn.py两部分

train.py

import torch.nn as nn
from torchvision import transforms, datasets
import json
import os
import torch.optim as optim
import torch
import numpy as np
from PIL import Image
from sklearn.datasets import load_iris
from sklearn import model_selection
from sklearn.metrics import classification_report, accuracy_score
from knn import get_distance,get_neighbours,get_majority_vote
import matplotlib.pyplot as plt
from tqdm import tqdm

def main():
    """Run KNN image classification: load the data, sweep k = 1..29 over the
    validation set, and save an accuracy-vs-k plot to accurate.png."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    data_transform = {  # preprocessing pipelines for the two dataset splits
        # transforms.Compose chains the preprocessing steps into one callable
        "train": transforms.Compose([transforms.Resize((224, 224)),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     # ToTensor maps pixel values 0-255 to 0.0-1.0
                                     # and reorders (H, W, C) to (C, H, W)
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
                                     # normalize with per-channel mean and std

        "val": transforms.Compose([transforms.Resize((224, 224)),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

    # Expected layout: <cwd>/data_set/train and <cwd>/data_set/val, one
    # sub-directory per class (ImageFolder convention).
    root_path = os.getcwd()
    data_path = os.path.abspath(os.path.join(root_path, "data_set"))
    train_path = os.path.abspath(os.path.join(data_path, "train"))
    val_path = os.path.abspath(os.path.join(data_path, "val"))

    train_dataset = datasets.ImageFolder(root=train_path, transform=data_transform["train"])
    class_to_idx = train_dataset.class_to_idx  # class name -> class index

    # Invert to index -> class name and persist it so prediction code can
    # translate predicted indices back into human-readable labels.
    cla_dict = {idx: name for name, idx in class_to_idx.items()}
    with open('class.json', 'w') as json_file:
        json_file.write(json.dumps(cla_dict, indent=4))

    batch_size = 1  # KNN compares whole images pairwise, so load one at a time
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=0)
    validata_dataset = datasets.ImageFolder(root=val_path, transform=data_transform["val"])
    val_num = len(validata_dataset)
    validata_loader = torch.utils.data.DataLoader(validata_dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=0)

    # Cache every training image tensor and its integer label in memory;
    # KNN has no training phase, so this is the whole "fit" step.
    train_data = []
    train_label = []
    for train_images, train_labels in train_loader:
        train_data.append(train_images)
        train_label.append(train_labels.item())

    # Sweep k. Rank the neighbours once per validation image with the largest
    # k and slice the prefix for every smaller k — identical results, but the
    # expensive image-to-image distances are computed once instead of once per
    # k value (a ~29x saving over re-ranking inside the k loop).
    k_values = list(range(1, 30))
    k_max = max(k_values)
    correct = dict.fromkeys(k_values, 0)
    for val_images, val_labels in tqdm(validata_loader):
        ranked = get_neighbours(train_data, train_label, val_images, k=k_max)
        truth = val_labels.item()
        for k in k_values:
            if get_majority_vote(ranked[:k]) == truth:
                correct[k] += 1

    val_accurate_ = [correct[k] / val_num for k in k_values]
    plt.plot(k_values, val_accurate_)
    plt.ylabel('accuracy')
    plt.xlabel('k')
    plt.savefig("accurate.png")


if __name__ == "__main__":
    main()

knn.py

import torch
from collections import Counter
from operator import itemgetter

def get_distance(data1, data2):
    """Return the distance between two image tensors as a scalar tensor.

    Squeezes away size-1 (batch) dimensions, squares the per-pixel
    difference, sums it over the channel axis, takes the element-wise
    square root, and sums the resulting (H, W) map into one scalar.
    """
    data1 = torch.squeeze(data1)
    data2 = torch.squeeze(data2)
    squared_diff = torch.pow(data1 - data2, 2)
    # Sum over the channel axis with Tensor.sum(dim=0) instead of the
    # Python builtin sum(), which loops over dim 0 one slice at a time.
    per_pixel = torch.sqrt(squared_diff.sum(dim=0))
    return per_pixel.sum()

def get_neighbours(training_set, train_label, test_instance, k):
    """Return the k (distance, label) pairs closest to test_instance."""
    # Pair each training image's distance-to-test with its label.
    scored = [(get_distance(image, test_instance), label)
              for image, label in zip(training_set, train_label)]
    # Ascending sort on the distance component, then keep the k nearest.
    scored.sort(key=lambda pair: pair[0])
    return scored[:k]

# Majority-vote helper for the KNN decision rule.
def get_majority_vote(neighbours):
    """Return the most frequent class label among (distance, label) pairs."""
    votes = Counter(label for _, label in neighbours)
    # most_common(1) yields [(label, count)] for the top label;
    # ties resolve to the earliest-seen label, as before.
    return votes.most_common(1)[0][0]

运行结果

请添加图片描述

结果思考

由结果可以看出KNN中的K值选取对K近邻算法的结果会产生重大影响,其K值对精度的影响呈现为先增后减。具体分析为如果选择较小的K值,就相当于用较小的邻域中的训练实例进行预测,“学习”近似误差会减小,只有与输入实例较近或相似的训练实例才会对预测结果起作用,与此同时带来的问题是“学习”的估计误差会增大,换句话说,K值的减小就意味着整体模型变得复杂,容易发生过拟合;如果选择较大的K值,就相当于用较大邻域中的训练实例进行预测,其优点是可以减少学习的估计误差,但缺点是学习的近似误差会增大,会使模型过度平滑。这时候,与输入实例较远(不相似的)训练实例也会对预测起作用,使预测发生错误,且K值的增大就意味着整体的模型变得简单。在此任务中,k为3和4的时候效果最好,精度达到了100%。

  • 1
    点赞
  • 22
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值