python对自己的分类模型进行测试并得到混淆矩阵及混淆热力图

我是用CIFAR-10数据集训练的VGG-SMALL网络,现在已经得到了这个模型。关于模型怎么引入的具体说明可以看我之前的文章,有提到。
现在是直接把代码里的validate部分进行了修改,把tensor类型的数据张量改成了list,再利用sklearn里的confusion_matrix函数进行计算。

分成两个部分,一个是直接用CIFAR-10里的带标签的数据集进行测试,另一个是用从网上下载的图片作为测试集进行测试。

一.CIFAR-10数据集

# 把validate模块直接移出来,改为目标检测
import sys
import argparse
import seaborn as sns
import os
import time
import math
import random
import numpy as np
import shutil
import matplotlib.pyplot as plt
import torch.nn.functional as F
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import vgg
import cv2
import tensorflow as tf
import torchvision
from PIL import Image

from ranger import Ranger  # this is from ranger.py
from ranger import RangerVA  # this is from ranger913A.py
from ranger import RangerQH  # this is from rangerqh.py
from sklearn.metrics import confusion_matrix

# GPU selection (disabled: all visible devices are used by default)
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2"
# np.set_printoptions(threshold=np.inf)  # print full arrays without truncation
# Collect the callable, lowercase "vgg*" factory names exported by the local
# vgg module so argparse can validate --arch against them.
model_names = sorted(name for name in vgg.__dict__
                     if name.islower() and not name.startswith("__")
                     and name.startswith("vgg")
                     and callable(vgg.__dict__[name]))

# Command-line interface.  Most options are inherited from the original
# training script; only a few (e.g. --arch, --batch-size, --half,
# --print-freq) are actually used by this evaluation-only script.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', '-a', metavar='ARCH', default='vgg_small_1w1a',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: vgg_small_1w1a)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=400, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
                    metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.007, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
# help text fixed: the actual default below is 40, not 20
parser.add_argument('--print-freq', '-p', default=40, type=int,
                    metavar='N', help='print frequency (default: 40)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--half', dest='half', action='store_true',
                    help='use half-precision(16-bit) ')
parser.add_argument('--save-dir', dest='save_dir',
                    help='The directory used to save the trained models',
                    default='save_temp', type=str)
parser.add_argument('--save-every', dest='save_every',
                    help='Saves checkpoints at every specified number of epochs',
                    type=int, default=20)

best_prec1 = 0
# Global accumulators filled batch-by-batch inside accuracy() and consumed by
# validate() to build the confusion matrix over the whole validation set.
pred_list = []
target_list = []

# Default CIFAR-10 label order: [airplane(0), automobile(1), bird(2), cat(3),
# deer(4), dog(5), frog(6), horse(7), ship(8), truck(9)]
def main():
    """Build the CIFAR-10 validation loader, restore the trained checkpoint
    and run validation (prints accuracy and the confusion matrix)."""
    global args, best_prec1
    args = parser.parse_args()
    # ImageNet normalisation statistics, kept from the training script.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    val_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(root='./data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=0, pin_memory=True, drop_last=True)

    # The checkpoint was saved from a DataParallel model, so wrap once before
    # loading the state dict.  The original code wrapped the model in
    # nn.DataParallel a second time after loading, which is redundant and
    # changes the module name hierarchy; that wrap has been removed.
    model = torch.nn.DataParallel(vgg.__dict__[args.arch]())
    model.cuda()
    checkpoint = torch.load("C:\\Users\\83543\\Desktop\\model_best.pth.tar")
    model.load_state_dict(checkpoint['state_dict'])
    validate(val_loader, model)

# Validation
def validate(val_loader, model):
    """Run evaluation over ``val_loader``.

    Records per-batch top-1 accuracy, then prints the 10x10 confusion matrix
    accumulated in the module-level ``target_list``/``pred_list`` (filled as a
    side effect of ``accuracy()``).  Returns the average top-1 precision.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()  # never updated here, so the progress line prints 0.0000
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    # inference only: no gradients needed (saves memory and time)
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            target = target.cuda()
            input_var = input.cuda()
            if args.half:
                input_var = input_var.half()

            # compute output
            output = model(input_var)
            output = output.float()

            # measure accuracy (also appends this batch to pred_list/target_list)
            prec1 = accuracy(output.data, target)[0]
            top1.update(prec1.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                    i, len(val_loader), batch_time=batch_time, loss=losses,
                    top1=top1))
    # confusion matrix over the whole validation set
    Confuse = confusion_matrix(target_list, pred_list, labels=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    print(Confuse)
    print(' * Prec@1 {top1.avg:.3f}'
          .format(top1=top1))

    return top1.avg


class AverageMeter(object):
    """Track a running statistic: last value, running sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear every accumulated statistic back to zero."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count


# Accuracy computation; also records per-batch predictions/targets globally.
def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Side effect: extends the module-level ``pred_list``/``target_list`` with
    this batch's top-1 predictions and ground-truth labels, so validate() can
    build a confusion matrix over the whole epoch.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # pred holds the indices of the top-k logits; for a classifier the index
    # of the maximum logit is the predicted class label.
    _, pred = output.topk(maxk, 1, True, True)
    # transpose to shape (maxk, batch)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    # Convert tensors to plain Python lists and accumulate them for the
    # epoch-level confusion matrix (the dead temp-list initialisations from
    # the original were removed).
    pred_list.extend(pred[0].cpu().numpy().tolist())
    target_list.extend(target.cpu().numpy().tolist())

    res = []
    for k in topk:
        # reshape (not view): the slice may be non-contiguous for k > 1
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))

    return res


# Script entry point.
if __name__ == '__main__':
    main()

输出结果:

Test: [0/78]	Time 3.093 (3.093)	Loss 0.0000 (0.0000)	Prec@1 89.844 (89.844)
Test: [40/78]	Time 0.087 (0.174)	Loss 0.0000 (0.0000)	Prec@1 88.281 (90.396)
[[922   7  17   3   6   2   2   5  24  10]
 [  8 953   0   1   1   0   2   0   9  25]
 [ 18   0 863  34  32  13  27   6   4   2]
 [ 13   0  27 800  30  88  19  13   5   2]
 [  5   0  27  21 902  19  12  13   1   0]
 [  6   1  15  79  18 862   3   9   2   2]
 [  3   1  21  17   8   8 940   1   0   1]
 [  7   0  12  15  19  17   1 924   0   2]
 [ 29   9   6   2   1   1   2   0 938   9]
 [ 15  26   3   6   1   1   1   3  12 932]]
 * Prec@1 90.505

部分代码是没用的,懒得删了。

二.自己从网上下载的网络图片作为测试集,自己设置标签

自己保存的数据集目录如下,文件名很重要,图片名不重要:
在这里插入图片描述

代码如下:

# 把validate模块直接移出来,改为目标检测
import sys
import argparse
import seaborn as sns
import os
import time
import math
import random
import numpy as np
import shutil
import matplotlib.pyplot as plt
import torch.nn.functional as F
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import vgg
import cv2
import tensorflow as tf
import torchvision
from PIL import Image

from ranger import Ranger  # this is from ranger.py
from ranger import RangerVA  # this is from ranger913A.py
from ranger import RangerQH  # this is from rangerqh.py
from sklearn.metrics import confusion_matrix

# GPU selection (disabled: all visible devices are used by default)
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2"
# np.set_printoptions(threshold=np.inf)  # print full arrays without truncation
# Collect the callable, lowercase "vgg*" factory names exported by the local
# vgg module so argparse can validate --arch against them.
model_names = sorted(name for name in vgg.__dict__
                     if name.islower() and not name.startswith("__")
                     and name.startswith("vgg")
                     and callable(vgg.__dict__[name]))

# Command-line interface.  Most options are inherited from the original
# training script; only a few (e.g. --arch, --half) are actually used by
# this evaluation-only script.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', '-a', metavar='ARCH', default='vgg_small_1w1a',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: vgg_small_1w1a)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=400, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
                    metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.007, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
# help text fixed: the actual default below is 40, not 20
parser.add_argument('--print-freq', '-p', default=40, type=int,
                    metavar='N', help='print frequency (default: 40)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--half', dest='half', action='store_true',
                    help='use half-precision(16-bit) ')
parser.add_argument('--save-dir', dest='save_dir',
                    help='The directory used to save the trained models',
                    default='save_temp', type=str)
parser.add_argument('--save-every', dest='save_every',
                    help='Saves checkpoints at every specified number of epochs',
                    type=int, default=20)

best_prec1 = 0
pred_list = []
# Hard-coded ground truth for the web-image test set: 100 images, 10 per
# class, in the order ImageFolder enumerates the class sub-directories
# (alphabetical).  Must match the on-disk ./val directory layout.
target_list = [0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3
                      ,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,
                      7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9]

# Default CIFAR-10 label order: [airplane(0), automobile(1), bird(2), cat(3),
# deer(4), dog(5), frog(6), horse(7), ship(8), truck(9)]
def main():
    """Evaluate the trained model on a folder of downloaded web images.

    ./val is expected to contain one sub-directory per class; ImageFolder
    assigns labels from the alphabetically sorted directory names, which must
    agree with the hard-coded ``target_list`` above.
    """
    global args, best_prec1
    args = parser.parse_args()
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Web images as input.  NOTE: this is a Dataset, not a DataLoader, so
    # validate() iterates single (image, label) pairs.
    img_path = './val'
    # NOTE(review): RandomHorizontalFlip/RandomCrop are training-time
    # augmentations; a deterministic evaluation would normally drop them.
    # They are kept here to preserve the original behaviour.
    trans = transforms.Compose([transforms.Resize((32, 32)),
                                transforms.RandomHorizontalFlip(),
                                transforms.RandomCrop(32, 4),
                                transforms.ToTensor(), normalize
                                ])
    val_loader = torchvision.datasets.ImageFolder(
        img_path, transform=trans)
    print(val_loader.classes)

    # The checkpoint was saved from a DataParallel model, so wrap once before
    # loading the state dict.  The second nn.DataParallel(model) wrap from
    # the original code was redundant and has been removed.
    model = torch.nn.DataParallel(vgg.__dict__[args.arch]())
    model.cuda()
    model.load_state_dict(torch.load("C:\\Users\\83543\\Desktop\\model_best9050.pth.tar")['state_dict'])
    validate(val_loader, model)


# Pad a rectangular image to a white square and resize it, in place.
def completion_pic(img_path):
    """Open *img_path*, pad the shorter side with white to make the image
    square, resize to 256x256 and overwrite the original file."""
    image = Image.open(img_path)
    image = image.convert('RGB')
    w, h = image.size
    side = max(w, h)
    # square white canvas the image will be pasted onto
    background = Image.new('RGB', size=(side, side), color=(255, 255, 255))
    offset = int(abs(w - h) // 2)  # padding required on one side
    paste_box = (offset, 0) if w < h else (0, offset)
    background.paste(image, paste_box)
    image_data = background.resize((256, 256))  # scale down to a fixed size
    # overwrite the source file with the squared version
    image_data.save(img_path)
# Validation over the web-image dataset.
def validate(val_loader, model):
    """Run evaluation on an ImageFolder dataset of single images.

    ``val_loader`` is actually a Dataset (not a DataLoader): each iteration
    yields one (transformed tensor, label) pair, so the tensor is reshaped to
    a batch of one before the forward pass.  Predictions accumulate in the
    module-level ``pred_list`` via accuracy(); the ground truth is the
    hard-coded ``target_list``.  The confusion matrix is printed and dumped
    to 'confuse_vgg.xls' (tab-separated text).
    """
    batch_time = AverageMeter()
    top1 = AverageMeter()  # never updated here, so the final Prec@1 prints 0.000

    # switch to evaluate mode
    model.eval()
    end = time.time()
    # inference only: no gradients needed
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            input_var = input.cuda()
            if args.half:
                input_var = input_var.half()
            input_var = input_var.view(1, 3, 32, 32)  # single image -> batch of 1
            # compute output
            output = model(input_var)
            output = output.float()

            # record this image's prediction (the return value is unused here)
            accuracy(output.data, target)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

    # confusion matrix over the whole folder
    Confuse = confusion_matrix(target_list, pred_list, labels=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    print(Confuse)
    # dump as tab-separated text (Excel opens it despite the .xls suffix);
    # `with` guarantees the file is closed even on error
    with open('confuse_vgg.xls', 'w', encoding='gbk') as fout:
        fout.write('confuse_vgg\n')
        for row in Confuse:
            # str() because write() needs text; the trailing tab before the
            # newline matches the original file layout
            fout.write('\t'.join(str(v) for v in row) + '\t\n')
    print(' * Prec@1 {top1.avg:.3f}'
          .format(top1=top1))

    return top1.avg


class AverageMeter(object):
    """Keep the most recent value plus a running sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Fold in *val*, weighted by *n* occurrences, and refresh the mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count


# Despite the name, this version no longer computes accuracy: it only records
# each batch's top-1 prediction for the confusion matrix and returns [].
def accuracy(output, target, topk=(1,)):
    """Record the top-1 predictions of this batch.

    Side effect: extends the module-level ``pred_list``.  The returned list
    is always empty and the caller ignores it; ``target`` is also unused here
    because the ground truth comes from the hard-coded ``target_list``.
    """

    maxk = max(topk)
    # pred holds the indices of the top-k logits; the index of the maximum
    # logit is the predicted class label.
    _, pred = output.topk(maxk, 1, True, True)
    # transpose to shape (maxk, batch)
    pred = pred.t()
    temp_pred_list = pred[0].cpu().numpy().tolist()
    pred_list.extend(temp_pred_list)
    res = []

    return res

# Script entry point.
if __name__ == '__main__':
    main()

输出结果如下:(prec1这个变量没有使用到)

['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
[[10  0  0  0  0  0  0  0  0  0]
 [ 1  9  0  0  0  0  0  0  0  0]
 [ 1  0  9  0  0  0  0  0  0  0]
 [ 0  0  0  8  0  2  0  0  0  0]
 [ 0  0  0  0 10  0  0  0  0  0]
 [ 0  0  0  0  0  9  0  0  1  0]
 [ 0  0  0  0  1  0  9  0  0  0]
 [ 0  0  2  0  0  0  0  8  0  0]
 [ 0  0  0  0  0  0  0  0 10  0]
 [ 0  0  0  0  0  0  0  0  0 10]]
 * Prec@1 0.000

热力图部分


def plot_confusion_matrix(cm, labels_name, title):
    """Render a row-normalised confusion matrix as a labelled heat map."""
    # normalise each row so cells show per-true-class proportions
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest')  # draw the matrix as an image
    plt.title(title)
    plt.colorbar()
    tick_marks = np.array(range(len(labels_name)))
    plt.xticks(tick_marks, labels_name, rotation=90)  # class names on the x axis
    plt.yticks(tick_marks, labels_name)  # class names on the y axis
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# NOTE(review): this fragment was pasted with broken indentation (the lines
# below were indented as if inside a function although nothing opened a
# block).  Re-flowed here as valid top-level script code; true_label and
# pred_label must be provided by the surrounding evaluation code.
cm = confusion_matrix(true_label, pred_label, )
print(cm)
# draw the heat map
plot_confusion_matrix(cm, ["boots", "sandal", "shoes"], "HAR Confusion Matrix")
plt.savefig('shoe_classifier.png')
plt.show()
# dump the confusion matrix to a text file
f = open('predict_matrix.txt', 'w')
print(str(cm), file=f)
f.close()

请添加图片描述

  • 1
    点赞
  • 29
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值