Pattern Recognition Basics --- PyTorch framework + Kaggle (cat_dog) dataset

A note before we start: if you are just getting into deep learning, I suggest going straight to PyTorch rather than TensorFlow. Having compared the two frameworks, I find PyTorch much closer to everyday program design. No theory here; this post only walks through the experimental steps.

1. Environment setup:

Anaconda (a virtual environment is recommended) and PyTorch. Note: if you hit a "No module named ..." error while running the code, run conda install xxx if you are inside a conda virtual environment, or pip install xxx otherwise.
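
To confirm the environment is ready, a quick sanity check like the one below helps (a minimal sketch; it assumes torchvision is installed alongside PyTorch, since the training script later in this post needs it):

import torch
import torchvision

print(torch.__version__)          # PyTorch version
print(torchvision.__version__)    # torchvision version (provides datasets/transforms)
print(torch.cuda.is_available())  # True means the .cuda() calls below will work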

2. Dataset:

You can download it from the Kaggle website yourself, or click here to download.

3. The code here runs PyTorch on the GPU. If you only have a CPU, remove every .cuda() call yourself.
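
As an alternative to deleting the .cuda() calls by hand, a common device-agnostic pattern looks like this (just a sketch; the scripts below keep the explicit .cuda() style):

import torch

# Picks the GPU when one is available, otherwise falls back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# model = vgg11_bn().to(device)
# data, target = data.to(device), target.to(device)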

4. Model

import torch
import torch.nn as nn

cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
}

class VGG(nn.Module):

    def __init__(self, features, num_class=100):
        super().__init__()
        self.features = features
        # With 64x64 inputs the conv stack ends at 512x2x2, which would not
        # fit nn.Linear(512, ...); adaptive pooling squeezes it to 512x1x1.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        self.classifier = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_class)
        )

    def forward(self, x):
        output = self.features(x)
        output = self.avgpool(output)
        output = output.view(output.size(0), -1)
        output = self.classifier(output)
        return output

def make_layers(cfg, batch_norm=False):
    layers = []

    input_channel = 3
    for l in cfg:
        if l == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            continue

        layers += [nn.Conv2d(input_channel, l, kernel_size=3, padding=1)]

        if batch_norm:
            layers += [nn.BatchNorm2d(l)]
        
        layers += [nn.ReLU(inplace=True)]
        input_channel = l
    
    return nn.Sequential(*layers)

def vgg11_bn(num_class=100):
    # cat_dog has only 2 classes, so pass num_class=2 for it (see step 6);
    # note that Prec@5 is only meaningful when num_class >= 5.
    return VGG(make_layers(cfg['A'], batch_norm=True), num_class=num_class)
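
A quick shape check for the model above (a minimal sketch; the 64x64 input size matches the Resize((64, 64)) used by the data loaders in the next step):

import torch

net = vgg11_bn()
dummy = torch.randn(2, 3, 64, 64)  # a fake batch of two RGB images
print(net(dummy).shape)            # expected: torch.Size([2, 100])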

5. Training + validation

import torch
import torch.nn as nn
import torch.nn.functional as F
import datetime
import time
import os
import argparse
from torchvision import datasets, transforms  # used by load_data below
from vgg import vgg11_bn

parser = argparse.ArgumentParser(description='PyTorch kaggle Recognition')
parser.add_argument('--epochs', default=90, type=int)
parser.add_argument('--lr_decay_step', default=8000, type=int)
args = parser.parse_args()

class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = min(max(topk), output.size(1))  # cannot ask for more classes than exist
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape() instead of view(): the sliced tensor may be non-contiguous
        correct_k = correct[:min(k, maxk)].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
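
# Worked example for accuracy() (kept as comments so the script stays runnable):
#   output = tensor([[0.1, 0.9], [0.8, 0.2]]), target = tensor([1, 1])
#   top-1 predictions are [1, 0]; one of two is correct -> returns [tensor(50.)]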

def step_adjust_learning_rate(optimizer, lr0, step, step_size, gamma):
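    # Step decay: with lr0=0.001, gamma=0.1, step_size=8000 this yields
    # lr=1e-3 for steps 0..7999, 1e-4 for steps 8000..15999, and so on.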
    lr = lr0 * (gamma ** (step // step_size))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr

class TrainDSSELSTM:
    def __init__(self):
        self.seed = 2020303
        self.learning_rate = 0.001
        self.step = 10
        self.train_loader = None
        self.test_loader = None
        self.model = vgg11_bn().cuda()
        #self.model = Nasnet().cuda()

    def load_data(self):
        kwargs = {'num_workers': 0, 'pin_memory': True}
        train_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder('C:\\Users\\夜阑寄语\\Desktop\\tmp\\kaggle\\dataset\\train',
                                transform=transforms.Compose([transforms.Resize((64, 64)),
                                transforms.ToTensor(),
                           ])),
            batch_size=12, shuffle=True, **kwargs)
        test_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder('C:\\Users\\夜阑寄语\\Desktop\\tmp\\kaggle\\dataset\\val', 
                                transform=transforms.Compose([transforms.Resize((64, 64)),
                                transforms.ToTensor(),
                           ])),
            batch_size=12, shuffle=True, **kwargs)
        self.train_loader = train_loader
        self.test_loader = test_loader

    def train(self, epoch):
        self.model.train()  # enable Dropout/BatchNorm training behaviour
        top1 = AverageMeter()
        top5 = AverageMeter()
        prev_time = time.time()
        optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

        # Global step count, so the decay actually kicks in after
        # --lr_decay_step iterations (the original passed a constant 0 here).
        step = (epoch - 1) * len(self.train_loader)
        learning_rate = step_adjust_learning_rate(optimizer=optimizer, lr0=self.learning_rate, step=step, step_size=args.lr_decay_step, gamma=0.1)
        
        for iteration, (data, target) in enumerate(self.train_loader):
            data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            output = self.model(data)
            # The model outputs raw logits, so use cross_entropy
            # (nll_loss would expect log-probabilities).
            loss = F.cross_entropy(output, target)
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            top1.update(prec1.item(), data.size(0))
            top5.update(prec5.item(), data.size(0))

            loss.backward()
            optimizer.step()

            # Determine approximate time left
            batches_done = epoch * len(self.train_loader) + iteration
            batches_left = args.epochs * len(self.train_loader) - batches_done
            time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
            prev_time = time.time()
            if iteration % self.step == 0:
                print('Train Epoch: {} lr: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} Acc: {:.2f}({:.2f}) ETA: {}'.format(
                    epoch, learning_rate, iteration * len(data), len(self.train_loader.dataset),
                    100. * iteration / len(self.train_loader), loss.item(), prec1.item(), prec5.item(), time_left))

        print(' * training loss: {:.6f} Prec@1: {top1.avg:.3f} Prec@5: {top5.avg:.3f}'.format(loss.item(), top1=top1, top5=top5))

        checkpoint = {
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(checkpoint, './checkpoint/model.pth')
        print('model has been saved!')

    def test(self, criterion):
        self.model.eval()  # disable Dropout/BatchNorm training behaviour
        top1 = AverageMeter()
        top5 = AverageMeter()
        losses = AverageMeter()
        with torch.no_grad():  # no gradients needed for the whole evaluation loop
            for iter_, (data, target) in enumerate(self.test_loader):
                data, target = data.cuda(), target.cuda()
                output = self.model(data)
                prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
                top1.update(prec1.item(), data.size(0))
                top5.update(prec5.item(), data.size(0))
                loss = criterion(output, target)
                losses.update(loss.item(), data.size(0))

                if iter_ % self.step == 0:
                    print('Test set: [{}/{} ({:.0f}%)] Average loss: {:.4f}, Prec@1: {:.3f} Prec@5: {:.3f}'.format(
                            iter_ * len(data), len(self.test_loader.dataset),
                        100. * iter_ / len(self.test_loader), loss.item(), prec1.item(), prec5.item()))
        print(' * testing loss: {:.4f} Prec@1: {top1.avg:.3f} Prec@5: {top5.avg:.3f}'.format(loss.item(), top1=top1, top5=top5))

def main():

    torch.cuda.set_device(0)
    criterion = nn.CrossEntropyLoss().cuda()
    train = TrainDSSELSTM()  # the original forgot to instantiate this
    train.load_data()

    for epoch in range(1, args.epochs + 1):
        train.train(epoch)
        train.test(criterion)

if __name__ == '__main__':
    main()
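
Once training has saved ./checkpoint/model.pth, reloading it for evaluation or inference can look like the minimal sketch below (the keys match the checkpoint dict built in train() above):

import torch
from vgg import vgg11_bn

model = vgg11_bn().cuda()
checkpoint = torch.load('./checkpoint/model.pth')
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()  # switch off Dropout/BatchNorm training behaviour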

6. For your own dataset, just point the paths in load_data at your own folders. MNIST works too, but you first need to export the MNIST images into the corresponding folders. For a different classification task, set the number of classes to match; Kaggle's cat_dog has only 2 classes (the sketch below shows how ImageFolder maps folder names to class indices).
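
A minimal sketch of how torchvision's ImageFolder assigns labels, assuming the Kaggle archive was unpacked into one subfolder per class (the folder names 'cat' and 'dog' here are an assumption; match them to your own layout):

# ImageFolder expects one subfolder per class, e.g.:
#   dataset/train/cat/xxx.jpg
#   dataset/train/dog/xxx.jpg
#   dataset/val/cat/xxx.jpg
#   dataset/val/dog/xxx.jpg
from torchvision import datasets

train_set = datasets.ImageFolder('dataset/train')
print(train_set.class_to_idx)  # e.g. {'cat': 0, 'dog': 1}
print(len(train_set.classes))  # the value to use for num_class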
If you run into any problems along the way, feel free to message me.
Hoping we can all improve together.
