Pytorch迁移学习训练VGG16和模型测试(华为云modelarts)

数据集

kaggle的猫狗分类的比赛:https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition/data
数据集分为:train和test两部分,将train中的数据分成两个文件夹:cat和dog,猫和狗的数据分别放在两个文件夹中,并将数据分成一部分作为验证集。
在这里插入图片描述

训练工具

华为云的modelarts中的notebook,华为云近期有免费的P100服务器,供开发者使用,虽然每次只能用一个小时,但训练这种简单的模型已经够用了。

训练代码

数据准备

由于我使用的是华为云服务器,需要将obs中的数据下载到云服务器上,不是用华为云服务器的可以忽略该步骤。

from modelarts.session import Session
session = Session()
# Download the dataset from OBS object storage onto the notebook server.
# Each split lands under ./data/<split>/ for ImageFolder to consume.
session.obs.download_dir(src_obs_dir="obs://cat-vs-dog20200426/train/", dst_local_dir="./data")
session.obs.download_dir(src_obs_dir="obs://cat-vs-dog20200426/valid/", dst_local_dir="./data")
session.obs.download_dir(src_obs_dir="obs://cat-vs-dog20200426/test/", dst_local_dir="./data")

训练代码

导入工具包

import torch as t
import torchvision
from torchvision import datasets , transforms, models
from torch.autograd import Variable
import matplotlib.pyplot as plt
import pylab
import os
import time

从我的obs中导入vgg16的预训练文件(因为网上在线下载较慢,所以提前下载好),不是用华为云服务器的可以省略这步

from modelarts.session import Session
session = Session()
# Copy the VGG16 ImageNet checkpoint from OBS into what is presumably
# torchvision's model cache on ModelArts, so that models.vgg16(pretrained=True)
# finds it locally instead of downloading — verify the cache path on other setups.
session.obs.download_file(src_obs_file="obs://cat-vs-dog20200426/vgg16-397923af.pth", dst_local_dir="/home/ma-user/.torch/models/")

如果是使用gpu,将数据和模型存入cuda

# True when a CUDA GPU is visible to PyTorch; used below to decide whether
# to move the model, loss, and batches onto the GPU.
use_gpu = t.cuda.is_available()
print(use_gpu)

数据的预处理,用ImageFolder函数打开数据集,并且图像resize为(224,224)。

data_dir = "./data"

# Build the per-split preprocessing, dataset, and loader objects.  Both
# splits get the same pipeline: resize to VGG's 224x224 input and tensorize.
data_transform = {}
image_datasets = {}
dataloader = {}
for split in ["train", "valid"]:
    data_transform[split] = transforms.Compose([
        transforms.Resize([224, 224]),
        transforms.ToTensor(),
    ])
    # ImageFolder derives class labels from the sub-folder names.
    image_datasets[split] = datasets.ImageFolder(
        root=os.path.join(data_dir, split),
        transform=data_transform[split],
    )
    dataloader[split] = t.utils.data.DataLoader(
        dataset=image_datasets[split],
        batch_size=20,
        shuffle=True,
    )

定义训练的模型,因为vgg16原本的输出是1000类,而我们是二分类,所以要将模型最后一层全连接层的输出维度改为2(配合交叉熵损失,无需显式的softmax层)

# Load VGG16 with ImageNet-pretrained weights for transfer learning.
model = models.vgg16(pretrained = True)

# Freeze the pretrained parameters so only the new classifier head trains.
# BUG FIX: the original wrote `parma.require_gard`, a misspelling of
# `requires_grad` that silently created a new attribute and froze nothing.
for param in model.parameters():
    param.requires_grad = False

# Replace the 1000-way ImageNet head with a 2-way (cat/dog) classifier.
# Input is 7*7*512 = 25088 features from the conv stack at 224x224 input.
model.classifier = t.nn.Sequential(
            t.nn.Linear(7*7*512, 4096),
            t.nn.ReLU(),
            t.nn.Dropout(p=0.5),  # zero each element with prob 0.5 to curb overfitting
            t.nn.Linear(4096, 4096),
            t.nn.ReLU(),
            t.nn.Dropout(p=0.5),
            t.nn.Linear(4096, 2)
        )
print(model)

打印输出的结果如下:

VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU()
    (2): Dropout(p=0.5)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU()
    (5): Dropout(p=0.5)
    (6): Linear(in_features=4096, out_features=2, bias=True)
  )
)

定义损失为交叉熵损失,模型的优化方式为Adam,并将模型放入cuda。

# Cross-entropy loss for the 2-class problem.  Adam receives only the new
# classifier head's parameters, so the frozen feature extractor is untouched.
cost = t.nn.CrossEntropyLoss()
optimizer = t.optim.Adam(model.classifier.parameters(), lr=1e-5)

# Move model and loss to the GPU when one is available.
if use_gpu:
    model = model.cuda()
    cost = cost.cuda()
print(model)

训练代码
模型的参数更新只更新全连接层部分,只1个epochs模型就收敛的很好了,P100训练大概也就十几分钟。

n_epochs = 1  # one epoch suffices since only the classifier head is trained
time_open =time.time()
for epoch in range(n_epochs):
    print("Epoch {}/{}".format(epoch,n_epochs))
    print("-"*10)
    # Each epoch runs a training pass followed by a validation pass.
    for i in ["train","valid"]:
        running_loss = 0.0
        running_correct = 0.0
        if i =="train":
            print("Training......")
            model.train(True)   # training mode: dropout active
        else:
            print("Valid......")
            model.train(False)  # eval mode: dropout disabled
        # enumerate from 1 so `batch` counts batches processed so far.
        for batch,data in enumerate(dataloader[i],1):
            X, y = data
            if (use_gpu):
                X, y = X.cuda(), y.cuda()
            #print(X.shape)

            # NOTE(review): Variable is a legacy pre-0.4 wrapper; tensors
            # work directly in modern PyTorch.
            X, y = Variable(X), Variable(y)
            output = model(X)
            #print(output.data)
            # Predicted class = argmax over the 2 output logits.
            _,pred = t.max(output.data, 1)
            optimizer.zero_grad()
            loss = cost(output, y)
            # Backprop and update only during the training phase.
            if i =="train":
                loss.backward()
                optimizer.step()
            running_loss += loss.data
            running_correct += t.sum(pred == y.data)
            #print(running_loss,running_correct)
            # Progress report every 200 batches (batch size is 20).
            if batch%200 ==0 and i == "train":
                print("Batch:{},Train_loss:{:.4},Train_acc:{:.4}".format(
                    batch, running_loss/(batch*20), float(running_correct)/(batch*20.0)))
        # Per-phase averages over the whole split.
        epoch_loss = running_loss/len(image_datasets[i])
        epoch_acc = float(running_correct)/len(image_datasets[i])
        print("{} Loss:{:.4} Acc:{:.4}".format(i,epoch_loss,epoch_acc))
        time_end=time.time()
        print(time_end-time_open)

训练结果

Epoch 0/1
----------
Training......
Batch:200,Train_loss:0.01097,Train_acc:0.9225
Batch:400,Train_loss:0.008127,Train_acc:0.9395
Batch:600,Train_loss:0.006853,Train_acc:0.9481
Batch:800,Train_loss:0.006385,Train_acc:0.9508
Batch:1000,Train_loss:0.006009,Train_acc:0.9535

模型的保存
模型的保存有只保存参数和保存全部模型的方式,由于华为云服务器只支持第一种方式,所以保存模型代码如下:

# Save only the learned parameters (recommended over pickling the whole model).
t.save(model.state_dict(), 'cat-dog-vgg16.pth')
# To restore later: model.load_state_dict(t.load('cat-dog-vgg16.pth'))

测试代码

测试代码不再啰嗦,这里直接放代码,并读取test中的所有文件,将测试结果写入csv文件

from torchvision import transforms, datasets as ds ,models
import torchvision as tv
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image,ImageDraw
import torch as t
from torch.autograd import Variable


# Same preprocessing as training: resize to 224x224, then convert to tensor.
transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
])

# Map class index to a human-readable label string.
# NOTE(review): assumes ImageFolder assigned cat -> 0, dog -> 1 (alphabetical
# folder order) — confirm against the training data layout.
label_id_name_dict = {
    0: "猫",
    1: "狗",
}

use_gpu = t.cuda.is_available()
print(use_gpu)
test_image_name = './data/test/2.jpg'

# Fetch the VGG16 ImageNet checkpoint into the server's torch model cache
# so the pretrained backbone loads without a network download.
from modelarts.session import Session
session = Session()
session.obs.download_file(src_obs_file="obs://cat-vs-dog20200426/vgg16-397923af.pth", dst_local_dir="/home/ma-user/.torch/models/")

# Rebuild the exact architecture used at training time, then load the
# fine-tuned weights on top of it.
model = models.vgg16(pretrained = True)
# Freeze the backbone.  BUG FIX: the original's `require_gard` misspelling
# created a stray attribute and froze nothing (harmless for inference, but wrong).
for param in model.parameters():
    param.requires_grad = False

# 2-way classifier head, identical to the training-time definition — the
# saved state_dict only loads if the module structure matches.
model.classifier = t.nn.Sequential(
            t.nn.Linear(7*7*512, 4096),
            t.nn.ReLU(),
            t.nn.Dropout(p=0.5),  # zero each element with prob 0.5 to curb overfitting
            t.nn.Linear(4096, 4096),
            t.nn.ReLU(),
            t.nn.Dropout(p=0.5),
            t.nn.Linear(4096, 2)
        )
#print(model)
# Load the fine-tuned parameters into the rebuilt network.
# BUG FIX: training saved 'cat-dog-vgg16.pth', but the original tried to
# load a non-existent 'cat-dog-vgg16.pkl'.
state_dict = t.load('cat-dog-vgg16.pth')
model.load_state_dict(state_dict)
if (use_gpu):
       model= model.cuda()

def predict(model, test_image_name):
    """Classify a single image file and print '<path>:<label>'.

    Args:
        model: the fine-tuned 2-class VGG16 network.
        test_image_name: filesystem path of the image to classify.
    """
    test_image = Image.open(test_image_name)
    # Add a batch dimension: (C, H, W) -> (1, C, H, W).
    test_image_tensor = transform(test_image).unsqueeze(0)
    if (use_gpu):
        test_image_tensor = test_image_tensor.cuda()
    model.eval()
    # BUG FIX: the original called `t.no_grad()` as a bare statement, which
    # builds a context manager but never enters it, so gradients were still
    # tracked.  Wrap the forward pass properly.
    with t.no_grad():
        out = model(test_image_tensor).data.cpu().numpy()
    # Predicted class index = argmax over the two logits.
    idx = np.argmax(out, axis=1)
    label = label_id_name_dict[idx[0]]
    print(test_image_name + ":" + label)

test_image_name = './data/test/20.jpg'
predict(model, test_image_name)
import os

def eachFile(filepath):
    """Run the classifier on every file directly under `filepath` (non-recursive)."""
    for entry in os.listdir(filepath):
        # BUG FIX: the original built the path with '%s%s' string pasting
        # wrapped in a one-argument os.path.join, which only works when
        # `filepath` ends in '/'.  A proper two-argument join is robust.
        child = os.path.join(filepath, entry)
        predict(model, child)

eachFile("./data/test/")
import csv

# Open the output CSV and write the header row.
# BUG FIX: the original header misspelled the column as 'lablel'.
csvFile = open("./cat_dog_result.csv", 'w', newline='')
writer = csv.writer(csvFile)
writer.writerow(['id', 'label'])

def save_result_to_csv(model, filepath):
    """Classify every image under `filepath` and append (id, class) rows to the CSV.

    Args:
        model: the fine-tuned 2-class network.
        filepath: directory of test images, one file per image.
    """
    model.eval()  # hoisted out of the loop; setting eval mode once suffices
    for entry in os.listdir(filepath):
        child = os.path.join(filepath, entry)
        test_image = Image.open(child)
        test_image_tensor = transform(test_image).unsqueeze(0)
        if (use_gpu):
            test_image_tensor = test_image_tensor.cuda()
        # BUG FIX: the original's bare `t.no_grad()` statement never entered
        # the context, so inference still tracked gradients.
        with t.no_grad():
            out = model(test_image_tensor).data.cpu().numpy()
        idx = np.argmax(out, axis=1)
        # Image id = filename minus its 4-character extension (e.g. '.jpg').
        writer.writerow([entry[:-4], idx[0]])

save_result_to_csv(model, "./data/test/")
csvFile.close()

# Copy the results, trained weights, and notebook back to OBS
# (skip this step when not running on the cloud server).
from modelarts.session import Session
session = Session()
session.obs.upload_file(src_local_file='./cat_dog_result.csv', dst_obs_dir='obs://cat-vs-dog20200426/VGG16/')
# BUG FIX: training saved the weights as 'cat-dog-vgg16.pth'; the original
# tried to upload a non-existent '.pkl' file.
session.obs.upload_file(src_local_file='./cat-dog-vgg16.pth', dst_obs_dir='obs://cat-vs-dog20200426/VGG16/')
session.obs.upload_file(src_local_file='./cat-dog-vgg16.ipynb', dst_obs_dir='obs://cat-vs-dog20200426/VGG16/')

拓展

要训练其他的模型分类的代码,也是如此,就将模型换成相应模型,稍微修改模型的结构。
例如resnet50、densenet121

# resnet50: replace the 1000-way fc head with a 2-way one.
model = models.resnet50(pretrained = True)
model.fc = t.nn.Linear(2048, 2)
optimizer = t.optim.Adam(model.fc.parameters(),lr=0.00001)  # optimize only the fc classifier
# densenet121: swap the classifier for a 2-way head.
model = models.densenet121(pretrained = True)
model.classifier = t.nn.Sequential(  # fully connected head
            t.nn.Linear(1024, 4096),
            t.nn.ReLU(),
            t.nn.Dropout(p=0.5),  # zero each element with prob 0.5 to curb overfitting
            t.nn.Linear(4096, 2)
        )
optimizer = t.optim.Adam(model.classifier.parameters(),lr=0.00001)  # optimize only the classifier head
  • 1
    点赞
  • 12
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值