Deep Learning 07: Coffee Bean Classification (Reproducing VGG-16)

🍨 This post is a learning-log entry for the [🔗365天深度学习训练营] (365-day deep learning training camp)

🍖 Original author: [K同学啊]

一、Preparation

1. Set up the GPU

Use the GPU if one is available on the device; otherwise fall back to the CPU.

import torch
import torchvision
import torch.nn as nn
import os,PIL,pathlib,warnings
from torchvision import transforms, datasets

warnings.filterwarnings("ignore")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device

2. Load the data

data_dir = "/content/drive/MyDrive/Colab Notebooks/data"
data_dir = pathlib.Path(data_dir)

data_paths = list(data_dir.glob('*'))

classeNames = [path.name for path in data_paths]  # each subfolder name is a class label
classeNames
['Dark', 'Light', 'Medium', 'Green']
train_transforms = transforms.Compose([
    transforms.Resize([224,224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean = [0.485,0.456,0.406],
        std = [0.229,0.224,0.225]
    )
])

test_transforms = transforms.Compose([
    transforms.Resize([224,224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean = [0.485,0.456,0.406],
        std = [0.229,0.224,0.225]
    )
])
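The training and test pipelines above are deliberately identical: resize to 224×224, convert to a tensor, and normalize with the ImageNet mean and standard deviation. If data augmentation is wanted for the training set only, extra transforms can be placed before ToTensor; a minimal sketch (the flip and rotation choices below are illustrative, not part of the original setup):

augmented_train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.RandomHorizontalFlip(p=0.5),   # random left-right flip
    transforms.RandomRotation(10),            # small random rotation (degrees)
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])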

total_data = datasets.ImageFolder(data_dir, transform=train_transforms)  # same data_dir as defined above

total_data

total_data.class_to_idx
{'Dark': 0, 'Green': 1, 'Light': 2, 'Medium': 3}
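Note that classeNames above follows whatever order glob happened to return, while ImageFolder sorts the folder names alphabetically before assigning indices, which is why the two orderings differ. A minimal sketch that reads the index-aligned label list straight from the dataset (idx_to_class is just an illustrative variable name):

idx_to_class = total_data.classes   # ['Dark', 'Green', 'Light', 'Medium'], aligned with class_to_idx
print(idx_to_class)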

3. Split the dataset

train_size = int(0.8*len(total_data))
test_size = len(total_data) - train_size
train_dataset,test_dataset = torch.utils.data.random_split(total_data,[train_size,test_size])
train_dataset,test_dataset
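random_split draws a fresh random permutation each time it runs, so the train/test membership changes between runs. If a reproducible split is preferred, a seeded generator can be passed in; a minimal sketch (the seed 42 is an arbitrary choice):

train_dataset, test_dataset = torch.utils.data.random_split(
    total_data,
    [train_size, test_size],
    generator=torch.Generator().manual_seed(42)  # fixed seed for a repeatable split
)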


batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,batch_size=batch_size,shuffle=True,num_workers=1)
test_dl = torch.utils.data.DataLoader(test_dataset,batch_size=batch_size,shuffle=True,num_workers=1)
for X,y in test_dl:
    print("Shape of X [N,C,H,W]: ",X.shape)
    print("Shape of y: ",y.shape)
    break

二、Building the VGG-16 Model by Hand

VGG-16 architecture at a glance:

  • 13 convolutional layers, denoted blockX_convX
  • 3 fully connected layers, denoted fcX and predictions
  • 5 pooling layers, denoted blockX_pool

VGG-16 has 16 weight layers (13 convolutional and 3 fully connected), hence the name VGG-16.
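Before spelling the blocks out by hand in the next subsection, the same 13-conv / 5-pool structure can be expressed compactly as a configuration list, in the spirit of torchvision's own VGG builder; a minimal sketch (cfg and make_layers are illustrative names, not from the original post):

import torch.nn as nn

# Numbers are conv output channels, 'M' marks a 2x2 max-pool; this is the VGG-16 ("D") configuration.
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']

def make_layers(cfg, in_channels=3):
    layers = []
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        else:
            layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
            layers.append(nn.ReLU())
            in_channels = v
    return nn.Sequential(*layers)

features = make_layers(cfg)   # 13 conv layers and 5 pools, matching the five blocks below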

1. Build the model

import torch.nn.functional as F

class vgg16(nn.Module):
    def __init__(self):
        super(vgg16,self).__init__()
        # Convolutional block 1
        self.block1 = nn.Sequential(
            nn.Conv2d(3,64,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(64,64,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2),stride=(2,2))
        )
        # Convolutional block 2

        self.block2 = nn.Sequential(
            nn.Conv2d(64,128,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(128,128,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2),stride=(2,2))
        )
        # Convolutional block 3
        self.block3 = nn.Sequential(
            nn.Conv2d(128,256,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(256,256,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(256,256,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2),stride=(2,2))
        )
        # Convolutional block 4

        self.block4 = nn.Sequential(
            nn.Conv2d(256,512,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(512,512,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(512,512,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2),stride=(2,2))
        )
        # Convolutional block 5
        self.block5 = nn.Sequential(
            nn.Conv2d(512,512,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(512,512,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(512,512,kernel_size=(3,3),stride=(1,1),padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2),stride=(2,2))
        )
        # Fully connected classifier
        self.classifier = nn.Sequential(
            nn.Linear(in_features=512*7*7,out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096,out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096,out_features=4)
        )
    def forward(self,x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = torch.flatten(x,start_dim=1)
        x = self.classifier(x)
        return x


device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device}")

model = vgg16().to(device)
model
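A quick sanity check that the hand-built network is wired correctly is to push one dummy batch through it; a minimal sketch (the random tensor is just a placeholder input):

# One fake 224x224 RGB image should come out as 4 class scores
dummy = torch.randn(1, 3, 224, 224).to(device)
print(model(dummy).shape)   # expected: torch.Size([1, 4])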


2. View the model details

from torchsummary import summary
summary(model, (3, 224, 224))
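If torchsummary is not available, the parameter counts can also be computed directly from the model; a minimal sketch:

# Total and trainable parameter counts, with no extra dependency
total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Total params: {total_params:,}  Trainable params: {trainable_params:,}")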

三、Training the Model

1. Write the training function

def train(dataloader,model,loss_fn,optimizer):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    train_acc, train_loss = 0,0

    for X,y in dataloader:
        X,y = X.to(device),y.to(device)
        pred = model(X)
        loss = loss_fn(pred,y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_acc += (pred.argmax(1)==y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size
    train_loss /= num_batches
    return train_acc, train_loss

2. Write the test function

The test function is largely the same as the training function, except that it performs no gradient descent and therefore never updates the network weights, so no optimizer needs to be passed in.

def test(dataloader,model,loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_acc, test_loss = 0,0

    with torch.no_grad():
        for imgs, target in dataloader:
            imgs,target = imgs.to(device),target.to(device)

            target_pred = model(imgs)
            loss = loss_fn(target_pred,target)
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()
            test_loss += loss.item()
        test_acc /= size
        test_loss /= num_batches

        return test_acc, test_loss

3. Run the training

import copy
optimizer = torch.optim.Adam(model.parameters(),lr=1e-4)
loss_fn = nn.CrossEntropyLoss()

epochs = 40

train_acc =[]
train_loss = []
test_acc = []
test_loss = []

best_acc = 0

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl,model,loss_fn,optimizer)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl,model,loss_fn)

    if epoch_test_acc > best_acc:
        best_acc = epoch_test_acc
        best_model = copy.deepcopy(model)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    lr = optimizer.state_dict()['param_groups'][0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss, lr))
PATH = './best_model.pth'
torch.save(best_model.state_dict(), PATH)  # save the best model tracked during training, not the last epoch

print('Done')
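To reuse the saved weights later, for example in a fresh session, they can be loaded back into a newly constructed network; a minimal sketch, assuming best_model.pth was written as above (loaded_model is just an illustrative name):

# Rebuild the architecture and load the saved best weights
loaded_model = vgg16().to(device)
loaded_model.load_state_dict(torch.load(PATH, map_location=device))
loaded_model.eval()   # switch to inference mode before evaluating or predicting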

四、Visualizing the Results

1. Loss and Accuracy curves

import matplotlib.pyplot as plt

import warnings

warnings.filterwarnings("ignore")

epoch_range = range(epochs)

plt.figure(figsize=(12,3))
plt.subplot(1,2,1)

plt.plot(epoch_range,train_acc,label='Training Accuracy')
plt.plot(epoch_range,test_acc,label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1,2,2)
plt.plot(epoch_range,train_loss,label='Training Loss')
plt.plot(epoch_range,test_loss,label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

2. Predict a specified image

from PIL import Image

classes  = list(total_data.class_to_idx)

def predict_one_image(image_path, model, transform, classes):
    test_img = Image.open(image_path).convert("RGB")

    plt.imshow(test_img)
    test_img = transform(test_img)
    img = test_img.to(device).unsqueeze(0)  # add a batch dimension: [1, 3, 224, 224]

    model.eval()
    with torch.no_grad():                   # inference only, no gradients needed
        output = model(img)
    _, pred = torch.max(output, 1)
    pred_class = classes[pred.item()]
    print(f'predict_result: {pred_class}')
image_path='/content/drive/MyDrive/Colab Notebooks/data/Dark/dark (1).png'
predict_one_image(image_path,model=model,transform=train_transforms,classes=classes)
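Because the training and test transforms are identical here, passing train_transforms is harmless; the best checkpoint tracked during training can also be used instead of the last-epoch weights. A minimal sketch reusing the same helper:

# Predict with the best model kept during training rather than the final-epoch model
predict_one_image(image_path, model=best_model, transform=test_transforms, classes=classes)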


五、Using the Official VGG-16 Network

1. Call the official API

from torchvision.models import vgg16
device = "cuda" if torch.cuda.is_available() else "cpu"
model = vgg16(pretrained=True).to(device)  # pretrained=True is deprecated in newer torchvision; the weights= argument replaces it

# Freeze all pretrained weights so only the new classifier head is trained
for param in model.parameters():
    param.requires_grad = False

# Replace the final fully connected layer with a head sized for our classes
model.classifier[6] = nn.Linear(4096, len(classeNames))
model.to(device)
model
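With the backbone frozen, only the new classification head has trainable parameters, so the optimizer for fine-tuning should be built over just those; a minimal sketch showing how the train/test functions from above could be reused (the learning rate is an arbitrary choice):

# Optimize only the parameters that still require gradients (the replaced final layer)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4)
loss_fn = nn.CrossEntropyLoss()

model.train()
epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)
model.eval()
epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)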

Training results:


六、Personal Summary

In this exercise I mainly reproduced the VGG-16 network and gained a better understanding of how the VGG-16 architecture is put together. There are some differences between the official model and this reproduction: a few parameters in the reproduced network were modified, and the fully connected classifier here keeps its ReLU activations but leaves out the Dropout layers used in the official version.
