365-Day Deep Learning Training Camp - Week P7: Coffee Bean Recognition

>- **🍨 This article is a learning-record blog for the (🔗365-Day Deep Learning Training Camp)**
>- **🍦 Reference article: [PyTorch in Practice | Week P5: Sneaker Recognition]**
>- **🍖 Original author: [K同学啊 | tutoring and custom projects available]**
>- **🚀 Source: [K同学's study circle]**

🍺 Requirements:

  1. Build the VGG-16 network from scratch (done)
  2. Call the official VGG-16 implementation (done in P6)
  3. Learn how to inspect the model's parameter count and related metrics (done)

🍻 Stretch goals (optional):

  1. Reach 100% validation accuracy (reached 99.2%)
  2. Draw the VGG-16 architecture diagram in PPT (a skill you need when publishing papers) (done)

🔎 Exploration (fairly hard)

  1. Make the model lighter without hurting accuracy (a parameter-count snippet follows this list)
  • VGG-16 currently has 134,276,932 total parameters
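For requirement 3 above (and as a baseline for the exploration item), the parameter count can be read off without any extra library; a minimal sketch, assuming model is an nn.Module such as the VGG16 built below:

total     = sum(p.numel() for p in model.parameters())
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Total params: {:,}  Trainable params: {:,}'.format(total, trainable))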

I. Preparation

1. Set up the GPU

import torch
import torchvision

if __name__=='__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using {} device\n".format(device))
Using cuda device

2. Load the local data and split it into train/test sets

import pathlib
def Localdata(data_dir):
    data_dir = pathlib.Path(data_dir)
    data_path = list(data_dir.glob("*"))
    ClassNames = [str(path).split("\\")[-1] for path in data_path]  # class names from the sub-directory names (Windows path separator)


    train_transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize([224,224]),
            torchvision.transforms.ToTensor(),  # convert a PIL Image / numpy.ndarray to a tensor scaled to [0, 1]
            torchvision.transforms.Normalize(   # standard ImageNet statistics
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225])

    ])
    total_dataset = torchvision.datasets.ImageFolder(data_dir,transform=train_transforms)
    print(total_dataset,'\n')
    print(total_dataset.class_to_idx,'\n')
    
    train_size = int(0.8* len(total_dataset))
    test_size  = len(total_dataset)-train_size
    print('train_size:',train_size,'test_size:',test_size,'\n')
                                  
    train_dataset,test_dataset = torch.utils.data.random_split(total_dataset,[train_size,test_size])
    return ClassNames,train_dataset,test_dataset


import os
if __name__=='__main__':
    
    root = 'G:/'
    output = 'output'
    data_dir = os.path.join(root, '49-data')
    batch_size = 32
    ClassNames,train_ds,test_ds = Localdata(data_dir)
    '''number of image classes'''
    num_classes = len(ClassNames)
    print("num_classes:{0}\n".format(num_classes))
    
            

Dataset ImageFolder
    Number of datapoints: 1200
    Root location: G:\49-data
    StandardTransform
Transform: Compose(
               Resize(size=[224, 224], interpolation=bilinear, max_size=None, antialias=warn)
               ToTensor()
               Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
           ) 

{'Dark': 0, 'Green': 1, 'Light': 2, 'Medium': 3} 

train_size: 960 test_size: 240 

num_classes:4
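One caveat on the split: random_split shuffles differently on every run, so the train/test partition is not reproducible between runs. Inside Localdata you can pass a seeded generator instead (a sketch; the seed 42 is an arbitrary choice):

g = torch.Generator().manual_seed(42)  # any fixed seed gives a stable split
train_dataset, test_dataset = torch.utils.data.random_split(
    total_dataset, [train_size, test_size], generator=g)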

3. Load and visualize the data

'''load the datasets into DataLoaders with the given batch_size'''
import matplotlib.pyplot as plt
def loadData(train_ds,test_ds,batch_size=32,root='',show_flag=False):
    train_dl = torch.utils.data.DataLoader(train_ds,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=2)
    
    test_dl  = torch.utils.data.DataLoader(test_ds,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=2)
    
    for X,y in test_dl:
        print("shape of X[N,C,H,W]:",X.shape)
        print("shape of y:",y.shape,y.dtype,'\n')
        break
    
    imgs, labels = next(iter(train_dl))
    print('Image shape:',imgs.shape,'\n')
    displayData(imgs,root,show_flag)
    return train_dl,test_dl

'''data visualization'''
def displayData(imgs,root='',flag=False):
    plt.figure('Data Visualization',figsize=(20,5))
    for i, img in enumerate(imgs[:20]):
        # rearrange dimensions from [3,224,224] to [224,224,3] for matplotlib
        npimg = img.numpy().transpose((1,2,0))
        plt.subplot(2,10,i+1)
        plt.imshow(npimg)
        plt.axis('off')
    plt.savefig(os.path.join(root,'DatasetDisplay.png'))
    if flag:
        plt.show()
    else:
        plt.close("all")
        

train_dl,test_dl = loadData(train_ds,test_ds,batch_size,root,True)

shape of X[N,C,H,W]: torch.Size([32, 3, 224, 224])
shape of y: torch.Size([32]) torch.int64 
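One caveat: displayData plots the normalized tensors directly, so matplotlib clips values outside [0, 1] and the colors look washed out. Inside displayData you could undo the normalization before imshow (a sketch, reusing the mean/std from the transform above):

import numpy as np  # add at the top of the file

# inside displayData, before plt.imshow:
mean = np.array([0.485, 0.456, 0.406])
std  = np.array([0.229, 0.224, 0.225])
npimg = np.clip(npimg * std + mean, 0, 1)  # invert the Normalize transform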

II. Building the VGG-16 Model by Hand

VGG-16 is already well documented online; for details see, for example: VGGNet-16 Architecture: A Complete Guide | Kaggle

(Figure: VGG-16 architecture diagram)
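One detail worth noting from the diagram: VGG uses only stacked 3×3 convolutions. Two 3×3 layers cover the same 5×5 receptive field as a single 5×5 layer while using fewer weights, which is quick to verify (C = 256 is an arbitrary channel count, biases ignored):

C = 256
one_5x5 = 5 * 5 * C * C       # a single 5x5 conv: 1,638,400 weights
two_3x3 = 2 * 3 * 3 * C * C   # two stacked 3x3 convs: 1,179,648 weights
print(two_3x3 / one_5x5)      # 0.72 -> ~28% fewer weights, plus an extra ReLU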

import torch.nn as nn
import torchsummary

class VGG16(nn.Module):
    def __init__(self, num_classes=4):  # take num_classes as an argument instead of relying on a global
        super(VGG16, self).__init__()
        # conv block 1
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
        )
        # conv block 2
        self.block2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
        )
        # conv block 3
        self.block3 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
        )
        # conv block 4
        self.block4 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
        )
        # conv block 5
        self.block5 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
        )

        # fully connected head for classification
        self.classifier = nn.Sequential(
            nn.Linear(in_features=512*7*7, out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096, out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096, out_features=num_classes)
        )

    def forward(self, x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = torch.flatten(x, start_dim=1)  # flatten [N,512,7,7] to [N,25088]
        x = self.classifier(x)

        return x
    
if __name__ == '__main__':
    '''select the device'''
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using {} device\n".format(device))
    
    model = VGG16().to(device)
    
    torchsummary.summary(model,(3, 224, 224))
    print(model)
            

Using cuda device

----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 64, 224, 224]           1,792
              ReLU-2         [-1, 64, 224, 224]               0
            Conv2d-3         [-1, 64, 224, 224]          36,928
              ReLU-4         [-1, 64, 224, 224]               0
         MaxPool2d-5         [-1, 64, 112, 112]               0
            Conv2d-6        [-1, 128, 112, 112]          73,856
              ReLU-7        [-1, 128, 112, 112]               0
            Conv2d-8        [-1, 128, 112, 112]         147,584
              ReLU-9        [-1, 128, 112, 112]               0
        MaxPool2d-10          [-1, 128, 56, 56]               0
           Conv2d-11          [-1, 256, 56, 56]         295,168
             ReLU-12          [-1, 256, 56, 56]               0
           Conv2d-13          [-1, 256, 56, 56]         590,080
             ReLU-14          [-1, 256, 56, 56]               0
           Conv2d-15          [-1, 256, 56, 56]         590,080
             ReLU-16          [-1, 256, 56, 56]               0
        MaxPool2d-17          [-1, 256, 28, 28]               0
           Conv2d-18          [-1, 512, 28, 28]       1,180,160
             ReLU-19          [-1, 512, 28, 28]               0
           Conv2d-20          [-1, 512, 28, 28]       2,359,808
             ReLU-21          [-1, 512, 28, 28]               0
           Conv2d-22          [-1, 512, 28, 28]       2,359,808
             ReLU-23          [-1, 512, 28, 28]               0
        MaxPool2d-24          [-1, 512, 14, 14]               0
           Conv2d-25          [-1, 512, 14, 14]       2,359,808
             ReLU-26          [-1, 512, 14, 14]               0
           Conv2d-27          [-1, 512, 14, 14]       2,359,808
             ReLU-28          [-1, 512, 14, 14]               0
           Conv2d-29          [-1, 512, 14, 14]       2,359,808
             ReLU-30          [-1, 512, 14, 14]               0
        MaxPool2d-31            [-1, 512, 7, 7]               0
           Linear-32                 [-1, 4096]     102,764,544
             ReLU-33                 [-1, 4096]               0
           Linear-34                 [-1, 4096]      16,781,312
             ReLU-35                 [-1, 4096]               0
           Linear-36                    [-1, 4]          16,388
================================================================
Total params: 134,276,932
Trainable params: 134,276,932
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 218.52
Params size (MB): 512.23
Estimated Total Size (MB): 731.32
----------------------------------------------------------------
VGG16(
  (block1): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (block2): Sequential(
    (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (block3): Sequential(
    (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (5): ReLU()
    (6): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (block4): Sequential(
    (0): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (5): ReLU()
    (6): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (block5): Sequential(
    (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (5): ReLU()
    (6): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU()
    (2): Linear(in_features=4096, out_features=4096, bias=True)
    (3): ReLU()
    (4): Linear(in_features=4096, out_features=4, bias=True)
  )
)
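The torchsummary numbers are easy to verify by hand; for example, layer Linear-32 maps 512*7*7 = 25088 input features to 4096 outputs:

print(512 * 7 * 7 * 4096 + 4096)  # weights + biases = 102,764,544, matching Linear-32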

III. Training the Model

1. Write the training function

optimizer.zero_grad(), loss.backward(), and optimizer.step() together perform one parameter update: (1) zero the accumulated gradients, (2) backpropagate to compute fresh gradients, (3) apply the update.

# training loop
def train(dataloader,model,optimizer,loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    
    train_loss, train_acc = 0, 0
    
    for X,y in dataloader:
        X, y = X.to(device),y.to(device)
        
        # compute the prediction error
        pred = model(X)
        loss = loss_fn(pred, y)
        
        # backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        # accumulate accuracy and loss
        train_acc  += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()
        
    train_acc /= size
    train_loss /= num_batches
    
    return train_acc,train_loss

2. Write the test function

def test(dataloader,model,loss_fn):
    size        = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, test_acc =0, 0

    # disable gradient tracking during evaluation (model.eval() is called by the training loop before test())
    with torch.no_grad():
        for imgs,target in dataloader:
            imgs,target = imgs.to(device),target.to(device)
            
            # compute the loss
            target_pred = model(imgs)
            loss        = loss_fn(target_pred,target)
            
            test_loss  += loss.item()
            test_acc   += (target_pred.argmax(1) == target).type(torch.float).sum().item()
            
    test_acc  /= size
    test_loss /= num_batches
    
    return test_acc, test_loss
            

3. Train, set hyperparameters, and save the best model

import time 
import copy
'''hyperparameters'''
start_epoch = 0
epochs      = 50
learn_rate  = 1e-3
loss_fn     = nn.CrossEntropyLoss()  # loss function
optimizer   = torch.optim.Adam(model.parameters(), lr=learn_rate)
# official dynamic learning-rate interface: decay the lr every 4 epochs
lambda1   = lambda epoch: 0.92 ** (epoch // 4)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
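# Sanity check on the schedule: LambdaLR scales the base lr (1e-3) by
# 0.92 ** (epoch // 4), so the lr drops by a factor of 0.92 every 4 epochs:
# epochs 0-3 -> 1.00E-03, 4-7 -> 9.20E-04, 8-11 -> 8.46E-04, matching the log below.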

train_loss = []
train_acc  = []
test_loss  = []
test_acc   = []
epoch_best_acc = 0

'''resume from a previously saved checkpoint, if requested'''
if not os.path.exists(output) or not os.path.isdir(output):
    os.makedirs(output)
if start_epoch > 0:
    resumeFile = os.path.join(output, 'epoch'+str(start_epoch)+'.pkl')
    if not os.path.exists(resumeFile) or not os.path.isfile(resumeFile):
        start_epoch = 0 
    else:
        model.load_state_dict(torch.load(resumeFile))
        
'''start training'''
print('\nStart training...')
best_model = None
for epoch in range(start_epoch, epochs):
    
    model.train()
    epoch_train_acc,epoch_train_loss = train(train_dl,model,optimizer,loss_fn)
    scheduler.step()
    
    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)
    
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    
    lr = optimizer.state_dict()['param_groups'][0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(time.strftime('[%Y-%m-%d %H:%M:%S]'), template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss, lr))

    # save the best model
    if epoch_test_acc > epoch_best_acc:
        ''' save the best model parameters '''
        epoch_best_acc = epoch_test_acc
        best_model = copy.deepcopy(model)
        print(('acc = {:.1f}%, saving model to best.pkl').format(epoch_best_acc*100))
        saveFile = os.path.join(output, 'best.pkl')
        torch.save(best_model.state_dict(), saveFile)
print('Done\n')

''' save the final model parameters '''
saveFile = os.path.join(output, 'epoch'+str(epochs)+'.pkl')
torch.save(model.state_dict(), saveFile)

The first time through, I hit several errors.


Start training...
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[7], line 35
     32 for epoch in range(start_epoch, epochs):
     34     model.train()
---> 35     epoch_train_acc,epoch_train_loss = train(train_dl, model, loss_fn, optimizer)
     36     scheduler.step()
     38     model.eval()

Cell In[5], line 13, in train(dataloader, model, optimizer, loss_fn)
     11 # compute the prediction error
     12 pred = model(X)
---> 13 loss = loss_fn(pred, y)
     15 # backpropagation
     16 optimizer.zero_grad()

TypeError: 'Adam' object is not callable
 

At the time I did not understand this error, but the traceback explains it: train() is defined as train(dataloader, model, optimizer, loss_fn), while the call passed train(train_dl, model, loss_fn, optimizer). Inside train() the name loss_fn was therefore bound to the Adam optimizer, and calling it as loss_fn(pred, y) raised TypeError: 'Adam' object is not callable. Swapping the two arguments back into the right order fixed it.
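A simple way to rule out this whole class of bugs is to call train() with keyword arguments, so the position of each argument no longer matters:

epoch_train_acc, epoch_train_loss = train(dataloader=train_dl, model=model,
                                          optimizer=optimizer, loss_fn=loss_fn)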

The second error (its screenshot did not survive) was a GPU memory problem, judging by the fix:

The fix: I reduced batch_size to 16, which still wasn't enough, and finally to 4, but even after 500 epochs training would not converge. So I pulled in some resources and moved to an RTX 3090: what skill can't do, hardware can...
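If a larger GPU is not an option, mixed-precision training is a common way to cut memory use substantially. A minimal sketch of what the training step could look like with torch.cuda.amp (an alternative I did not run, offered only for the memory-constrained case):

scaler = torch.cuda.amp.GradScaler()
for X, y in train_dl:
    X, y = X.to(device), y.to(device)
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():  # run the forward pass in float16 where safe
        pred = model(X)
        loss = loss_fn(pred, y)
    scaler.scale(loss).backward()    # scale the loss to avoid float16 underflow
    scaler.step(optimizer)
    scaler.update()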

Below are my results with the Adam optimizer:

Start training...
[2023-11-30 18:08:23] Epoch: 1, Train_acc:93.1%, Train_loss:0.334, Test_acc:96.7%, Test_loss:0.094, Lr:1.00E-03
acc = 96.7%, saving model to best.pkl
[2023-11-30 18:08:34] Epoch: 2, Train_acc:98.2%, Train_loss:0.049, Test_acc:97.5%, Test_loss:0.098, Lr:1.00E-03
acc = 97.5%, saving model to best.pkl
[2023-11-30 18:08:45] Epoch: 3, Train_acc:98.5%, Train_loss:0.044, Test_acc:96.2%, Test_loss:0.165, Lr:1.00E-03
[2023-11-30 18:08:55] Epoch: 4, Train_acc:97.6%, Train_loss:0.092, Test_acc:97.5%, Test_loss:0.085, Lr:9.20E-04
[2023-11-30 18:09:05] Epoch: 5, Train_acc:98.2%, Train_loss:0.069, Test_acc:95.8%, Test_loss:0.076, Lr:9.20E-04
[2023-11-30 18:09:15] Epoch: 6, Train_acc:98.9%, Train_loss:0.034, Test_acc:96.7%, Test_loss:0.070, Lr:9.20E-04
[2023-11-30 18:09:25] Epoch: 7, Train_acc:97.5%, Train_loss:0.058, Test_acc:97.9%, Test_loss:0.058, Lr:9.20E-04
acc = 97.9%, saving model to best.pkl
[2023-11-30 18:09:39] Epoch: 8, Train_acc:98.1%, Train_loss:0.074, Test_acc:94.2%, Test_loss:0.216, Lr:8.46E-04
[2023-11-30 18:09:51] Epoch: 9, Train_acc:98.1%, Train_loss:0.065, Test_acc:96.7%, Test_loss:0.138, Lr:8.46E-04
[2023-11-30 18:10:01] Epoch:10, Train_acc:97.7%, Train_loss:0.109, Test_acc:95.4%, Test_loss:0.127, Lr:8.46E-04
[2023-11-30 18:10:11] Epoch:11, Train_acc:97.0%, Train_loss:0.165, Test_acc:92.1%, Test_loss:2.717, Lr:8.46E-04
[2023-11-30 18:10:21] Epoch:12, Train_acc:90.8%, Train_loss:0.402, Test_acc:97.5%, Test_loss:0.108, Lr:7.79E-04
[2023-11-30 18:10:31] Epoch:13, Train_acc:95.1%, Train_loss:0.139, Test_acc:95.0%, Test_loss:0.173, Lr:7.79E-04
[2023-11-30 18:10:41] Epoch:14, Train_acc:96.0%, Train_loss:0.111, Test_acc:97.5%, Test_loss:0.075, Lr:7.79E-04
[2023-11-30 18:10:51] Epoch:15, Train_acc:93.2%, Train_loss:0.369, Test_acc:86.2%, Test_loss:0.349, Lr:7.79E-04
[2023-11-30 18:11:01] Epoch:16, Train_acc:94.6%, Train_loss:0.160, Test_acc:95.8%, Test_loss:0.132, Lr:7.16E-04
[2023-11-30 18:11:11] Epoch:17, Train_acc:96.2%, Train_loss:0.117, Test_acc:94.6%, Test_loss:0.168, Lr:7.16E-04
[2023-11-30 18:11:21] Epoch:18, Train_acc:95.8%, Train_loss:0.121, Test_acc:97.9%, Test_loss:0.064, Lr:7.16E-04
[2023-11-30 18:11:31] Epoch:19, Train_acc:97.7%, Train_loss:0.074, Test_acc:97.9%, Test_loss:0.049, Lr:7.16E-04
[2023-11-30 18:11:41] Epoch:20, Train_acc:97.6%, Train_loss:0.062, Test_acc:96.2%, Test_loss:0.090, Lr:6.59E-04
[2023-11-30 18:11:51] Epoch:21, Train_acc:99.3%, Train_loss:0.028, Test_acc:98.3%, Test_loss:0.051, Lr:6.59E-04
acc = 98.3%, saving model to best.pkl
[2023-11-30 18:12:02] Epoch:22, Train_acc:99.0%, Train_loss:0.022, Test_acc:97.9%, Test_loss:0.055, Lr:6.59E-04
[2023-11-30 18:12:12] Epoch:23, Train_acc:99.1%, Train_loss:0.024, Test_acc:97.5%, Test_loss:0.080, Lr:6.59E-04
[2023-11-30 18:12:22] Epoch:24, Train_acc:99.8%, Train_loss:0.006, Test_acc:97.9%, Test_loss:0.076, Lr:6.06E-04
[2023-11-30 18:12:32] Epoch:25, Train_acc:99.0%, Train_loss:0.038, Test_acc:92.1%, Test_loss:0.394, Lr:6.06E-04
[2023-11-30 18:12:42] Epoch:26, Train_acc:94.9%, Train_loss:0.159, Test_acc:97.5%, Test_loss:0.081, Lr:6.06E-04
[2023-11-30 18:12:52] Epoch:27, Train_acc:98.0%, Train_loss:0.054, Test_acc:96.2%, Test_loss:0.117, Lr:6.06E-04
[2023-11-30 18:13:02] Epoch:28, Train_acc:99.3%, Train_loss:0.022, Test_acc:95.8%, Test_loss:0.071, Lr:5.58E-04
[2023-11-30 18:13:13] Epoch:29, Train_acc:99.1%, Train_loss:0.020, Test_acc:98.8%, Test_loss:0.073, Lr:5.58E-04
acc = 98.8%, saving model to best.pkl
[2023-11-30 18:13:23] Epoch:30, Train_acc:99.4%, Train_loss:0.018, Test_acc:97.1%, Test_loss:0.171, Lr:5.58E-04
[2023-11-30 18:13:33] Epoch:31, Train_acc:99.8%, Train_loss:0.012, Test_acc:99.2%, Test_loss:0.039, Lr:5.58E-04
acc = 99.2%, saving model to best.pkl
[2023-11-30 18:13:45] Epoch:32, Train_acc:99.9%, Train_loss:0.004, Test_acc:98.8%, Test_loss:0.044, Lr:5.13E-04
[2023-11-30 18:13:55] Epoch:33, Train_acc:99.9%, Train_loss:0.002, Test_acc:98.3%, Test_loss:0.041, Lr:5.13E-04
[2023-11-30 18:14:05] Epoch:34, Train_acc:100.0%, Train_loss:0.001, Test_acc:98.3%, Test_loss:0.052, Lr:5.13E-04
[2023-11-30 18:14:15] Epoch:35, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.8%, Test_loss:0.046, Lr:5.13E-04
[2023-11-30 18:14:26] Epoch:36, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.051, Lr:4.72E-04
[2023-11-30 18:14:36] Epoch:37, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.052, Lr:4.72E-04
[2023-11-30 18:14:46] Epoch:38, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.067, Lr:4.72E-04
[2023-11-30 18:14:57] Epoch:39, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.059, Lr:4.72E-04
[2023-11-30 18:15:07] Epoch:40, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.061, Lr:4.34E-04
[2023-11-30 18:15:17] Epoch:41, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.8%, Test_loss:0.078, Lr:4.34E-04
[2023-11-30 18:15:27] Epoch:42, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.072, Lr:4.34E-04
[2023-11-30 18:15:37] Epoch:43, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.068, Lr:4.34E-04
[2023-11-30 18:15:48] Epoch:44, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.070, Lr:4.00E-04
[2023-11-30 18:15:58] Epoch:45, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.072, Lr:4.00E-04
[2023-11-30 18:16:08] Epoch:46, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.074, Lr:4.00E-04
[2023-11-30 18:16:18] Epoch:47, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.083, Lr:4.00E-04
[2023-11-30 18:16:28] Epoch:48, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.077, Lr:3.68E-04
[2023-11-30 18:16:39] Epoch:49, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.078, Lr:3.68E-04
[2023-11-30 18:16:49] Epoch:50, Train_acc:100.0%, Train_loss:0.000, Test_acc:98.3%, Test_loss:0.080, Lr:3.68E-04
Done

I then also gave the SGD optimizer a try.

IV. Visualizing the Results

''' visualize the results '''
def displayResult(train_acc, test_acc, train_loss, test_loss, start_epoch, epochs, output=''):

    plt.rcParams['font.sans-serif']    = ['SimHei']  # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False       # render the minus sign correctly
    plt.rcParams['figure.dpi']         = 100         # resolution
    
    epochs_range = range(start_epoch, epochs)
    
    plt.figure('Result Visualization', figsize=(12, 3))
    plt.subplot(1, 2, 1)
    
    plt.plot(epochs_range, train_acc, label='Training Accuracy')
    plt.plot(epochs_range, test_acc, label='Test Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')
    
    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, train_loss, label='Training Loss')
    plt.plot(epochs_range, test_loss, label='Test Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.savefig(os.path.join(output, 'AccuracyLoss.png'))
    plt.show()

''' plot the accuracy & loss curves '''
displayResult(train_acc, test_acc, train_loss, test_loss, start_epoch, epochs, output)

V. Loading the Model and Predicting a Specific Image

''' prediction function '''
from PIL import Image

def predict(model, img_path):
    img = Image.open(img_path).convert('RGB')       # force 3 channels; PNGs may carry an alpha channel
    test_transforms = torchvision.transforms.Compose([
        torchvision.transforms.Resize([224, 224]),  # resize the input image to a uniform size
        torchvision.transforms.ToTensor(),          # convert a PIL Image / numpy.ndarray to a tensor scaled to [0,1]
        torchvision.transforms.Normalize(           # standardize towards a normal distribution to help convergence
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])              # mean/std estimated by random sampling from the dataset
    ])
    img = test_transforms(img)
    img = img.to(device).unsqueeze(0)
    with torch.no_grad():                           # inference only, no gradients needed
        output = model(img)
    #print(output.argmax(1))

    _, indices = torch.max(output, 1)
    percentage = torch.nn.functional.softmax(output, dim=1)[0] * 100
    perc = percentage[int(indices)].item()
    result = classeNames[indices]
    print('predicted:', result, perc)


if __name__=='__main__':
    classeNames = list({'Dark': 0, 'Green': 1, 'Light': 2, 'Medium': 3})
    num_classes = len(classeNames)
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using {} device\n".format(device))
    
    model = VGG16().to(device)  # load the hand-built VGG16 model
    model.load_state_dict(torch.load(os.path.join(r'C:\Users\ubuntu\output', 'best.pkl')))
    model.eval()
    
    img_path = r'G:\49-data\Light\light (1).png'
    predict(model, img_path)
Using cuda device

predicted: Light 100.0
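If you want the full class distribution instead of only the top-1 result, the tail of predict() can be rewritten with torch.topk (a sketch, reusing output and classeNames from above):

    probs = torch.nn.functional.softmax(output, dim=1)[0]
    top_p, top_i = torch.topk(probs, k=len(classeNames))
    for p, i in zip(top_p, top_i):
        print(classeNames[int(i)], '{:.2f}%'.format(p.item() * 100))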

Personal Summary

1. Digging into the VGG-16 model left me curious how VGG-19 would compare.

2. On light-weighting the model: in block4 and block5, could the 3×3 kernels be swapped for 1×1 kernels to cut the channel count? (see the sketch below)
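As a first concrete step in that direction, a 1×1 "bottleneck" can stand in for one full-width 3×3 conv at a fraction of the parameters. A hypothetical sketch (shapes match a block5 layer; whether accuracy survives would need experiments):

import torch.nn as nn

# original block5 layer: 512 -> 512 with a 3x3 kernel, ~2.36M params
conv3x3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)

# bottleneck: squeeze channels with 1x1, do the 3x3 at lower width, expand back
bottleneck = nn.Sequential(
    nn.Conv2d(512, 128, kernel_size=1),             # ~66K params
    nn.Conv2d(128, 128, kernel_size=3, padding=1),  # ~148K params
    nn.Conv2d(128, 512, kernel_size=1),             # ~66K params
)  # ~0.28M params total for the same 512-in/512-out shape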
