P6: PyTorch in Practice: Hollywood Star Recognition

🍨 This post is a study log from the 365-day deep learning training camp (365天深度学习训练营)
🍖 Original author: K同学啊 | tutoring and custom projects available
🚀 Source: K同学's learning circle

The data-loading steps at the start follow the usual boilerplate template, so we move through them quickly.

1. Set up the GPU

import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os, PIL, pathlib, warnings

warnings.filterwarnings("ignore")             # suppress warnings

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device

2. Import the data

import os, PIL, random, pathlib

data_dir = './6-data/'
data_dir = pathlib.Path(data_dir)

data_paths  = list(data_dir.glob('*'))
classeNames = [path.name for path in data_paths]  # folder name = class name (portable, unlike splitting on "\\")
classeNames
['Angelina Jolie',
 'Brad Pitt',
 'Denzel Washington',
 'Hugh Jackman',
 'Jennifer Lawrence',
 'Johnny Depp',
 'Kate Winslet',
 'Leonardo DiCaprio',
 'Megan Fox',
 'Natalie Portman',
 'Nicole Kidman',
 'Robert Downey Jr',
 'Sandra Bullock',
 'Scarlett Johansson',
 'Tom Cruise',
 'Tom Hanks',
 'Will Smith']

There are images of 17 stars in total.

# For more on transforms.Compose, see: https://blog.csdn.net/qq_38251616/article/details/124878863
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize all input images to a uniform size
    # transforms.RandomHorizontalFlip(), # random horizontal flip
    transforms.ToTensor(),          # convert a PIL Image or numpy.ndarray to a tensor scaled to [0,1]
    transforms.Normalize(           # standardize each channel to zero mean / unit variance, which helps the model converge
        mean=[0.485, 0.456, 0.406], 
        std=[0.229, 0.224, 0.225])  # these are the standard ImageNet statistics, computed from a sample of that dataset
])

total_data = datasets.ImageFolder("./6-data/", transform=train_transforms)  # same directory as data_dir above
total_data

Image preprocessing
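To check the preprocessing, one transformed sample can be inspected; a minimal sketch (the exact min/max values vary by image):

# Sketch: inspect one transformed sample -- shape and value range after Normalize.
img, label = total_data[0]
print(img.shape)                           # torch.Size([3, 224, 224])
print(img.min().item(), img.max().item())  # roughly within [-2.2, 2.7] after standardization
print(label)                               # 0, i.e. the first class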

total_data.class_to_idx
{'Angelina Jolie': 0,
 'Brad Pitt': 1,
 'Denzel Washington': 2,
 'Hugh Jackman': 3,
 'Jennifer Lawrence': 4,
 'Johnny Depp': 5,
 'Kate Winslet': 6,
 'Leonardo DiCaprio': 7,
 'Megan Fox': 8,
 'Natalie Portman': 9,
 'Nicole Kidman': 10,
 'Robert Downey Jr': 11,
 'Sandra Bullock': 12,
 'Scarlett Johansson': 13,
 'Tom Cruise': 14,
 'Tom Hanks': 15,
 'Will Smith': 16}

Each star's images correspond to one integer label.
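To map an integer prediction back to a name later on, the dictionary can simply be inverted; a minimal sketch:

# Sketch: invert class_to_idx so an integer label maps back to a star's name.
idx_to_class = {v: k for k, v in total_data.class_to_idx.items()}
print(idx_to_class[0])  # 'Angelina Jolie'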

3. Split the dataset

train_size = int(0.8 * len(total_data))
test_size  = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
train_dataset, test_dataset

batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=1)
test_dl  = torch.utils.data.DataLoader(test_dataset,
                                       batch_size=batch_size,
                                       shuffle=False,  # no need to shuffle the test set
                                       num_workers=1)
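One batch can be pulled from the loader to confirm the shapes; a minimal sketch:

# Sketch: verify the shape of one training batch.
imgs, labels = next(iter(train_dl))
print(imgs.shape, labels.shape)  # torch.Size([32, 3, 224, 224]) torch.Size([32])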

4. Use the official VGG-16 model

VGG-16 architecture:

  • 13 convolutional layers, labelled blockX_convX;
  • 3 fully connected layers, grouped under classifier;
  • 5 pooling layers.

VGG-16 contains 16 weight layers (13 convolutional + 3 fully connected), hence the name VGG-16.
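The 13 + 3 layer count is easy to verify directly from torchvision's implementation; a minimal sketch:

# Sketch: count the Conv2d, MaxPool2d, and Linear layers in torchvision's VGG-16.
from torchvision.models import vgg16
import torch.nn as nn

m = vgg16()  # architecture only; no pretrained weights needed here
print(sum(isinstance(l, nn.Conv2d)    for l in m.features))    # 13
print(sum(isinstance(l, nn.MaxPool2d) for l in m.features))    # 5
print(sum(isinstance(l, nn.Linear)    for l in m.classifier))  # 3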

from torchvision.models import vgg16

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
    
# Load the pretrained model for fine-tuning
# (note: pretrained=True is deprecated since torchvision 0.13; the newer form is vgg16(weights=VGG16_Weights.DEFAULT))
model = vgg16(pretrained = True).to(device) # load the pretrained vgg16 model

for param in model.parameters():
    param.requires_grad = False # freeze the backbone so that only the final layer is trained

# Replace layer 6 of the classifier module (i.e. (6): Linear(in_features=4096, out_features=1000, bias=True))
# -- see the model printout below
model.classifier._modules['6'] = nn.Linear(4096,len(classeNames)) # replace the last fully connected layer so it outputs our number of classes
model.to(device)  
model

len(classeNames) is the number of classes, 17 in this example.
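Since the backbone is frozen and the newly created Linear layer has requires_grad=True by default, only the replacement layer will actually be trained; a quick check (sketch):

# Sketch: list the parameters that will receive gradients.
trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)  # expected: ['classifier.6.weight', 'classifier.6.bias']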

5. Write the training and test functions

# Training loop
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)  # size of the training set
    num_batches = len(dataloader)   # number of batches (size/batch_size, rounded up)

    train_loss, train_acc = 0, 0  # initialize training loss and accuracy
    
    for X, y in dataloader:  # fetch images and their labels
        X, y = X.to(device), y.to(device)
        
        # compute the prediction error
        pred = model(X)          # network output
        loss = loss_fn(pred, y)  # loss between the network output and the ground-truth labels
        
        # backpropagation
        optimizer.zero_grad()  # zero out the gradients
        loss.backward()        # backpropagate
        optimizer.step()       # update the parameters
        
        # accumulate acc and loss
        train_acc  += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()
            
    train_acc  /= size
    train_loss /= num_batches

    return train_acc, train_loss

def test(dataloader, model, loss_fn):
    size        = len(dataloader.dataset)  # size of the test set
    num_batches = len(dataloader)          # number of batches (size/batch_size, rounded up)
    test_loss, test_acc = 0, 0
    
    # no gradient tracking during evaluation, which saves memory and compute
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)
            
            # compute the loss
            target_pred = model(imgs)
            loss        = loss_fn(target_pred, target)
            
            test_loss += loss.item()
            test_acc  += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc  /= size
    test_loss /= num_batches

    return test_acc, test_loss

6. Set a dynamic learning rate (using the official scheduler API)

I call the official dynamic-learning-rate API here; the triple-quoted block in the code below shows an alternative, hand-written approach.

print("---------------------4.3 设置动态学习率------------------")
'''
自定义动态学习率:
def adjust_learning_rate(optimizer, epoch, start_lr):
     # 每 2 个epoch衰减到原来的 0.98
     lr = start_lr * (0.92 ** (epoch // 2))
     for param_group in optimizer.param_groups:
         param_group['lr'] = lr

learn_rate = 1e-4 # 初始学习率
optimizer  = torch.optim.SGD(model.parameters(), lr=learn_rate)

'''
# 调用官方的动态学习率方法,下面几行与三引号注释里的代码使等价的
learn_rate = 1e-4 # 初始学习率
# 调用官方动态学习率接口时使用
lambda1 = lambda epoch: 0.92 ** (epoch // 4)
optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1) #选定调整方法
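As a sanity check, the schedule can be previewed without touching the real optimizer; a minimal sketch using a dummy parameter (preview_opt and preview_sch are throwaway names introduced here):

# Sketch: preview the first few scheduled learning rates (decay by 0.92 every 4 epochs).
preview_opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=learn_rate)
preview_sch = torch.optim.lr_scheduler.LambdaLR(preview_opt, lr_lambda=lambda1)
for e in range(8):
    print(e, preview_sch.get_last_lr()[0])  # 1e-4 for epochs 0-3, then 9.2e-5 for epochs 4-7
    preview_sch.step()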

7. Train

'''
model.train() and model.eval() are explained in detail in earlier training-camp posts.

Note how the best model is saved here, and compare it with how TensorFlow2 saves models.
'''
print("---------------------4.4 Training------------------")
import copy

loss_fn    = nn.CrossEntropyLoss() # loss function
epochs     = 40
train_loss = []
train_acc  = []
test_loss  = []
test_acc   = []
best_acc = 0    # track the best test accuracy, used to pick the best model

for epoch in range(epochs):
    # update the learning rate (only when using the hand-written scheduler)
    # adjust_learning_rate(optimizer, epoch, learn_rate)
    
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)
    scheduler.step() # update the learning rate (when using the official scheduler API)
    
    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)
    
    # keep a copy of the best model in best_model
    if epoch_test_acc > best_acc:
        best_acc   = epoch_test_acc
        best_model = copy.deepcopy(model)
    
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    
    # read the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']
    
    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, 
                          epoch_test_acc*100, epoch_test_loss, lr))
    
# save the best model to disk
PATH = './6model/best_model.pth'  # parameter file name
os.makedirs(os.path.dirname(PATH), exist_ok=True)  # make sure the directory exists
torch.save(best_model.state_dict(), PATH)  # save best_model, not the last-epoch model

print('Done')

After 40 epochs of training, test accuracy only reaches 22.2%:

Epoch: 1, Train_acc:18.9%, Train_loss:2.481, Test_acc:17.8%, Test_loss:2.492, Lr:5.58E-05
Epoch: 2, Train_acc:18.8%, Train_loss:2.475, Test_acc:17.8%, Test_loss:2.511, Lr:5.58E-05
Epoch: 3, Train_acc:20.3%, Train_loss:2.472, Test_acc:18.1%, Test_loss:2.502, Lr:5.58E-05
Epoch: 4, Train_acc:20.3%, Train_loss:2.472, Test_acc:18.3%, Test_loss:2.499, Lr:5.58E-05
Epoch: 5, Train_acc:18.6%, Train_loss:2.483, Test_acc:18.3%, Test_loss:2.486, Lr:5.13E-05
Epoch: 6, Train_acc:19.4%, Train_loss:2.456, Test_acc:18.6%, Test_loss:2.489, Lr:5.13E-05
Epoch: 7, Train_acc:19.4%, Train_loss:2.469, Test_acc:18.9%, Test_loss:2.483, Lr:5.13E-05
Epoch: 8, Train_acc:20.6%, Train_loss:2.443, Test_acc:19.2%, Test_loss:2.483, Lr:5.13E-05
Epoch: 9, Train_acc:20.1%, Train_loss:2.443, Test_acc:18.9%, Test_loss:2.455, Lr:4.72E-05
Epoch:10, Train_acc:19.2%, Train_loss:2.438, Test_acc:18.6%, Test_loss:2.487, Lr:4.72E-05
Epoch:11, Train_acc:19.6%, Train_loss:2.453, Test_acc:18.6%, Test_loss:2.448, Lr:4.72E-05
Epoch:12, Train_acc:19.5%, Train_loss:2.446, Test_acc:18.3%, Test_loss:2.453, Lr:4.72E-05
Epoch:13, Train_acc:19.4%, Train_loss:2.444, Test_acc:18.6%, Test_loss:2.462, Lr:4.34E-05
Epoch:14, Train_acc:19.9%, Train_loss:2.433, Test_acc:18.6%, Test_loss:2.459, Lr:4.34E-05
Epoch:15, Train_acc:21.2%, Train_loss:2.421, Test_acc:19.4%, Test_loss:2.426, Lr:4.34E-05
Epoch:16, Train_acc:21.5%, Train_loss:2.423, Test_acc:19.4%, Test_loss:2.438, Lr:4.34E-05
Epoch:17, Train_acc:22.5%, Train_loss:2.405, Test_acc:19.4%, Test_loss:2.444, Lr:4.00E-05
Epoch:18, Train_acc:20.3%, Train_loss:2.423, Test_acc:20.0%, Test_loss:2.424, Lr:4.00E-05
Epoch:19, Train_acc:19.2%, Train_loss:2.427, Test_acc:20.6%, Test_loss:2.438, Lr:4.00E-05
Epoch:20, Train_acc:20.5%, Train_loss:2.417, Test_acc:21.1%, Test_loss:2.426, Lr:4.00E-05
Epoch:21, Train_acc:21.4%, Train_loss:2.400, Test_acc:20.8%, Test_loss:2.443, Lr:3.68E-05
Epoch:22, Train_acc:20.1%, Train_loss:2.413, Test_acc:21.4%, Test_loss:2.420, Lr:3.68E-05
Epoch:23, Train_acc:20.7%, Train_loss:2.401, Test_acc:21.7%, Test_loss:2.427, Lr:3.68E-05
Epoch:24, Train_acc:22.1%, Train_loss:2.393, Test_acc:21.7%, Test_loss:2.427, Lr:3.68E-05
Epoch:25, Train_acc:21.5%, Train_loss:2.388, Test_acc:21.9%, Test_loss:2.426, Lr:3.38E-05
Epoch:26, Train_acc:21.0%, Train_loss:2.392, Test_acc:21.7%, Test_loss:2.392, Lr:3.38E-05
Epoch:27, Train_acc:21.1%, Train_loss:2.393, Test_acc:21.7%, Test_loss:2.414, Lr:3.38E-05
Epoch:28, Train_acc:20.1%, Train_loss:2.379, Test_acc:21.7%, Test_loss:2.433, Lr:3.38E-05
Epoch:29, Train_acc:21.6%, Train_loss:2.379, Test_acc:21.7%, Test_loss:2.417, Lr:3.11E-05
Epoch:30, Train_acc:21.2%, Train_loss:2.380, Test_acc:21.7%, Test_loss:2.403, Lr:3.11E-05
Epoch:31, Train_acc:22.2%, Train_loss:2.376, Test_acc:21.7%, Test_loss:2.384, Lr:3.11E-05
Epoch:32, Train_acc:22.6%, Train_loss:2.380, Test_acc:21.7%, Test_loss:2.400, Lr:3.11E-05
Epoch:33, Train_acc:21.9%, Train_loss:2.356, Test_acc:21.7%, Test_loss:2.404, Lr:2.86E-05
Epoch:34, Train_acc:22.8%, Train_loss:2.357, Test_acc:21.9%, Test_loss:2.396, Lr:2.86E-05
Epoch:35, Train_acc:21.9%, Train_loss:2.363, Test_acc:22.2%, Test_loss:2.400, Lr:2.86E-05
Epoch:36, Train_acc:21.7%, Train_loss:2.355, Test_acc:22.2%, Test_loss:2.415, Lr:2.86E-05
Epoch:37, Train_acc:22.3%, Train_loss:2.359, Test_acc:22.2%, Test_loss:2.397, Lr:2.63E-05
Epoch:38, Train_acc:22.6%, Train_loss:2.352, Test_acc:22.2%, Test_loss:2.400, Lr:2.63E-05
Epoch:39, Train_acc:21.5%, Train_loss:2.367, Test_acc:22.2%, Test_loss:2.364, Lr:2.63E-05
Epoch:40, Train_acc:21.7%, Train_loss:2.348, Test_acc:22.2%, Test_loss:2.388, Lr:2.63E-05
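The recorded curves can be plotted to visualize this plateau; a minimal matplotlib sketch over the lists collected above:

# Sketch: plot the accuracy and loss curves recorded during training.
import matplotlib.pyplot as plt

epochs_range = range(epochs)
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc,  label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss,  label='Test Loss')
plt.legend(loc='upper right')
plt.title('Loss')
plt.show()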

8. Predict a single image from the dataset

from PIL import Image
import matplotlib.pyplot as plt  # needed for plt.imshow below

classes = list(total_data.class_to_idx)

def predict_one_image(image_path, model, transform, classes):
    
    test_img = Image.open(image_path).convert('RGB')
    plt.imshow(test_img)  # show the image being predicted

    test_img = transform(test_img)
    img = test_img.to(device).unsqueeze(0)
    
    model.eval()
    with torch.no_grad():  # no gradients needed for inference
        output = model(img)

    _, pred = torch.max(output, 1)
    pred_class = classes[pred.item()]
    print(f'Predicted class: {pred_class}')

# predict one image from the training set
predict_one_image(image_path='./6-data/Angelina Jolie/001_fe3347c0.jpg', 
                  model=model, 
                  transform=train_transforms, 
                  classes=classes)

9. Model evaluation

best_model.eval()
epoch_test_acc, epoch_test_loss = test(test_dl, best_model, loss_fn)
# check whether this matches the best accuracy we recorded during training
epoch_test_acc

It does match the best accuracy recorded during training.
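To reuse the saved weights later, rebuild the same architecture and load the state dict; a minimal sketch (assumes the same file path and class count as above):

# Sketch: reload the best model's weights from disk.
import torch
import torch.nn as nn
from torchvision.models import vgg16

reloaded = vgg16()  # same architecture, untrained
reloaded.classifier._modules['6'] = nn.Linear(4096, len(classeNames))
reloaded.load_state_dict(torch.load('./6model/best_model.pth', map_location=device))
reloaded.to(device).eval()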

10. Improving test accuracy

(1) Simple hyperparameter tuning

At first I only did some simple tuning: the initial learning rate changed from 1e-4 to 1e-2, batch_size changed from 32 to 64, and the learning rate now decays every two epochs.

# before: lambda1 = lambda epoch: 0.92 ** (epoch // 4)
# change 1: decay every two epochs
lambda1 = lambda epoch: 0.92 ** (epoch // 2)
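For completeness, the other two changes mentioned above would look like this (a sketch of the revised settings, not a full re-run script):

# change 2: larger batch size (the DataLoaders would need to be rebuilt with this value)
batch_size = 64   # was 32
# change 3: larger initial learning rate
learn_rate = 1e-2 # was 1e-4
optimizer  = torch.optim.SGD(model.parameters(), lr=learn_rate)
scheduler  = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)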

With these changes, test accuracy reaches 42.5% in the end.

(2) Model improvements

Next, modify the model itself by introducing batch normalization (BN) and dropout.

The classifier part of the model is rewritten: instead of swapping out only the last fully connected layer, the whole classifier is replaced by a new stack of three linear layers together with batch-normalization, dropout, and softmax layers.

from torchvision.models import vgg16

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
    
# Load the pretrained model for fine-tuning
model = vgg16(pretrained = True).to(device) # load the pretrained vgg16 model

for param in model.parameters():
    param.requires_grad = False # freeze the backbone so that only the new classifier is trained


# Previously we only replaced layer 6 of the classifier:
# model.classifier._modules['6'] = nn.Linear(4096, len(classeNames))
# Now the whole classifier is replaced -- check the printed model below:
model.classifier = nn.Sequential(
            # 14
            nn.Linear(25088, 1024),
            nn.BatchNorm1d(1024),
            # nn.ReLU(True),
            nn.Dropout(0.4),
            # 15
            nn.Linear(1024, 128),
            nn.BatchNorm1d(128),
            # nn.ReLU(True),
            nn.Dropout(0.4),
            # 16
            nn.Linear(128, len(classeNames)),
            nn.Softmax(dim=1)   # note: nn.CrossEntropyLoss already applies log-softmax internally,
                                # so this final Softmax is usually omitted when training with that loss
        )
model.to(device)
print(model)

This time the best test accuracy reaches 58%.

11. Personal summary

In this exercise I learned to improve model accuracy not only by tuning hyperparameters, but also by redesigning the model's classifier.
