Learning Notes | PyTorch Tutorial 28 (Model Finetune)



These learning notes are mainly excerpted from the "深度之眼" course and summarized here for easy reference.
The PyTorch version used is 1.2.
Sharing a method for freezing/unfreezing layers: Zhihu

  • Transfer Learning & Model Finetune
  • Finetune in PyTorch

1. Transfer Learning & Model Finetune

Transfer learning studies how knowledge learned on a source task can be reused for a different but related target task. Model finetune is its most common form for deep networks: start from a model pretrained on a large source dataset (typically ImageNet), keep the feature-extractor weights as the initialization, replace the output layer for the new task, and retrain on the target data with the backbone either frozen or given a small learning rate. This is especially effective when the target dataset is small, as with the ant/bee dataset used below.

2. Finetune in PyTorch

Model finetuning steps:

  • 1. Obtain the pretrained model's parameters
  • 2. Load them into the model (load_state_dict())
  • 3. Replace the output layer

Finetune training strategies:

  • 1. Freeze the pretrained parameters (requires_grad=False, or lr=0)
  • 2. Use a smaller learning rate for the feature extractor (via params groups); see the sketch after this list
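As a compact reference, here is a minimal sketch of the three loading steps and the two training strategies above. It assumes a torchvision ResNet-18 and a 2-class target task; the weight-file path is hypothetical, and the two strategies are alternatives, not meant to be combined:

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models

# steps 1-2: build the model and load the pretrained parameters
model = models.resnet18()
model.load_state_dict(torch.load("resnet18-5c106cde.pth"))  # hypothetical local path

# strategy 1: freeze all pretrained parameters BEFORE replacing fc,
# so only the new fc layer (created below) remains trainable
for param in model.parameters():
    param.requires_grad = False

# step 3: replace the output layer for a 2-class task (trainable by default)
model.fc = nn.Linear(model.fc.in_features, 2)

# strategy 2 (alternative to freezing): smaller lr for the backbone via param groups
fc_ids = list(map(id, model.fc.parameters()))
base_params = [p for p in model.parameters() if id(p) not in fc_ids]
optimizer = optim.SGD([
    {'params': base_params, 'lr': 1e-4},            # small lr for the feature extractor
    {'params': model.fc.parameters(), 'lr': 1e-3},  # larger lr for the new fc layer
], momentum=0.9)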

First, build the AntsDataset:

import os
from PIL import Image
from torch.utils.data import Dataset


class AntsDataset(Dataset):
    def __init__(self, data_dir, transform=None):

        self.label_name = {"ants": 0, "bees": 1}
        self.data_info = self.get_img_info(data_dir)  # data_info holds all (image path, label) pairs; the DataLoader reads samples by index
        self.transform = transform

    def __getitem__(self, index):
        path_img, label = self.data_info[index]
        img = Image.open(path_img).convert('RGB')     # 0~255

        if self.transform is not None:
            img = self.transform(img)   # apply transforms here (e.g. convert to tensor)

        return img, label

    def __len__(self):
        return len(self.data_info)

    def get_img_info(self, data_dir):
        data_info = list()
        for root, dirs, _ in os.walk(data_dir):
            # iterate over the class sub-directories
            for sub_dir in dirs:
                img_names = os.listdir(os.path.join(root, sub_dir))
                img_names = list(filter(lambda x: x.endswith('.jpg'), img_names))

                # iterate over the images
                for i in range(len(img_names)):
                    img_name = img_names[i]
                    path_img = os.path.join(root, sub_dir, img_name)
                    label = self.label_name[sub_dir]
                    data_info.append((path_img, int(label)))

        if len(data_info) == 0:
            raise Exception("\ndata_dir:{} is an empty dir! Please check your path to the images!".format(data_dir))
        return data_info
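A quick sanity check of the dataset (the path is hypothetical; the directory layout train/ants, train/bees matches the hymenoptera_data set used below):

import torchvision.transforms as transforms
from torch.utils.data import DataLoader

transform = transforms.Compose([transforms.Resize((224, 224)),
                                transforms.ToTensor()])
train_data = AntsDataset(data_dir="hymenoptera_data/train", transform=transform)

img, label = train_data[0]                 # one (image tensor, label) pair
print(len(train_data), img.shape, label)   # sample count, torch.Size([3, 224, 224]), 0 or 1

loader = DataLoader(train_data, batch_size=4, shuffle=True)
imgs, labels = next(iter(loader))
print(imgs.shape, labels)                  # torch.Size([4, 3, 224, 224]) and a label tensor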

The full training script:

import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torch.optim as optim
from matplotlib import pyplot as plt
from tools.my_dataset import AntsDataset
from tools.common_tools import set_seed
import torchvision.models as models
import torchvision
BASEDIR = os.path.dirname(os.path.abspath(__file__))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("use device :{}".format(device))

set_seed(1)  # set the random seed
label_name = {"ants": 0, "bees": 1}

# hyper-parameters
MAX_EPOCH = 25
BATCH_SIZE = 16
LR = 0.001
log_interval = 10
val_interval = 1
classes = 2
start_epoch = -1
lr_decay_step = 7


# ============================ step 1/5 Data ============================
data_dir = os.path.join(BASEDIR, "..", "..", "data/hymenoptera_data")
train_dir = os.path.join(data_dir, "train")
valid_dir = os.path.join(data_dir, "val")

norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]

train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

valid_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# build AntsDataset instances
train_data = AntsDataset(data_dir=train_dir, transform=train_transform)
valid_data = AntsDataset(data_dir=valid_dir, transform=valid_transform)

# build DataLoaders
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5 Model ============================

# 1/3 build the model
resnet18_ft = models.resnet18()

# 2/3 load pretrained parameters
flag = 0
# flag = 1
if flag:
    path_pretrained_model = os.path.join(BASEDIR, "..", "..", "data/resnet18-5c106cde.pth")
    state_dict_load = torch.load(path_pretrained_model)
    resnet18_ft.load_state_dict(state_dict_load)

# Method 1: freeze the convolutional layers
flag_m1 = 0
# flag_m1 = 1
if flag_m1:
    for param in resnet18_ft.parameters():
        param.requires_grad = False
    print("conv1.weights[0, 0, ...]:\n {}".format(resnet18_ft.conv1.weight[0, 0, ...]))


# 3/3 replace the fc layer
num_ftrs = resnet18_ft.fc.in_features
resnet18_ft.fc = nn.Linear(num_ftrs, classes)


resnet18_ft.to(device)
# ============================ step 3/5 Loss function ============================
criterion = nn.CrossEntropyLoss()                                                   # loss function

# ============================ step 4/5 Optimizer ============================
# Method 2: small learning rate for the conv layers
flag = 0
# flag = 1
if flag:
    fc_params_id = list(map(id, resnet18_ft.fc.parameters()))     # id() returns each parameter's memory address
    base_params = filter(lambda p: id(p) not in fc_params_id, resnet18_ft.parameters())
    optimizer = optim.SGD([
        {'params': base_params, 'lr': LR*0},   # backbone lr = 0
        {'params': resnet18_ft.fc.parameters(), 'lr': LR}], momentum=0.9)

else:
    optimizer = optim.SGD(resnet18_ft.parameters(), lr=LR, momentum=0.9)               # optimizer

scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_decay_step, gamma=0.1)     # learning-rate decay schedule


# ============================ step 5/5 Training ============================
train_curve = list()
valid_curve = list()

for epoch in range(start_epoch + 1, MAX_EPOCH):

    loss_mean = 0.
    correct = 0.
    total = 0.

    resnet18_ft.train()
    for i, data in enumerate(train_loader):

        # forward
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = resnet18_ft(inputs)

        # backward
        optimizer.zero_grad()
        loss = criterion(outputs, labels)
        loss.backward()

        # update weights
        optimizer.step()

        # accumulate classification statistics
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).squeeze().cpu().sum().numpy()

        # log training info
        loss_mean += loss.item()
        train_curve.append(loss.item())
        if (i+1) % log_interval == 0:
            loss_mean = loss_mean / log_interval
            print("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total))
            loss_mean = 0.

            # if flag_m1:
            print("epoch:{} conv1.weights[0, 0, ...] :\n {}".format(epoch, resnet18_ft.conv1.weight[0, 0, ...]))

    scheduler.step()  # update the learning rate

    # validate the model
    if (epoch+1) % val_interval == 0:

        correct_val = 0.
        total_val = 0.
        loss_val = 0.
        resnet18_ft.eval()
        with torch.no_grad():
            for j, data in enumerate(valid_loader):
                inputs, labels = data
                inputs, labels = inputs.to(device), labels.to(device)

                outputs = resnet18_ft(inputs)
                loss = criterion(outputs, labels)

                _, predicted = torch.max(outputs.data, 1)
                total_val += labels.size(0)
                correct_val += (predicted == labels).squeeze().cpu().sum().numpy()

                loss_val += loss.item()

            loss_val_mean = loss_val/len(valid_loader)
            valid_curve.append(loss_val_mean)
            print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val_mean, correct_val / total_val))
        resnet18_ft.train()

train_x = range(len(train_curve))
train_y = train_curve

train_iters = len(train_loader)
valid_x = np.arange(1, len(valid_curve)+1) * train_iters*val_interval  # valid_curve stores one loss per epoch; map those record points onto the iteration axis
valid_y = valid_curve

plt.plot(train_x, train_y, label='Train')
plt.plot(valid_x, valid_y, label='Valid')

plt.legend(loc='upper right')
plt.ylabel('loss value')
plt.xlabel('Iteration')
plt.show()

Output:

use device :cuda
Training:Epoch[000/025] Iteration[010/016] Loss: 0.7283 Acc:49.38%
epoch:0 conv1.weights[0, 0, ...] :
 tensor([[ 0.0395, -0.0116,  0.0342, -0.0307,  0.0298,  0.0031,  0.0097],
        [-0.0049,  0.0319,  0.0666,  0.0388,  0.0096, -0.0193, -0.0174],
        [ 0.0075, -0.0277, -0.0006, -0.0376,  0.0052, -0.0493, -0.0200],
        [ 0.0002,  0.0206, -0.0210, -0.0059,  0.0125, -0.0291, -0.0089],
        [-0.0003,  0.0149, -0.0235,  0.0127,  0.0009, -0.0142, -0.0263],
        [ 0.0133,  0.0055,  0.0211,  0.0162,  0.0200, -0.0261,  0.0222],
        [ 0.0015,  0.0067, -0.0032, -0.0021,  0.0130, -0.0060,  0.0133]],
       device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[000/025] Iteration[010/010] Loss: 0.6738 Acc:54.90%
Training:Epoch[001/025] Iteration[010/016] Loss: 0.6975 Acc:55.00%
......
Valid:   Epoch[021/025] Iteration[010/010] Loss: 0.5759 Acc:73.86%
Training:Epoch[022/025] Iteration[010/016] Loss: 0.5115 Acc:73.12%
epoch:22 conv1.weights[0, 0, ...] :
 tensor([[ 0.0382, -0.0111,  0.0356, -0.0281,  0.0345,  0.0083,  0.0154],
        [-0.0058,  0.0325,  0.0695,  0.0423,  0.0134, -0.0157, -0.0130],
        [ 0.0081, -0.0260,  0.0013, -0.0336,  0.0101, -0.0449, -0.0151],
        [ 0.0021,  0.0228, -0.0197, -0.0022,  0.0177, -0.0238, -0.0045],
        [ 0.0009,  0.0161, -0.0210,  0.0162,  0.0054, -0.0090, -0.0223],
        [ 0.0120,  0.0054,  0.0236,  0.0201,  0.0244, -0.0215,  0.0266],
        [-0.0014,  0.0048, -0.0023, -0.0002,  0.0148, -0.0022,  0.0179]],
       device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[022/025] Iteration[010/010] Loss: 0.6039 Acc:71.24%
Training:Epoch[023/025] Iteration[010/016] Loss: 0.5715 Acc:67.50%
epoch:23 conv1.weights[0, 0, ...] :
 tensor([[ 0.0382, -0.0111,  0.0356, -0.0281,  0.0345,  0.0083,  0.0154],
        [-0.0058,  0.0325,  0.0695,  0.0423,  0.0134, -0.0157, -0.0130],
        [ 0.0081, -0.0260,  0.0013, -0.0336,  0.0101, -0.0449, -0.0151],
        [ 0.0021,  0.0228, -0.0197, -0.0022,  0.0177, -0.0238, -0.0045],
        [ 0.0009,  0.0161, -0.0210,  0.0162,  0.0054, -0.0090, -0.0223],
        [ 0.0120,  0.0054,  0.0236,  0.0201,  0.0244, -0.0215,  0.0266],
        [-0.0014,  0.0048, -0.0023, -0.0002,  0.0148, -0.0022,  0.0179]],
       device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[023/025] Iteration[010/010] Loss: 0.5763 Acc:71.24%
Training:Epoch[024/025] Iteration[010/016] Loss: 0.5469 Acc:73.75%
epoch:24 conv1.weights[0, 0, ...] :
 tensor([[ 0.0382, -0.0111,  0.0356, -0.0281,  0.0345,  0.0083,  0.0154],
        [-0.0058,  0.0325,  0.0695,  0.0423,  0.0134, -0.0157, -0.0130],
        [ 0.0081, -0.0260,  0.0013, -0.0336,  0.0101, -0.0449, -0.0151],
        [ 0.0021,  0.0228, -0.0197, -0.0022,  0.0177, -0.0238, -0.0045],
        [ 0.0009,  0.0161, -0.0210,  0.0162,  0.0054, -0.0090, -0.0223],
        [ 0.0120,  0.0054,  0.0236,  0.0201,  0.0244, -0.0215,  0.0266],
        [-0.0014,  0.0048, -0.0023, -0.0002,  0.0148, -0.0022,  0.0179]],
       device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[024/025] Iteration[010/010] Loss: 0.5581 Acc:71.24%

[Figure: training and validation loss curves]
The results are not particularly good, so next we load the pretrained weights:

# 2/3 load pretrained parameters
# flag = 0
flag = 1
if flag:
    path_pretrained_model = os.path.join(BASEDIR, "..", "..", "data/resnet18-5c106cde.pth")
    state_dict_load = torch.load(path_pretrained_model)
    resnet18_ft.load_state_dict(state_dict_load)
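If the .pth file is not on disk, torchvision (0.4.x, the version paired with PyTorch 1.2) can also download these exact weights itself; this is an equivalent alternative rather than what the notes above run:

# alternative: let torchvision fetch resnet18-5c106cde.pth automatically
resnet18_ft = models.resnet18(pretrained=True)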

Output:

use device :cuda
Training:Epoch[000/025] Iteration[010/016] Loss: 0.7385 Acc:50.00%
epoch:0 conv1.weights[0, 0, ...] :
 tensor([[-0.0106, -0.0064, -0.0021,  0.0746,  0.0564,  0.0168, -0.0130],
        [ 0.0109,  0.0092, -0.1102, -0.2807, -0.2714, -0.1293,  0.0036],
        [-0.0071,  0.0587,  0.2951,  0.5870,  0.5195,  0.2561,  0.0634],
        [ 0.0303, -0.0674, -0.2988, -0.4390, -0.2711, -0.0008,  0.0574],
        [-0.0278,  0.0156,  0.0721, -0.0545, -0.3332, -0.4208, -0.2580],
        [ 0.0303,  0.0405,  0.0623,  0.2386,  0.4135,  0.3935,  0.1660],
        [-0.0140, -0.0041, -0.0245, -0.0662, -0.1509, -0.0822, -0.0057]],
       device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[000/025] Iteration[010/010] Loss: 0.3317 Acc:88.24%
Training:Epoch[001/025] Iteration[010/016] Loss: 0.3401 Acc:85.62%
epoch:1 conv1.weights[0, 0, ...] :
 tensor([[-0.0114, -0.0074, -0.0031,  0.0738,  0.0556,  0.0162, -0.0136],
        [ 0.0101,  0.0084, -0.1110, -0.2813, -0.2719, -0.1297,  0.0033],
        [-0.0078,  0.0580,  0.2944,  0.5865,  0.5192,  0.2558,  0.0631],
        [ 0.0297, -0.0681, -0.2995, -0.4394, -0.2714, -0.0010,  0.0573],
        [-0.0283,  0.0151,  0.0715, -0.0547, -0.3334, -0.4209, -0.2579],
        [ 0.0299,  0.0400,  0.0619,  0.2385,  0.4136,  0.3936,  0.1662],
        [-0.0144, -0.0046, -0.0248, -0.0661, -0.1506, -0.0819, -0.0054]],
       device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[001/025] Iteration[010/010] Loss: 0.2388 Acc:90.85%
Training:Epoch[002/025] Iteration[010/016] Loss: 0.3021 Acc:85.00%
epoch:2 conv1.weights[0, 0, ...] :
 tensor([[-0.0121, -0.0080, -0.0038,  0.0733,  0.0550,  0.0156, -0.0141],
        [ 0.0094,  0.0076, -0.1117, -0.2819, -0.2725, -0.1303,  0.0027],
        [-0.0086,  0.0573,  0.2938,  0.5859,  0.5186,  0.2553,  0.0626],
        [ 0.0289, -0.0689, -0.3002, -0.4399, -0.2719, -0.0014,  0.0567],
        [-0.0290,  0.0142,  0.0707, -0.0553, -0.3339, -0.4213, -0.2583],
        [ 0.0293,  0.0392,  0.0611,  0.2380,  0.4132,  0.3931,  0.1658],
        [-0.0152, -0.0054, -0.0255, -0.0667, -0.1511, -0.0824, -0.0058]],
       device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[002/025] Iteration[010/010] Loss: 0.2047 Acc:94.12%
Training:Epoch[003/025] Iteration[010/016] Loss: 0.2093 Acc:91.25%
epoch:3 conv1.weights[0, 0, ...] :
 tensor([[-0.0122, -0.0081, -0.0040,  0.0729,  0.0544,  0.0151, -0.0145],
        [ 0.0093,  0.0074, -0.1120, -0.2823, -0.2730, -0.1309,  0.0021],
        [-0.0088,  0.0571,  0.2934,  0.5855,  0.5181,  0.2547,  0.0619],
        [ 0.0287, -0.0691, -0.3006, -0.4403, -0.2723, -0.0019,  0.0562],
        [-0.0291,  0.0140,  0.0704, -0.0557, -0.3343, -0.4216, -0.2587],
        [ 0.0292,  0.0389,  0.0608,  0.2376,  0.4129,  0.3928,  0.1655],
        [-0.0154, -0.0058, -0.0258, -0.0670, -0.1515, -0.0827, -0.0061]],
       device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[003/025] Iteration[010/010] Loss: 0.1952 Acc:95.42%
Training:Epoch[004/025] Iteration[010/016] Loss: 0.3119 Acc:88.75%
epoch:4 conv1.weights[0, 0, ...] :
 tensor([[-0.0120, -0.0086, -0.0047,  0.0716,  0.0532,  0.0135, -0.0160],
        [ 0.0103,  0.0076, -0.1120, -0.2829, -0.2737, -0.1318,  0.0010],
        [-0.0080,  0.0570,  0.2932,  0.5849,  0.5174,  0.2536,  0.0607],
        [ 0.0294, -0.0692, -0.3008, -0.4410, -0.2730, -0.0029,  0.0551],
        [-0.0286,  0.0141,  0.0706, -0.0563, -0.3347, -0.4223, -0.2595],
        [ 0.0297,  0.0390,  0.0611,  0.2372,  0.4126,  0.3923,  0.1647],
        [-0.0144, -0.0055, -0.0256, -0.0675, -0.1518, -0.0831, -0.0066]],
       device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[004/025] Iteration[010/010] Loss: 0.2563 Acc:90.85%
Training:Epoch[005/025] Iteration[010/016] Loss: 0.1568 Acc:94.38%
epoch:5 conv1.weights[0, 0, ...] :
 tensor([[-1.1914e-02, -9.2948e-03, -5.4747e-03,  7.0540e-02,  5.2237e-02,
          1.2424e-02, -1.7096e-02],
        [ 1.0687e-02,  7.2866e-03, -1.1245e-01, -2.8357e-01, -2.7444e-01,
         -1.3269e-01,  9.2068e-05],
        [-7.6915e-03,  5.6600e-02,  2.9263e-01,  5.8414e-01,  5.1659e-01,
          2.5261e-01,  5.9574e-02],
        [ 2.9540e-02, -6.9700e-02, -3.0130e-01, -4.4178e-01, -2.7389e-01,
         -3.9087e-03,  5.3965e-02],
        [-2.8397e-02,  1.3780e-02,  7.0120e-02, -5.7001e-02, -3.3545e-01,
         -4.2310e-01, -2.6042e-01],
        [ 2.9838e-02,  3.8765e-02,  6.0738e-02,  2.3653e-01,  4.1202e-01,
          3.9173e-01,  1.6396e-01],
        [-1.4109e-02, -5.6574e-03, -2.5943e-02, -6.8227e-02, -1.5250e-01,
         -8.3795e-02, -7.1628e-03]], device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[005/025] Iteration[010/010] Loss: 0.2009 Acc:94.77%
Training:Epoch[006/025] Iteration[010/016] Loss: 0.1340 Acc:95.62%
......
epoch:24 conv1.weights[0, 0, ...] :
 tensor([[-1.1233e-02, -9.2924e-03, -5.8125e-03,  6.9800e-02,  5.1569e-02,
          1.2115e-02, -1.7353e-02],
        [ 1.1191e-02,  7.1573e-03, -1.1309e-01, -2.8462e-01, -2.7534e-01,
         -1.3321e-01, -2.6183e-04],
        [-7.2236e-03,  5.6297e-02,  2.9193e-01,  5.8307e-01,  5.1566e-01,
          2.5216e-01,  5.9334e-02],
        [ 3.0158e-02, -6.9869e-02, -3.0195e-01, -4.4273e-01, -2.7472e-01,
         -4.2718e-03,  5.3910e-02],
        [-2.7460e-02,  1.4040e-02,  6.9704e-02, -5.7888e-02, -3.3597e-01,
         -4.2325e-01, -2.6032e-01],
        [ 3.0869e-02,  3.9258e-02,  6.0584e-02,  2.3591e-01,  4.1185e-01,
          3.9188e-01,  1.6421e-01],
        [-1.3088e-02, -5.1690e-03, -2.6042e-02, -6.8676e-02, -1.5253e-01,
         -8.3346e-02, -6.6183e-03]], device='cuda:0', grad_fn=<SelectBackward>)
Valid:   Epoch[024/025] Iteration[010/010] Loss: 0.1847 Acc:94.12%

[Figure: training and validation loss curves with pretrained weights]
By around the fifth epoch, the model already reaches roughly 95% validation accuracy.
Next, test Method 1, freezing the convolutional layers:

# Method 1: freeze the convolutional layers
# flag_m1 = 0
flag_m1 = 1
if flag_m1:
    for param in resnet18_ft.parameters():
        param.requires_grad = False
    print("conv1.weights[0, 0, ...]:\n {}".format(resnet18_ft.conv1.weight[0, 0, ...]))

Output:

use device :cuda
conv1.weights[0, 0, ...]:
 tensor([[-0.0104, -0.0061, -0.0018,  0.0748,  0.0566,  0.0171, -0.0127],
        [ 0.0111,  0.0095, -0.1099, -0.2805, -0.2712, -0.1291,  0.0037],
        [-0.0069,  0.0591,  0.2955,  0.5872,  0.5197,  0.2563,  0.0636],
        [ 0.0305, -0.0670, -0.2984, -0.4387, -0.2709, -0.0006,  0.0576],
        [-0.0275,  0.0160,  0.0726, -0.0541, -0.3328, -0.4206, -0.2578],
        [ 0.0306,  0.0410,  0.0628,  0.2390,  0.4138,  0.3936,  0.1661],
        [-0.0137, -0.0037, -0.0241, -0.0659, -0.1507, -0.0822, -0.0058]])
Training:Epoch[000/025] Iteration[010/016] Loss: 0.7759 Acc:45.62%
epoch:0 conv1.weights[0, 0, ...] :
 tensor([[-0.0104, -0.0061, -0.0018,  0.0748,  0.0566,  0.0171, -0.0127],
        [ 0.0111,  0.0095, -0.1099, -0.2805, -0.2712, -0.1291,  0.0037],
        [-0.0069,  0.0591,  0.2955,  0.5872,  0.5197,  0.2563,  0.0636],
        [ 0.0305, -0.0670, -0.2984, -0.4387, -0.2709, -0.0006,  0.0576],
        [-0.0275,  0.0160,  0.0726, -0.0541, -0.3328, -0.4206, -0.2578],
        [ 0.0306,  0.0410,  0.0628,  0.2390,  0.4138,  0.3936,  0.1661],
        [-0.0137, -0.0037, -0.0241, -0.0659, -0.1507, -0.0822, -0.0058]],
       device='cuda:0')
Valid:   Epoch[000/025] Iteration[010/010] Loss: 0.4140 Acc:84.31%
Training:Epoch[001/025] Iteration[010/016] Loss: 0.4420 Acc:80.00%
epoch:1 conv1.weights[0, 0, ...] :
 tensor([[-0.0104, -0.0061, -0.0018,  0.0748,  0.0566,  0.0171, -0.0127],
        [ 0.0111,  0.0095, -0.1099, -0.2805, -0.2712, -0.1291,  0.0037],
        [-0.0069,  0.0591,  0.2955,  0.5872,  0.5197,  0.2563,  0.0636],
        [ 0.0305, -0.0670, -0.2984, -0.4387, -0.2709, -0.0006,  0.0576],
        [-0.0275,  0.0160,  0.0726, -0.0541, -0.3328, -0.4206, -0.2578],
        [ 0.0306,  0.0410,  0.0628,  0.2390,  0.4138,  0.3936,  0.1661],
        [-0.0137, -0.0037, -0.0241, -0.0659, -0.1507, -0.0822, -0.0058]],
       device='cuda:0')
Valid:   Epoch[001/025] Iteration[010/010] Loss: 0.2643 Acc:90.85%
Training:Epoch[002/025] Iteration[010/016] Loss: 0.3811 Acc:80.62%
epoch:2 conv1.weights[0, 0, ...] :
 tensor([[-0.0104, -0.0061, -0.0018,  0.0748,  0.0566,  0.0171, -0.0127],
        [ 0.0111,  0.0095, -0.1099, -0.2805, -0.2712, -0.1291,  0.0037],
        [-0.0069,  0.0591,  0.2955,  0.5872,  0.5197,  0.2563,  0.0636],
        [ 0.0305, -0.0670, -0.2984, -0.4387, -0.2709, -0.0006,  0.0576],
        [-0.0275,  0.0160,  0.0726, -0.0541, -0.3328, -0.4206, -0.2578],
        [ 0.0306,  0.0410,  0.0628,  0.2390,  0.4138,  0.3936,  0.1661],
        [-0.0137, -0.0037, -0.0241, -0.0659, -0.1507, -0.0822, -0.0058]],
       device='cuda:0')
Valid:   Epoch[002/025] Iteration[010/010] Loss: 0.2335 Acc:91.50%
Training:Epoch[003/025] Iteration[010/016] Loss: 0.2952 Acc:86.25%
epoch:3 conv1.weights[0, 0, ...] :
 tensor([[-0.0104, -0.0061, -0.0018,  0.0748,  0.0566,  0.0171, -0.0127],
        [ 0.0111,  0.0095, -0.1099, -0.2805, -0.2712, -0.1291,  0.0037],
        [-0.0069,  0.0591,  0.2955,  0.5872,  0.5197,  0.2563,  0.0636],
        [ 0.0305, -0.0670, -0.2984, -0.4387, -0.2709, -0.0006,  0.0576],
        [-0.0275,  0.0160,  0.0726, -0.0541, -0.3328, -0.4206, -0.2578],
        [ 0.0306,  0.0410,  0.0628,  0.2390,  0.4138,  0.3936,  0.1661],
        [-0.0137, -0.0037, -0.0241, -0.0659, -0.1507, -0.0822, -0.0058]],
       device='cuda:0')
Valid:   Epoch[003/025] Iteration[010/010] Loss: 0.2113 Acc:92.16%
Training:Epoch[004/025] Iteration[010/016] Loss: 0.4194 Acc:83.75%
......

Note that the convolution kernel parameters never change.
Now apply Method 2, a small learning rate for the conv layers; here the learning rate is set to 0:

# ============================ step 4/5 Optimizer ============================
# Method 2: small learning rate for the conv layers
# flag = 0
flag = 1
if flag:
    fc_params_id = list(map(id, resnet18_ft.fc.parameters()))     # id() returns each parameter's memory address
    base_params = filter(lambda p: id(p) not in fc_params_id, resnet18_ft.parameters())
    optimizer = optim.SGD([
        {'params': base_params, 'lr': LR*0},   # backbone lr = 0
        {'params': resnet18_ft.fc.parameters(), 'lr': LR}], momentum=0.9)

else:
    optimizer = optim.SGD(resnet18_ft.parameters(), lr=LR, momentum=0.9)               # optimizer

scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_decay_step, gamma=0.1)     # learning-rate decay schedule
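Using 'lr': LR*0 reproduces freezing through the optimizer, which is handy for verification. In practice the "smaller learning rate" strategy would use a small but nonzero rate for the backbone, e.g. (a variant, not run in these notes):

optimizer = optim.SGD([
    {'params': base_params, 'lr': LR * 0.1},            # backbone adapts slowly
    {'params': resnet18_ft.fc.parameters(), 'lr': LR},  # full lr for the new fc layer
], momentum=0.9)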

Output:

use device :cuda
conv1.weights[0, 0, ...]:
 tensor([[-0.0104, -0.0061, -0.0018,  0.0748,  0.0566,  0.0171, -0.0127],
        [ 0.0111,  0.0095, -0.1099, -0.2805, -0.2712, -0.1291,  0.0037],
        [-0.0069,  0.0591,  0.2955,  0.5872,  0.5197,  0.2563,  0.0636],
        [ 0.0305, -0.0670, -0.2984, -0.4387, -0.2709, -0.0006,  0.0576],
        [-0.0275,  0.0160,  0.0726, -0.0541, -0.3328, -0.4206, -0.2578],
        [ 0.0306,  0.0410,  0.0628,  0.2390,  0.4138,  0.3936,  0.1661],
        [-0.0137, -0.0037, -0.0241, -0.0659, -0.1507, -0.0822, -0.0058]])
Training:Epoch[000/025] Iteration[010/016] Loss: 0.7759 Acc:45.62%
epoch:0 conv1.weights[0, 0, ...] :
 tensor([[-0.0104, -0.0061, -0.0018,  0.0748,  0.0566,  0.0171, -0.0127],
        [ 0.0111,  0.0095, -0.1099, -0.2805, -0.2712, -0.1291,  0.0037],
        [-0.0069,  0.0591,  0.2955,  0.5872,  0.5197,  0.2563,  0.0636],
        [ 0.0305, -0.0670, -0.2984, -0.4387, -0.2709, -0.0006,  0.0576],
        [-0.0275,  0.0160,  0.0726, -0.0541, -0.3328, -0.4206, -0.2578],
        [ 0.0306,  0.0410,  0.0628,  0.2390,  0.4138,  0.3936,  0.1661],
        [-0.0137, -0.0037, -0.0241, -0.0659, -0.1507, -0.0822, -0.0058]],
       device='cuda:0')
Valid:   Epoch[000/025] Iteration[010/010] Loss: 0.4140 Acc:84.31%
Training:Epoch[001/025] Iteration[010/016] Loss: 0.4420 Acc:80.00%
epoch:1 conv1.weights[0, 0, ...] :
 tensor([[-0.0104, -0.0061, -0.0018,  0.0748,  0.0566,  0.0171, -0.0127],
        [ 0.0111,  0.0095, -0.1099, -0.2805, -0.2712, -0.1291,  0.0037],
        [-0.0069,  0.0591,  0.2955,  0.5872,  0.5197,  0.2563,  0.0636],
        [ 0.0305, -0.0670, -0.2984, -0.4387, -0.2709, -0.0006,  0.0576],
        [-0.0275,  0.0160,  0.0726, -0.0541, -0.3328, -0.4206, -0.2578],
        [ 0.0306,  0.0410,  0.0628,  0.2390,  0.4138,  0.3936,  0.1661],
        [-0.0137, -0.0037, -0.0241, -0.0659, -0.1507, -0.0822, -0.0058]],
       device='cuda:0')
Valid:   Epoch[001/025] Iteration[010/010] Loss: 0.2643 Acc:90.85%
Training:Epoch[002/025] Iteration[010/016] Loss: 0.3811 Acc:80.62%
epoch:2 conv1.weights[0, 0, ...] :
 tensor([[-0.0104, -0.0061, -0.0018,  0.0748,  0.0566,  0.0171, -0.0127],
        [ 0.0111,  0.0095, -0.1099, -0.2805, -0.2712, -0.1291,  0.0037],
        [-0.0069,  0.0591,  0.2955,  0.5872,  0.5197,  0.2563,  0.0636],
        [ 0.0305, -0.0670, -0.2984, -0.4387, -0.2709, -0.0006,  0.0576],
        [-0.0275,  0.0160,  0.0726, -0.0541, -0.3328, -0.4206, -0.2578],
        [ 0.0306,  0.0410,  0.0628,  0.2390,  0.4138,  0.3936,  0.1661],
        [-0.0137, -0.0037, -0.0241, -0.0659, -0.1507, -0.0822, -0.0058]],
       device='cuda:0')
Valid:   Epoch[002/025] Iteration[010/010] Loss: 0.2335 Acc:91.50%
Training:Epoch[003/025] Iteration[010/016] Loss: 0.2952 Acc:86.25%

The conv parameters are never updated, yet the model still quickly reaches over 90% accuracy.
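Following the freeze/unfreeze idea linked at the top of these notes, a natural next step is to unfreeze the deepest block once the new fc layer has converged. A minimal sketch (layer names follow torchvision's ResNet-18; this is not part of the original experiments):

# unfreeze the last residual block and keep fc trainable
for name, param in resnet18_ft.named_parameters():
    if name.startswith("layer4") or name.startswith("fc"):
        param.requires_grad = True

# rebuild the optimizer so it covers the newly trainable parameters
optimizer = optim.SGD(
    filter(lambda p: p.requires_grad, resnet18_ft.parameters()),
    lr=LR * 0.1, momentum=0.9)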
