2021-07-10

This post walks through training an AlexNet image classifier with transfer learning. It first defines the data preprocessing steps (resizing, center cropping, random flipping, normalization), then loads the training and test sets and wraps them in DataLoaders. Next it loads a pretrained AlexNet, freezes its parameters, and replaces the classifier head to fit the new task. Finally it sets up the loss function and optimizer, trains the model, saves and reloads the weights, and evaluates accuracy on the test set.

Transfer Learning

# coding=utf-8

# from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
import os

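# Preprocessing: both splits are resized, center-cropped to the 224x224 input
# size AlexNet expects, converted to tensors, and normalized to [-1, 1];
# the training split additionally gets a random horizontal flip for augmentation.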
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize(230),  # transforms.Scale is deprecated; Resize is the current API
        transforms.CenterCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ]),
    'test': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ]),
}

data_directory = './data'
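# ImageFolder expects one subdirectory per class, e.g. ./data/train/<class_name>/*.jpg,
# and maps each class name to an integer label automatically.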
trainset = datasets.ImageFolder(os.path.join(data_directory, 'train'), data_transforms['train'])
testset = datasets.ImageFolder(os.path.join(data_directory, 'test'), data_transforms['test'])

trainloader = torch.utils.data.DataLoader(trainset, batch_size=5, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=5, shuffle=True)


def imshow(inputs):
    # Undo Normalize(mean=0.5, std=0.5) and convert CHW -> HWC for matplotlib
    inputs = inputs / 2 + 0.5
    inputs = inputs.numpy().transpose((1, 2, 0))
    plt.imshow(inputs)
    plt.show()


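# Pull one batch from the training loader and display it as an image grid (quick sanity check).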
inputs, classes = next(iter(trainloader))

imshow(torchvision.utils.make_grid(inputs))



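# Load AlexNet pretrained on ImageNet and freeze all of its weights; only the
# replacement classifier defined below will be trained.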
alexnet = models.alexnet(pretrained=True)

for param in alexnet.parameters():
    param.requires_grad = False

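# Rebuild the classifier head with the same layout as AlexNet's original classifier,
# except the final layer now outputs 2 classes. These freshly created layers default
# to requires_grad=True, so they are the only parameters that get updated.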
alexnet.classifier = nn.Sequential(
    nn.Dropout(),
    nn.Linear(256 * 6 * 6, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(inplace=True),
    nn.Linear(4096, 2),
)
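
# Optional sanity check (an addition, not part of the original post): list the
# trainable parameters; only entries under 'classifier.' should appear.
print([name for name, p in alexnet.named_parameters() if p.requires_grad])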

CUDA = torch.cuda.is_available()

if CUDA:
    alexnet = alexnet.cuda()

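# Cross-entropy loss over the 2 classes; SGD is given only the classifier's
# parameters, so the frozen feature extractor is never updated.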
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(alexnet.classifier.parameters(), lr=0.001, momentum=0.9)


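# Training loop: forward pass, cross-entropy loss, backprop, SGD step;
# reports the average loss every 10 mini-batches.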
def train(model, criterion, optimizer, epochs=1):
    for epoch in range(epochs):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            if CUDA:
                inputs, labels = inputs.cuda(), labels.cuda()
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if i % 10 == 9:  # report the running average every 10 mini-batches
                print('[Epoch:%d, Batch:%5d] Loss: %.3f' % (epoch + 1, i + 1, running_loss / 10))
                running_loss = 0.0

    print('Finished Training')


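# Evaluate classification accuracy on the held-out test set.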
def test(testloader, model):
    model.eval()  # switch dropout to evaluation mode
    correct = 0
    total = 0
    with torch.no_grad():  # gradients are not needed for evaluation
        for data in testloader:
            images, labels = data
            if CUDA:
                images = images.cuda()
                labels = labels.cuda()
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on the test set: %d %%' % (100 * correct / total))


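# Checkpoint helpers: load_param restores weights only if the file exists,
# save_param writes the current state_dict to disk.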
def load_param(model, path):
    if os.path.exists(path):
        model.load_state_dict(torch.load(path))


def save_param(model, path):
    torch.save(model.state_dict(), path)


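# Main flow: resume from a previous checkpoint if one exists, train for 2 epochs,
# save the updated weights, then report accuracy on the test set.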
load_param(alexnet, 'tl_model.pkl')

train(alexnet, criterion, optimizer, epochs=2)

save_param(alexnet, 'tl_model.pkl')

test(testloader, alexnet)


Path issues: https://blog.csdn.net/qq_37591637/article/details/103991957
Error when loading data: https://blog.csdn.net/jacke121/article/details/81456842
Code: https://blog.csdn.net/jacke121/article/details/81456842
