# ResNet-18 transfer learning with PyTorch (dogs-vs-cats binary classification)
from torchvision.datasets import ImageFolder
from torchvision.transforms import transforms
from torch.utils.data.dataloader import DataLoader
import torchvision.models as models
import torch.nn as nn
from torch.cuda import is_available as is_cuda
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
import torch
import numpy as np
import matplotlib.pyplot as plt
import time

'''
import os
# 指定gpu
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
'''
# NOTE: the triple-quoted block above is deliberately disabled code for
# pinning specific GPUs via environment variables.
# Root of the dogs-vs-cats dataset; expects train/ and valid/ subfolders.
path = r'E:\Data\机器学习数据\new-dogs-vs-cats'
# Preprocessing pipeline: Resize to the 224x224 input ResNet expects;
# ToTensor maps pixel values from [0, 255] to [0, 1]; Normalize then
# standardizes each channel with the ImageNet means/stds (roughly to [-1, 1]).
simple_transform = transforms.Compose([transforms.Resize((224, 224)),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# ImageFolder infers each image's class label from its subdirectory name.
train = ImageFolder(path + r'\train', simple_transform)
valid = ImageFolder(path + r'\valid', simple_transform)
def imshow(inp):
    """Display a normalized CHW image tensor with matplotlib.

    Undoes the ImageNet Normalize transform (the mean/std used in
    `simple_transform`) before plotting.

    :param inp: 3-D image tensor in (channel, height, width) layout.
    :return: None (draws on the current matplotlib axes).
    """
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    # Tensor layout is CHW; matplotlib expects HWC.
    img = inp.numpy().transpose((1, 2, 0))
    # De-normalize, then clamp to the displayable [0, 1] range.
    img = np.clip(std * img + mean, 0, 1)
    plt.imshow(img)

# imshow(train[50][0])  # quick visual sanity check of one training sample
# Batch the datasets into PyTorch tensors (num_workers would add loading parallelism).
train_data_gen = DataLoader(train, batch_size=64)
valid_data_gen = DataLoader(valid, batch_size=64)

# Sample counts and loaders keyed by phase, as consumed by train_model().
# Use len() rather than calling __len__() directly.
dataset_sizes = {'train': len(train), 'valid': len(valid)}
dataloaders = {'train': train_data_gen, 'valid': valid_data_gen}

# Load torchvision's ResNet-18 with ImageNet-pretrained weights.
# NOTE(review): `pretrained=True` is deprecated in newer torchvision in
# favour of the `weights=` argument — kept for compatibility with the
# torchvision version this file appears to target.
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features  # input feature count of the final fully-connected layer
# Replace the 1000-way ImageNet head with a 2-way (cat/dog) classifier.
model_ft.fc = nn.Linear(num_ftrs, 2)

def train_model(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes, num_epochs=25):
    """Train `model` and return it with the best-validation-accuracy weights loaded.

    :param model: network to train; batches are moved to the device its
        parameters already live on.
    :param criterion: loss function returning the *batch mean* (e.g.
        nn.CrossEntropyLoss).
    :param optimizer: optimizer over model.parameters().
    :param scheduler: LR scheduler, stepped once per epoch after the
        training phase.
    :param dataloaders: dict with 'train' and 'valid' DataLoaders.
    :param dataset_sizes: dict with 'train' and 'valid' sample counts.
    :param num_epochs: number of epochs to run.
    :return: the same `model` object, with the best validation weights loaded.
    """
    import copy

    since = time.time()  # wall-clock start for the final summary

    # BUG FIX: state_dict() returns references to the live parameter tensors,
    # so the original "best" snapshot silently tracked the current weights.
    # A deep copy is required to actually freeze them.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    # Run batches on whatever device the model already lives on, instead of
    # unconditionally calling .cuda() (which broke for a CPU model on a
    # CUDA-capable machine).
    device = next(model.parameters()).device

    for epoch in range(num_epochs):
        print('Epoch{}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training phase followed by a validation phase.
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()  # enable dropout / batch-norm updates
            else:
                model.eval()   # evaluation mode

            running_loss = 0.0
            running_corrects = 0

            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Clear gradients accumulated from the previous batch.
                optimizer.zero_grad()

                # Track gradients only while training; validation runs without
                # autograd to save memory and time.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    # torch.max over dim 1 returns (max values, argmax indices);
                    # the indices are the predicted class per sample.
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # Backprop and update only in the training phase.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # BUG FIX: criterion returns the batch *mean*; weight it by the
                # batch size before summing so that dividing by the dataset
                # size below yields the true per-sample average (the original
                # summed means, under-reporting the loss).
                running_loss += loss.item() * inputs.size(0)
                # .item() yields a Python int, avoiding integer-tensor
                # division issues in the accuracy computed below.
                running_corrects += torch.sum(preds == labels).item()

            if phase == 'train':
                # Modern PyTorch order: step the LR scheduler *after* the
                # epoch's optimizer updates, not before them.
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]

            print('{} Loss: {:.4f}, Acc:{:.4f}'.format(phase, epoch_loss, epoch_acc))

            # Snapshot the weights whenever validation accuracy improves.
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

    print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # Load the best validation weights before returning.
    model.load_state_dict(best_model_wts)
    return model

if not is_cuda():  # GPU availability gate for the whole training run
    print('CUDA cannot be utilized!')
else:
    # Move the model to the GPU, then set up loss, optimizer and LR schedule.
    model_ft = model_ft.cuda()
    learning_rate = 0.001
    criterion = nn.CrossEntropyLoss()
    # SGD with momentum over all parameters (full fine-tuning of the backbone).
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=learning_rate, momentum=0.9)
    # Decay the learning rate by a factor of 0.1 every 7 epochs.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
    train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, dataloaders, dataset_sizes, num_epochs=25)
# (CSDN page footer / like-count / payment-widget text removed — scraping
# residue, not part of the article or the code.)