# PyTorch tutorial: Dogs vs. Cats classification

import torch
import torchvision.models as models #储存里常用的一些模型
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import time
import torchvision.transforms as transforms # 常用的图片变换,例如裁剪、旋转等;
from torchvision import datasets # 一些加载数据的函数及常用的数据集接口
import torch
from torch.utils.data import DataLoader # 数据加载器,结合了数据集和取样器,并且可以提供多个线程处理数据集
import argparse

# Preferred compute device for the whole script: first CUDA GPU if available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Flag consulted before moving tensors/models onto `device` (train_model / __main__).
is_cuda = torch.cuda.is_available()

def resnet(out_feature=2, feature=18, pretrained=True):
    """Build a torchvision ResNet with its final FC layer resized.

    out_feature: number of output classes for the replacement FC layer.
    feature: ResNet depth, one of 18/34/50/101/152.
    pretrained: if True, torchvision downloads ImageNet weights.
    Returns the adapted model.
    """
    assert feature in [18, 34, 50, 101, 152], 'ResNet don\'t consist of {}'.format(feature)
    # Single getattr dispatch replaces the if/elif ladder; unlike the ladder,
    # an invalid depth can never leave `model_fit` unbound (which previously
    # raised NameError when asserts were stripped under `python -O`).
    model_fit = getattr(models, 'resnet{}'.format(feature))(pretrained=pretrained)
    num_ftrs = model_fit.fc.in_features  # input width of the original FC head
    # Re-head the network for our class count.
    model_fit.fc = nn.Linear(num_ftrs, out_feature)
    return model_fit

class VGG(nn.Module):
    """Configurable VGG (depth 11/13/16/19) image classifier.

    Expects (N, 3, 224, 224) input; the conv backbone reduces it to
    512x7x7 before the fully-connected classifier head.
    """

    # Per-depth layer configuration: ints are conv output channels,
    # 'M' marks a 2x2 max-pool.  Class attribute so the table is built
    # once, and still reachable as ``self.cfgs`` for compatibility.
    cfgs = {
        11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
        13: [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
        16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
        19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
    }

    def __init__(self, num_classes=2, feature=16, init_weights=False):
        """num_classes: output classes; feature: VGG depth (11/13/16/19);
        init_weights: apply Xavier initialization to conv/linear layers."""
        assert feature in [11, 13, 16, 19], 'VGG don\'t consist of {}'.format(feature)
        # Call Module.__init__ BEFORE assigning any attributes: the original
        # set self.cfgs first, which relies on fragile nn.Module.__setattr__
        # internals and breaks outright for module/parameter values.
        super(VGG, self).__init__()
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 500),
            nn.ReLU(True),
            nn.Dropout(p=0.5),
            nn.Linear(500, 20),
            nn.ReLU(True),
            nn.Dropout(p=0.5),
            nn.Linear(20, num_classes)
        )
        self.backbone = self._make_feature(self.cfgs[feature])
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        """Run the backbone, flatten to (N, 512*7*7), then classify."""
        x = self.backbone(x)
        x = torch.flatten(x, start_dim=1)  # equivalent to x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        """Xavier-initialize conv/linear weights; zero their biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0)

    def _make_feature(self, cfg: list):
        """Build the conv backbone from a cfg list ('M' = max-pool,
        int = 3x3 conv with that many output channels, then ReLU)."""
        layers = []
        in_channels = 3
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
                layers += [conv2d, nn.ReLU(True)]
                in_channels = v
        return nn.Sequential(*layers)

def dataset(batch_size):
    """Build train/valid ImageFolder loaders for the dogs-vs-cats data.

    batch_size: mini-batch size for both loaders.
    Returns (datasetloader, datasetlen): dicts keyed 'train'/'valid'
    holding the DataLoader and the sample count respectively.
    """
    # transforms.Scale was deprecated and later removed from torchvision;
    # Resize is the drop-in replacement.  Normalization uses ImageNet
    # statistics to match the pretrained backbones.
    simple_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    train = datasets.ImageFolder('dogsandcats/train/', simple_transform)
    valid = datasets.ImageFolder('dogsandcats/valid/', simple_transform)
    train_data_gen = DataLoader(train, batch_size=batch_size, num_workers=3, shuffle=True)
    valid_data_gen = DataLoader(valid, batch_size=batch_size, num_workers=3)
    datasetloader = {"train": train_data_gen, "valid": valid_data_gen}
    datasetlen = {"train": len(train), "valid": len(valid)}
    return datasetloader, datasetlen

def train_model(model, datasetloader, datasetlen, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model`, tracking the best validation-accuracy weights.

    datasetloader/datasetlen: dicts keyed 'train'/'valid' (see dataset()).
    criterion: loss function; optimizer: weight updater; scheduler: per-epoch
    learning-rate schedule.  Returns the model loaded with the best weights.
    """
    import copy  # local import: only needed for state-dict snapshots

    since = time.time()
    # deepcopy is essential: a bare state_dict() is a live view of the
    # parameters, so the "best" weights would be overwritten by training.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()  # enable dropout / batch-norm updates
            else:
                model.eval()   # inference mode for validation
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in datasetloader[phase]:
                if is_cuda:
                    inputs = inputs.to(device)  # move batch to GPU
                    labels = labels.to(device)
                optimizer.zero_grad()  # clear accumulated gradients
                # Skip autograd bookkeeping during validation.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()   # backpropagate
                        optimizer.step()  # update weights
                # Weight the batch-mean loss by batch size so the epoch
                # average (sum / dataset size) is correct even when the
                # last batch is smaller.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels).item()
            if phase == 'train':
                # Since PyTorch 1.1 scheduler.step() must run AFTER the
                # optimizer steps of the epoch, not before.
                scheduler.step()
            epoch_loss = running_loss / datasetlen[phase]
            epoch_acc = running_corrects / datasetlen[phase]
            print('{} Loss: {:.4f}  Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            if phase == "valid" and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            print()
    time_elapsed = time.time() - since
    print("Training complete in {:.0f}m {:.0f}s".format(time_elapsed // 60, time_elapsed % 60))
    print("Best val Acc: {:4f}".format(best_acc))
    model.load_state_dict(best_model_wts)  # restore the best checkpoint
    return model

def parse_args(args=None):
    """Parse training hyper-parameters.

    args: optional list of argument strings; None means sys.argv[1:]
    (passing a list keeps the function testable without touching argv).
    Returns an argparse.Namespace with the fields below.
    """
    parser = argparse.ArgumentParser(description="Arguments of training")
    parser.add_argument('--model_name', type=str, default='res', help="vgg or resnet")
    parser.add_argument('--feature', type=int, default=18, help="layer feature of the model, vgg includes 11,13,16,19, res includes 18,34,50,101,152")
    # help typo fixed: "learing" -> "learning"
    parser.add_argument('--learning_rate', type=float, default=0.001, help="learning rate of the backpropagation")
    parser.add_argument('--num_epoch', type=int, default=1, help='number of the epoch')
    parser.add_argument('--classes', type=int, default=2, help='number of the classes')
    parser.add_argument('--batch_size', type=int, default=16, help='batch size')
    return parser.parse_args(args)

if __name__ == "__main__":
    import os  # local import: only needed to create the checkpoint directory

    arg = parse_args()
    datasetloader, datasetlen = dataset(arg.batch_size)
    assert arg.model_name == 'res' or arg.model_name == "vgg", 'your model name is not existed'
    if arg.model_name == 'res':
        net = resnet(arg.classes, feature=arg.feature)
    else:
        net = VGG(arg.classes, arg.feature, True)
    if is_cuda:
        net = net.to(device)  # move model to GPU
    # Loss, optimizer and step-decay learning-rate schedule.
    criterion = nn.CrossEntropyLoss()
    optimizer_fit = optim.SGD(net.parameters(), lr=arg.learning_rate, momentum=0.9)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_fit, step_size=7, gamma=0.1)

    model = train_model(net, datasetloader, datasetlen, criterion, optimizer_fit, exp_lr_scheduler, arg.num_epoch)
    # torch.save does not create missing directories; without this the save
    # raises FileNotFoundError on a fresh checkout.
    os.makedirs("./models", exist_ok=True)
    torch.save(model, "./models/{}_{}_model_best.pth".format(arg.model_name, str(arg.feature)))  # save best model