PyTorch Learning 1: ResNet-50

 

The ResNet-50 model code:

```python
import torch
import torch.nn as nn

# Bottleneck implements both the "conv block" (projection shortcut) and the "identity block" (plain shortcut)
class Bottleneck(nn.Module):
    def __init__(self, in_channels, filters, stride=1):
        super(Bottleneck, self).__init__()
        F1, F2, F3 = filters
        self.block=nn.Sequential(nn.Conv2d(in_channels,F1,1,stride=stride,padding=0,bias=False),
                                 nn.BatchNorm2d(F1),
                                 nn.ReLU(inplace=True),
                                 nn.Conv2d(F1, F2, kernel_size=3, stride=1, padding=1, bias=False),
                                 nn.BatchNorm2d(F2),
                                 nn.ReLU(inplace=True),
                                 nn.Conv2d(F2, F3, kernel_size=1, padding=0,bias=False),
                                 nn.BatchNorm2d(F3)
                                 )

        # Build the projection shortcut only when the spatial size or the channel
        # count changes; otherwise the plain identity shortcut is used in forward().
        self.downsample = None
        if stride != 1 or in_channels != F3:
            self.downsample = nn.Sequential(nn.Conv2d(in_channels, out_channels=F3, kernel_size=1, stride=stride, bias=False),
                                            nn.BatchNorm2d(F3))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x  # the shortcut branch starts from the block input
        out = self.block(x)
        # If the shortcut needs a projection (1x1 conv + BN), apply it before the
        # addition; otherwise the input is added to the output directly.
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
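
# A Bottleneck with stride=1 and matching in/out channels uses the identity
# shortcut; otherwise it applies the 1x1 projection. Illustrative shapes (NCHW):
#   Bottleneck(64,  [64, 64, 256])              # 1x64x56x56  -> 1x256x56x56 (projection: channels change)
#   Bottleneck(256, [64, 64, 256])              # 1x256x56x56 -> 1x256x56x56 (identity shortcut)
#   Bottleneck(256, [128, 128, 512], stride=2)  # 1x256x56x56 -> 1x512x28x28 (projection: downsampling)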


class Resnet50(nn.Module):
    def __init__(self,n_class):

        # The shape comments below assume a 600x600x3 input image

        super(Resnet50, self).__init__()

        self.stage1 = nn.Sequential(
                                  nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),  # 3 -> 64 channels, 600x600 -> 300x300 (k=7, s=2, p=3)
                                  nn.BatchNorm2d(64),  # batch normalization
                                  nn.ReLU(inplace=True),  # activation
                                  # 300,300,64 -> roughly 150,150,64
                                  nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
                                  )  # max pooling to roughly 150x150x64 (k=3, s=2, p=1): stage 1

        self.stage2 = nn.Sequential(
                                  Bottleneck(in_channels=64,filters=[64,64,256], stride=1),
                                  Bottleneck(in_channels=256, filters=[64, 64, 256]),
                                  Bottleneck(in_channels=256, filters=[64, 64, 256])
        )

        self.stage3 = nn.Sequential(
                                  Bottleneck(in_channels=256, filters=[128, 128, 512], stride=2),  # downsampling block
                                  Bottleneck(in_channels=512, filters=[128, 128, 512],stride=1),
                                  Bottleneck(in_channels=512, filters=[128, 128, 512],stride=1),
                                  Bottleneck(in_channels=512, filters=[128, 128, 512],stride=1)
        )
        self.stage4 = nn.Sequential(
                                  Bottleneck(in_channels=512, filters=[256, 256, 1024], stride=2),  # downsampling block
                                  Bottleneck(in_channels=1024, filters=[256, 256, 1024],stride=1),
                                  Bottleneck(in_channels=1024, filters=[256, 256, 1024],stride=1),
                                  Bottleneck(in_channels=1024, filters=[256, 256, 1024],stride=1),
                                  Bottleneck(in_channels=1024, filters=[256, 256, 1024],stride=1),
                                  Bottleneck(in_channels=1024, filters=[256, 256, 1024],stride=1),
                                  Bottleneck(in_channels=1024, filters=[256, 256, 1024],stride=1))
        self.stage5 = nn.Sequential(
                                  Bottleneck(in_channels=1024, filters=[512, 512, 2048], stride=2),  # downsampling block
                                  Bottleneck(in_channels=2048, filters=[512, 512, 2048],stride=1),
                                  Bottleneck(in_channels=2048, filters=[512, 512, 2048],stride=1),
                                 )
        # Global average pooling: a 2048-d feature vector per image, regardless of the input resolution
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(2048, n_class)

    def forward(self, x):
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.stage5(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 2048)
        x = self.fc(x)  # fully connected classifier
        return x
if __name__ == '__main__':
    model = Resnet50(2)
    x = torch.randn(1, 3, 224, 224)
    y = model(x)
    print(y.shape)  # torch.Size([1, 2])
```
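
A minimal sanity check, assuming the model definition above has been run: the sketch below pushes a dummy 224x224 batch through the network stage by stage and prints the feature-map shape after each one, ending with the (batch, n_class) logits.

```python
import torch

model = Resnet50(n_class=2).eval()
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    for name in ['stage1', 'stage2', 'stage3', 'stage4', 'stage5']:
        x = getattr(model, name)(x)        # run one stage at a time
        print(f'{name}: {tuple(x.shape)}')
    x = model.avgpool(x)                   # global average pooling -> (1, 2048, 1, 1)
    x = torch.flatten(x, 1)                # (1, 2048)
    print('logits:', tuple(model.fc(x).shape))  # (1, 2)
```

Because the max-pool in stage1 uses ceil_mode=True, the intermediate spatial sizes can come out one pixel larger than in torchvision's resnet50 (which uses ceil_mode=False); the adaptive average pooling keeps the classifier independent of that.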


For transfer learning with PyTorch, you can use the pretrained ResNet-50 as the base model and fine-tune it. Below is a complete PyTorch code example:

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models, transforms, datasets

# Set the random seed so the experiment is reproducible
torch.manual_seed(123)

# Data preprocessing and loading
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}

data_dir = 'path/to/your/dataset'  # set the dataset path
image_datasets = {x: datasets.ImageFolder(f'{data_dir}/{x}', data_transforms[x]) for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=32, shuffle=True) for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

# Load the pretrained model
model = models.resnet50(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, len(class_names))  # replace the final fully connected layer

# Define the loss function and the optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Train the model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
criterion = criterion.to(device)

num_epochs = 10
best_acc = 0.0

for epoch in range(num_epochs):
    for phase in ['train', 'val']:
        if phase == 'train':
            model.train()
        else:
            model.eval()

        running_loss = 0.0
        running_corrects = 0

        for inputs, labels in dataloaders[phase]:
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            with torch.set_grad_enabled(phase == 'train'):
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)

                if phase == 'train':
                    loss.backward()
                    optimizer.step()

            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)

        epoch_loss = running_loss / dataset_sizes[phase]
        epoch_acc = running_corrects.double() / dataset_sizes[phase]

        print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

        if phase == 'val' and epoch_acc > best_acc:
            best_acc = epoch_acc

# Evaluate the model on the test set
model.eval()
test_dataset = datasets.ImageFolder(f'{data_dir}/test', data_transforms['val'])
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)

test_corrects = 0
for inputs, labels in test_dataloader:
    inputs = inputs.to(device)
    labels = labels.to(device)
    with torch.set_grad_enabled(False):
        outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        test_corrects += torch.sum(preds == labels.data)

test_acc = test_corrects.double() / len(test_dataset)
print(f'Test Accuracy: {test_acc:.4f}')
```

Note that the dataset path, number of epochs, optimizer settings, and so on need to be adjusted for your own setup. This code fine-tunes the pretrained ResNet-50 on the training set and evaluates it on the validation and test sets.
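
If the new dataset is small, a common variant is to freeze the pretrained backbone and train only the newly added classification head. A minimal sketch, assuming the imports and class_names from the example above:

```python
# Feature-extraction variant: freeze the pretrained weights first, then replace
# the classifier head; the new layer's parameters require gradients by default.
model = models.resnet50(pretrained=True)
for param in model.parameters():
    param.requires_grad = False
model.fc = nn.Linear(model.fc.in_features, len(class_names))

# Pass only the parameters of the new head to the optimizer.
optimizer = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)
```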
