Fine-tuning a Network Model with PyTorch

import torch
import torchvision.models as models

1. Adjust the output dimension of the final layer

model = models.resnet50(pretrained=True)
fc_features = model.fc.in_features  # input dimension of the original fully connected layer
model.fc = torch.nn.Linear(fc_features, num_class)  # num_class: number of target classes
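A common follow-up (a minimal sketch, not part of the original post): freeze the pretrained backbone so that only the newly created fc layer is updated during fine-tuning.

for param in model.parameters():
    param.requires_grad = False   # freeze the whole pretrained backbone
for param in model.fc.parameters():
    param.requires_grad = True    # keep the new classifier trainable
optimizer = torch.optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)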

2. Adjust the parameters of a specific layer

import torch
import torch.nn as nn
from torchvision import models

class ft_net(nn.Module):
    def __init__(self, class_num=751, stride=2):
        super(ft_net, self).__init__()
        # load the pretrained model
        model_ft = models.resnet50(pretrained=True)
        if stride == 1:
            # change the stride of the last down-sampling stage from 2 to 1
            model_ft.layer4[0].downsample[0].stride = (1, 1)
            model_ft.layer4[0].conv2.stride = (1, 1)
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.model = model_ft
        self.fc = nn.Linear(2048, class_num)
    def forward(self, x):
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = self.model.avgpool(x)
        x = torch.squeeze(x)
        x = self.fc(x) #use our classifier.
        return x

The code above changes the stride of downsample[0] (and of conv2) in layer4[0] of the model.
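A quick sanity check (a sketch; the 256x128 input size and batch size are assumptions): build the network with stride=1 and confirm the output shape.

net = ft_net(class_num=751, stride=1)
x = torch.randn(4, 3, 256, 128)   # dummy batch of 4 images
out = net(x)
print(out.shape)                  # expected: torch.Size([4, 751])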

3. Add new layers and load pretrained parameters

import torchvision.models as models
import torch
import torch.nn as nn
import math
from torchvision.models.resnet import Bottleneck  # residual block used by CNN below
 
class CNN(nn.Module):
    def __init__(self, block, layers, num_classes=9):
        self.inplanes = 64
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # newly added transposed convolution layer
        self.convtranspose1 = nn.ConvTranspose2d(2048, 2048, kernel_size=3, stride=1, padding=1,
                                                 output_padding=0, groups=1, bias=False, dilation=1)
        # newly added max pooling layer
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        # drop the original fc layer and add a new fclass layer instead
        self.fclass = nn.Linear(2048, num_classes)
 
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
 
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
 
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
 
        return nn.Sequential(*layers)
 
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
 
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
 
        x = self.avgpool(x)
        # forward pass through the newly added layers
        x = self.convtranspose1(x)        # still 4D here: (N, 2048, 1, 1) for 224x224 inputs
        x = self.maxpool2(x)
        x = x.view(x.size(0), -1)         # flatten before the new classifier
        x = self.fclass(x)
 
        return x
# load the pretrained model
resnet50 = models.resnet50(pretrained=True)
cnn = CNN(Bottleneck, [3, 4, 6, 3])
# read the parameters
pretrained_dict = resnet50.state_dict()
model_dict = cnn.state_dict()
# drop the keys in pretrained_dict that do not exist in model_dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# update the current model_dict
model_dict.update(pretrained_dict)
# load the state_dict we actually need
cnn.load_state_dict(model_dict)
# print(resnet50)
print(cnn)
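To confirm what was actually transferred, a small check (a sketch, not from the original post) can list the parameters that received no pretrained weights; these should be exactly the newly added convtranspose1 and fclass layers (maxpool2 has no parameters):

# keys present in the new model but absent from the filtered pretrained dict
missing = [k for k in model_dict if k not in pretrained_dict]
print(missing)   # e.g. convtranspose1.weight, fclass.weight, fclass.bias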

Loading weights from a model trained on multiple GPUs:
When training on multiple GPUs, the model has to be wrapped with model = torch.nn.DataParallel(model).

A saved checkpoint may contain key-value pairs such as state_dict, epoch, accuracy, etc.; we only need the value stored under the state_dict key.
For example:

pretrained_dict = torch.load(dict_path)
model_dict_clone = pretrained_dict.copy()
for key, value in model_dict_clone.items():
    if key == 'state_dict':
        pretrained_dict = model_dict_clone['state_dict']
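Once extracted, the inner state_dict can be loaded as usual (a sketch; model is assumed to be the same architecture that produced the checkpoint, wrapped with DataParallel if it was saved that way):

model.load_state_dict(pretrained_dict)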

Loading weights trained on a single GPU into a multi-GPU model
Weights saved from single-GPU training have keys without the module. prefix, so they cannot be loaded directly into a DataParallel model. The keys have to be renamed, and the dictionary must be wrapped with list() before iterating, otherwise modifying it during iteration raises an error.

pretrained_dict = torch.load(dict_path)
model_dict_clone = pretrained_dict.copy()
for key, value in list(model_dict_clone.items()):
    model_dict_clone['module.' + key] = model_dict_clone.pop(key)
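After the keys are renamed, the weights can be loaded into the wrapped model (a sketch; model is assumed to be the same architecture that produced the checkpoint):

model = torch.nn.DataParallel(model)
model.load_state_dict(model_dict_clone)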