class Net(nn.Module):
    """Classifier head on top of a ResNet backbone.

    Drops the backbone's last two children (avgpool + fc), then applies a
    transposed convolution, max-pooling, and a final linear layer that maps
    the 2048-dim feature vector to 8 classes.
    """

    def __init__(self, model):
        super().__init__()
        # Keep every child of the backbone except its final two layers
        # (the average-pool and the fully-connected classifier).
        self.resnet_layer = nn.Sequential(*list(model.children())[:-2])
        self.transion_layer = nn.ConvTranspose2d(2048, 2048, kernel_size=14, stride=3)
        self.pool_layer = nn.MaxPool2d(32)
        self.Linear_layer = nn.Linear(2048, 8)

    def forward(self, x):
        features = self.resnet_layer(x)
        upsampled = self.transion_layer(features)
        pooled = self.pool_layer(upsampled)
        # Flatten to (batch, 2048) before the classifier.
        flat = pooled.view(pooled.size(0), -1)
        return self.Linear_layer(flat)


resnet = models.resnet50(pretrained=True)
model = Net(resnet)
训练特定层,冻结其它层
基本思想是,所有模型都有一个函数model.children(),它返回它的层。 在每个层中,都有参数(或权重),可以在任何子层(即层)上使用.parameters()获得这些参数。 现在,每个参数都有一个名为requires_grad的属性,默认值为True。 True意味着它将被反向传播,因此要冻结一个层,你需要为该层的所有参数设置requires_grad为False。
import torchvision.models as models
resnet = models.resnet18(pretrained=True)

# Freeze the first 6 of ResNet18's 10 top-level children: setting
# requires_grad=False on their parameters excludes them from
# backpropagation, so only the remaining layers are trained.
# (enumerate replaces the original hand-rolled ct counter; a stray
# HTML "<br>" fused into the counter line was also removed.)
for child_idx, child in enumerate(resnet.children(), start=1):
    if child_idx < 7:
        for param in child.parameters():
            param.requires_grad = False