ReLU → PReLU
网络模型代码
class TumorNet(nn.Module):
    """Minimal 3D encoder stage: two conv layers with InstanceNorm + ReLU."""

    def __init__(self):
        super(TumorNet, self).__init__()
        # Feature counts per stage: [input channels, stage-1 channels].
        self.nff = [1, 8]
        in_ch, out_ch = self.nff
        # forward1: two 3x3x3 convolutions; padding=1 keeps spatial size.
        self.forward1 = nn.Sequential(
            nn.Conv3d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.InstanceNorm3d(out_ch),
            nn.ReLU(inplace=True),
            # nn.PReLU(),  # alternative activation for the ReLU->PReLU experiment
            nn.Conv3d(out_ch, out_ch, kernel_size=3, padding=1),
            nn.InstanceNorm3d(out_ch),
            nn.ReLU(inplace=True)
            # nn.PReLU()
        )

    def forward(self, input):
        # Encoder: a single conv block, channels 1 -> 8, spatial dims unchanged.
        return self.forward1(input)
Step1: 打印网络修改前后的参数名和shape
此步骤帮助你对模型改动一目了然,然后确定参数修改方案。
# Instantiate the model on the GPU and list every learnable parameter
# together with its shape, to compare the model before and after the edit.
net = TumorNet().cuda()
for pname, pval in net.named_parameters():
    print(pname, pval.shape)
其中红色为“增加”的网络参数。
注:模型的参数表是OrderedDict类型(有序字典),旧模型中的ReLU虽然没有参数,但是也会占用编号。
Step2: 确定并实施修改方案
import torch
# Load the checkpoint trained with the old (ReLU) architecture.
# Renamed from `dict` to avoid shadowing the Python builtin.
state = torch.load("./results/model_0-9-8222.pth")
# Each nn.PReLU() holds one learnable slope of shape [1], initialised to
# 0.25 — it must be a 1-element tensor, not a plain Python scalar.
default = torch.Tensor([0.25])
# The parameter-free ReLU still occupied slots 2 and 5 of the Sequential,
# so the new PReLU weights take those index-based names.
state['forward1.2.weight'] = default
state["forward1.5.weight"] = default
torch.save(state, "./results/Prelu_model_0-9-8222.pth")
Pool>Stride Convolution
网络模型代码
from torch import nn
class TumorNet(nn.Module):
    """3D encoder stage (PReLU version) followed by 2x downsampling."""

    def __init__(self):
        super(TumorNet, self).__init__()
        # Feature counts: [input channels, stage-1 channels, stage-2 channels].
        self.nff = [1, 8, 16]
        in_ch, mid_ch = self.nff[0], self.nff[1]
        # forward1: two 3x3x3 convolutions with InstanceNorm + PReLU.
        self.forward1 = nn.Sequential(
            nn.Conv3d(in_ch, mid_ch, kernel_size=3, padding=1),
            nn.InstanceNorm3d(mid_ch),
            # nn.ReLU(inplace=True),  # replaced by PReLU in this variant
            nn.PReLU(),
            nn.Conv3d(mid_ch, mid_ch, kernel_size=3, padding=1),
            nn.InstanceNorm3d(mid_ch),
            # nn.ReLU(inplace=True)
            nn.PReLU()
        )
        # Halve each spatial dimension; the strided-conv alternative below
        # is the "Pool -> Stride Convolution" replacement discussed in the text.
        self.maxpool = nn.MaxPool3d(kernel_size=2, stride=2)
        # self.downsamp1 = nn.Conv3d(self.nff[1], self.nff[2], kernel_size=3, stride=2, padding=1)

    def forward(self, input):
        # Encoder: conv block then 2x2x2 downsampling.
        features = self.forward1(input)
        return self.maxpool(features)
        # down1 = self.downsamp1(layer1)  # strided-conv alternative
Step1: 打印网络修改前后的参数名和shape
此步骤帮助你对模型改动一目了然,然后确定参数修改方案。
# Build the modified model on the GPU and print each parameter's name and
# shape, so the added/removed entries are easy to spot.
net = TumorNet().cuda()
for param_name, param_tensor in net.named_parameters():
    print(param_name, param_tensor.shape)
其中红色为“增加”的网络参数。
Step2: 确定并实施修改方案
import torch
# Load the old checkpoint (pooling-based model).
# Renamed from `dict` to avoid shadowing the Python builtin.
state = torch.load("./results/model_0-9-8222.pth")
# The strided-conv layers ('downsamp*') are absent from the old checkpoint,
# so create freshly initialised tensors for them, taking the required
# shapes from the new model instance `net` built in Step 1.
for name, param in net.named_parameters():
    if name.startswith('downsamp'):
        print(name, param.shape)
        if name.endswith('weight'):
            # Kaiming init with a=0.25, matching the PReLU initial slope.
            weight = torch.empty(param.shape)
            state[name] = nn.init.kaiming_uniform_(weight, 0.25)
        if name.endswith('bias'):
            # Biases start at zero.
            bias = torch.empty(param.shape)
            state[name] = nn.init.constant_(bias, 0)
# NOTE(review): output path reuses the "Prelu_" name from the previous
# section even though this step adds downsampling layers — confirm intended.
torch.save(state, "./results/Prelu_model_0-9-8222.pth")
神经网络的权重初始化:
针对ReLU的初始化方法:torch.nn.init.kaiming_uniform_(), torch.nn.init.kaiming_normal_()
PyTorch网络模型:修改参数值,修改参数名,添加参数层,删除参数层