I. Model Definition
Common network types:
- CNN: solves many problems in image and video processing
- RNN/LSTM: sequence-data processing
- GNN: graph-structured data
The main ways to define a model:

Method | Function | Use case |
---|---|---|
Sequential | Chains layers in a simple sequence | Good for quickly validating an idea |
ModuleList | Holds a list of modules, making it easy to add similar layers | Very convenient when an identical layer has to be repeated many times |
ModuleDict | Like ModuleList, but makes it easier to name each layer | Same as ModuleList |
1. Sequential
Two ways to arrange the layers:

- Direct arrangement:

import torch.nn as nn

# A two-layer network
net1 = nn.Sequential(
    nn.Linear(784, 256),  # 784 inputs, 256 hidden units
    nn.ReLU(),            # activation
    nn.Linear(256, 10)    # 10 output units
)
print(net1)

Output:
Sequential(
  (0): Linear(in_features=784, out_features=256, bias=True)
  (1): ReLU()
  (2): Linear(in_features=256, out_features=10, bias=True)
)
- Using an OrderedDict (an ordered dictionary), which names each layer:

import collections

net2 = nn.Sequential(collections.OrderedDict([
    ('fc1', nn.Linear(784, 256)),
    ('relu1', nn.ReLU()),
    ('fc2', nn.Linear(256, 10))
]))
print(net2)

Output:
Sequential(
  (fc1): Linear(in_features=784, out_features=256, bias=True)
  (relu1): ReLU()
  (fc2): Linear(in_features=256, out_features=10, bias=True)
)
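A Sequential model can be called directly on an input, and layers named via an OrderedDict are also reachable as attributes; a quick check (the batch size 2 is arbitrary):

import torch

x = torch.rand(2, 784)
print(net2(x).shape)  # torch.Size([2, 10])
print(net2.fc1)       # Linear(in_features=784, out_features=256, bias=True)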
2. ModuleList
# Store the defined layers; the execution order is not yet specified
net3 = nn.ModuleList([nn.Linear(784, 256), nn.ReLU()])
# Like a Python list: append adds a single module, extend adds a list of modules
net3.append(nn.Linear(256, 10))
# Index access; -1 is the last element
print(net3[-1])
print(net3)

Output:
Linear(in_features=256, out_features=10, bias=True)
ModuleList(
  (0): Linear(in_features=784, out_features=256, bias=True)
  (1): ReLU()
  (2): Linear(in_features=256, out_features=10, bias=True)
)
A ModuleList on its own is just a container; the model definition is complete only after a forward function specifies the order in which the layers are applied:
class Net3(nn.Module):
    def __init__(self):
        super().__init__()
        # Define the layers
        self.modulelist = nn.ModuleList([nn.Linear(784, 256), nn.ReLU()])
        self.modulelist.append(nn.Linear(256, 10))

    def forward(self, x):
        # Apply the layers in the order they appear in the ModuleList
        for layer in self.modulelist:
            x = layer(x)
        return x
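As a quick sanity check of Net3 (the batch size 4 is arbitrary):

net = Net3()
out = net(torch.rand(4, 784))
print(out.shape)  # torch.Size([4, 10])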
3. ModuleDict
Similar in purpose to ModuleList, but makes it easier to give each layer a name:
net4 = nn.ModuleDict({
    'linear': nn.Linear(784, 256),
    'act': nn.ReLU(),
})
net4['output'] = nn.Linear(256, 10)
# Access by key or by attribute
print(net4['linear'])
print(net4.output)
print(net4)
Output:
Linear(in_features=784, out_features=256, bias=True)
Linear(in_features=256, out_features=10, bias=True)
ModuleDict(
  (linear): Linear(in_features=784, out_features=256, bias=True)
  (act): ReLU()
  (output): Linear(in_features=256, out_features=10, bias=True)
)
As with ModuleList, a forward function is still required to fix the computation order.
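A minimal sketch of such a forward (the class name Net4 and the layer names are illustrative; dict insertion order is preserved in Python 3.7+):

class Net4(nn.Module):
    def __init__(self):
        super().__init__()
        self.moduledict = nn.ModuleDict({
            'linear': nn.Linear(784, 256),
            'act': nn.ReLU(),
            'output': nn.Linear(256, 10),
        })

    def forward(self, x):
        # Apply the layers in insertion order
        for layer in self.moduledict.values():
            x = layer(x)
        return x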
II. Building Complex Networks
1. U-Net
Its residual (skip) connection structure addresses the degradation problem in model training.
The main building blocks of U-Net:
1) Two convolutions inside each sub-block (Double Convolution)
2) Downsampling connections between the blocks on the left side, i.e. max pooling
3) Upsampling connections between the blocks on the right side
4) The output layer
import torch
import torch.nn as nn
import torch.nn.functional as F
Two convolutions inside a block:
class DoubleConv(nn.Module):
    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        if not mid_channels:
            mid_channels = out_channels
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.double_conv(x)
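A quick shape check (the 224×224 input size is arbitrary):

dc = DoubleConv(3, 64)
print(dc(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 64, 224, 224])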
Downsampling, i.e. max pooling followed by a double convolution:
class Down(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels)
        )

    def forward(self, x):
        return self.maxpool_conv(x)
Upsampling:
class Up(nn.Module):
    def __init__(self, in_channels, out_channels, bilinear=False):
        super().__init__()
        # Bilinear interpolation keeps the channel count;
        # a transposed convolution halves it
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Pad x1 so its spatial size matches the skip connection x2
        diffy = x2.size()[2] - x1.size()[2]
        diffx = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [diffx // 2, diffx - diffx // 2,
                        diffy // 2, diffy - diffy // 2])
        # Concatenate the skip connection along the channel dimension
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)
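The padding logic matters when the encoder produces odd spatial sizes; a small check using the transposed-convolution variant (channel counts chosen to match the up1 stage below):

up = Up(1024, 512)  # bilinear=False
x1 = torch.randn(1, 1024, 14, 14)  # from the deeper layer
x2 = torch.randn(1, 512, 28, 28)   # skip connection
print(up(x1, x2).shape)  # torch.Size([1, 512, 28, 28])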
Output layer:
class OutConv(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)
Assembling the pieces:
class UNet(nn.Module):
    def __init__(self, n_channels, n_classes, bilinear=False):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        # With bilinear upsampling, halve the channel counts on the way up
        factor = 2 if bilinear else 1
        self.down4 = Down(512, 1024 // factor)
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

    def forward(self, x):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        logits = self.outc(x)
        return logits
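A sanity check of the assembled network (the name model and the 224×224 RGB input are illustrative):

model = UNet(3, 1)
x = torch.randn(1, 3, 224, 224)
print(model(x).shape)  # torch.Size([1, 1, 224, 224])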
2. Modifying a Model
Suppose we have an existing model whose structure partly fails to meet our requirements; to use it, we need to make the necessary structural modifications.
Starting from an existing model, we can:
- modify some of its layers
- add extra inputs
- add extra outputs
2.1 Modifying Model Layers
Access the part to be modified and replace it:
import copy

unet = UNet(3, 1)
unet1 = copy.deepcopy(unet)
print(unet1.outc)

Output:
OutConv(
  (conv): Conv2d(64, 1, kernel_size=(1, 1), stride=(1, 1))
)

# Check the output shape before the change
b = torch.randn(1, 3, 224, 224)
out_unet1 = unet1(b)
print(out_unet1.shape)
# Modify: access the layer to change and re-instantiate it
unet1.outc = OutConv(64, 5)
print(unet1.outc)
out_unet1 = unet1(b)
print(out_unet1.shape)

Output:
torch.Size([1, 1, 224, 224])
OutConv(
  (conv): Conv2d(64, 5, kernel_size=(1, 1), stride=(1, 1))
)
torch.Size([1, 5, 224, 224])
2.2 Adding an Extra Input
Pass an extra parameter, add_variable, into forward:
class UNet2(nn.Module):
    # Channel counts must match the forward flow; if add_variable were
    # concatenated rather than added, the counts would have to change
    # (e.g. 128 channels becomes 129 after concatenating one extra channel)
    def __init__(self, n_channels, n_classes, bilinear=True):
        super(UNet2, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        factor = 2 if bilinear else 1
        self.down4 = Down(512, 1024 // factor)
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

    # add_variable can be a constant (a value added to every element of x)
    # or a mask with the same shape as x
    def forward(self, x, add_variable):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        # The modification: fuse in the external input
        x = x + add_variable
        logits = self.outc(x)
        return logits
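A usage sketch for the extra input (the tensors are random placeholders; at the addition point x has 64 channels, so a single-channel mask broadcasts against it):

unet2 = UNet2(3, 1)
b = torch.randn(1, 3, 224, 224)
c = torch.randn(1, 1, 224, 224)  # broadcast-compatible external input
print(unet2(b, c).shape)  # torch.Size([1, 1, 224, 224])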
2.3 Adding an Extra Output
Add more values to the return statement; the caller must unpack the corresponding extra variables:
class UNet2(nn.Module):
    def __init__(self, n_channels, n_classes, bilinear=True):
        super(UNet2, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        factor = 2 if bilinear else 1
        self.down4 = Down(512, 1024 // factor)
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

    def forward(self, x):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        logits = self.outc(x)
        # Also return the intermediate feature map x5
        return logits, x5
unet2 = UNet2(3, 1)
b = torch.randn(1, 3, 224, 224)
# Unpack the extra output as well
out_unet2, mid_out = unet2(b)
print(out_unet2.shape, mid_out.shape)

Output:
torch.Size([1, 1, 224, 224]) torch.Size([1, 512, 14, 14])
3. Saving and Loading Models
Storage formats: pkl, pt, pth.
Stored content: the model structure and/or its weights.
# CPU or single GPU: save & load the entire model
torch.save(unet, "./unet_example.pth")
loaded_unet = torch.load("./unet_example.pth")
loaded_unet.state_dict()
# Weights only, via state_dict()
torch.save(unet.state_dict(), "./unet_weight_example.pth")
# Load the weights into a variable, then into the model
loaded_unet_weight = torch.load("./unet_weight_example.pth")
unet.load_state_dict(loaded_unet_weight)
unet.state_dict()
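When weights were saved on a GPU but must be loaded on a CPU-only machine, torch.load accepts a map_location argument; a minimal sketch (file name as above):

# Remap GPU-saved tensors onto the CPU at load time
loaded_cpu = torch.load("./unet_weight_example.pth", map_location="cpu")
unet.load_state_dict(loaded_cpu)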
# GPU: single-card and multi-card storage
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # for multiple cards use e.g. '0,1,2'
unet_single = unet.cuda()  # single card
unet_mul = copy.deepcopy(unet)
unet_mul = nn.DataParallel(unet_mul).cuda()  # multi-card
torch.save(unet_mul, "./unet_mul_example.pth")
loaded_unet_mul = torch.load("./unet_mul_example.pth")
# Multi-card: save the model weights
torch.save(unet_mul.state_dict(), "./unet_weight_mul_example.pth")
loaded_unet_weight_mul = torch.load("./unet_weight_mul_example.pth")
# unet_mul is already wrapped in DataParallel, so its state_dict loads back directly
unet_mul.load_state_dict(loaded_unet_weight_mul)
unet_mul.state_dict()
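DataParallel prefixes every parameter key with "module.", so loading multi-card weights into a plain (single-card) model needs the prefix stripped first; a sketch under that assumption:

# Load DataParallel-saved weights into an unwrapped model
state = torch.load("./unet_weight_mul_example.pth")
state = {k.replace("module.", "", 1): v for k, v in state.items()}
unet.load_state_dict(state)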