UNet Learning Notes

I. Network Architecture

 

[Figure: U-Net architecture diagram, with the contracting path on the left and the expansive path on the right.]

The figure above shows the network structure, which consists of a contracting path and an expansive path. The contracting path follows the typical architecture of a convolutional network. It is a repeated block: two convolution layers with 3×3 kernels and ReLU activations, followed by a 2×2 max pooling layer with stride 2. After each downsampling step the number of feature channels is doubled. Each step in the expansive path starts with an up-convolution (transposed convolution), which halves the number of feature channels and doubles the size of the feature map. The result of the up-convolution is then concatenated with the feature map from the corresponding step of the contracting path; since the contracting-path feature map is slightly larger (with unpadded convolutions), it is cropped before concatenation. The concatenated map then passes through two more 3×3 convolutions. The final layer is a 1×1 convolution that maps the 64-channel feature map to the desired output depth (the number of classes, e.g. 2 for binary segmentation). In total the network has 23 convolutional layers.
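
To make the channel and size bookkeeping concrete, here is a minimal sketch of how the contracting path evolves. The 256×256 input size is an assumption for illustration (it matches the test at the bottom of this post); the paper itself uses a 572×572 input with unpadded convolutions.

channels, size = 64, 256
for step in range(1, 6):
    print(f"down step {step}: {channels} channels, {size}x{size}")
    if step < 5:
        channels *= 2  # each downsampling step doubles the channels
        size //= 2     # 2x2 max pooling with stride 2 halves H and W

# prints: 64@256x256, 128@128x128, 256@64x64, 512@32x32, 1024@16x16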

II. Module Implementation

1. Convolution block

def double_conv(in_c,out_c):
    conv = nn.Sequential(
        nn.Conv2d(in_c,out_c,kernel_size=3,stride=1,padding=1),
        nn.BatchNorm2d(out_c),
        # Dropout to reduce overfitting
        nn.Dropout(0.3),
        nn.ReLU(inplace=True),

        nn.Conv2d(out_c, out_c, kernel_size=3,stride=1,padding=1),
        nn.BatchNorm2d(out_c),
        nn.Dropout(0.3),
        nn.ReLU(inplace=True)
    )

    return conv

About this change: in the original paper the feature map shrinks after each pair of convolutions, but for this segmentation task I want the input and output images to have the same size, so I keep the spatial size unchanged after every convolution by using padding=1.
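
A quick way to see the difference (a minimal sketch; the 256×256 size is just an example):

import torch
import torch.nn as nn

x = torch.randn(1, 3, 256, 256)

# padded 3x3 conv, as used here: the spatial size is preserved
padded = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
print(padded(x).shape)    # torch.Size([1, 64, 256, 256])

# unpadded 3x3 conv, as in the original paper: H and W shrink by 2
unpadded = nn.Conv2d(3, 64, kernel_size=3, stride=1)
print(unpadded(x).shape)  # torch.Size([1, 64, 254, 254])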

2. Upsampling and the activation function

    def forward(self, x):
        # Contracting path: each double_conv keeps the spatial size,
        # each down_sample halves it, and the channel count doubles.
        R1 = self.down_conv_1(x)
        R2 = self.down_conv_2(self.down_sample1(R1))
        R3 = self.down_conv_3(self.down_sample2(R2))
        R4 = self.down_conv_4(self.down_sample3(R3))
        Y = self.down_conv_5(self.down_sample4(R4))

        # Alternative nearest-neighbour upsampling, kept commented out
        # for reference:
        # m1 = F.interpolate(R4, size=32, mode='nearest')
        # m2 = F.interpolate(R3, size=64, mode='nearest')
        # m3 = F.interpolate(R2, size=128, mode='nearest')
        # m4 = F.interpolate(R1, size=256, mode='nearest')

        # Expansive path: each transposed conv doubles the spatial size
        # and halves the channels; the result is concatenated with the
        # matching contracting-path feature map along dim=1 (channels).
        U1 = self.up_conv_1(torch.cat((R4, self.up_1(Y)), dim=1))
        U2 = self.up_conv_2(torch.cat((R3, self.up_2(U1)), dim=1))
        U3 = self.up_conv_3(torch.cat((R2, self.up_3(U2)), dim=1))
        U4 = self.up_conv_4(torch.cat((R1, self.up_4(U3)), dim=1))

        # Final conv maps to the output channels; the Sigmoid squashes
        # each output pixel into [0, 1].
        return self.Th(self.output(U4))

With the change above, the cropping of the contracting-path feature maps described in the original paper is no longer needed: the feature maps on both paths already match in size, so the steps here are a bit simpler.
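
A minimal sketch of why the concatenation works without cropping. The sizes below assume a 256×256 input, so R4 is 512 channels at 32×32 and the bottleneck Y is 1024 channels at 16×16:

import torch
import torch.nn as nn

R4 = torch.randn(1, 512, 32, 32)   # contracting-path feature map
Y = torch.randn(1, 1024, 16, 16)   # bottleneck feature map

up_1 = nn.ConvTranspose2d(1024, 512, 2, 2)  # kernel 2, stride 2
u = up_1(Y)
print(u.shape)  # torch.Size([1, 512, 32, 32]) -- same H and W as R4

# channel-wise concatenation along dim=1, no cropping needed
print(torch.cat((R4, u), dim=1).shape)  # torch.Size([1, 1024, 32, 32])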

Below is the full code (parts of it are, of course, adapted from other people's implementations).

import torch
import torch.nn as nn
from torch.nn import functional as F

def double_conv(in_c,out_c):
    conv = nn.Sequential(
        nn.Conv2d(in_c,out_c,kernel_size=3,stride=1,padding=1),
        nn.BatchNorm2d(out_c),
        # Dropout to reduce overfitting
        nn.Dropout(0.3),
        nn.ReLU(inplace=True),

        nn.Conv2d(out_c, out_c, kernel_size=3,stride=1,padding=1),
        nn.BatchNorm2d(out_c),
        nn.Dropout(0.3),
        nn.ReLU(inplace=True)
    )

    return conv

class down_sampling(nn.Module):
    # 2x2 max pooling with stride 2; halves H and W.
    # Note: the channel argument c is unused, and the ReLU is a no-op
    # here, since the inputs already pass through a ReLU inside
    # double_conv and are therefore non-negative.
    def __init__(self, c):
        super(down_sampling, self).__init__()

        self.down = nn.Sequential(
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU()
        )

    def forward(self, x):
        return self.down(x)

class UNet(nn.Module):
    def __init__(self):
        super(UNet, self).__init__()

        # contracting path
        self.down_conv_1 = double_conv(3, 64)
        self.down_sample1 = down_sampling(64)

        self.down_conv_2 = double_conv(64, 128)
        self.down_sample2 = down_sampling(128)

        self.down_conv_3 = double_conv(128, 256)
        self.down_sample3 = down_sampling(256)

        self.down_conv_4 = double_conv(256, 512)
        self.down_sample4 = down_sampling(512)

        self.down_conv_5 = double_conv(512, 1024)

        # expansive path: each transposed conv halves the channels and
        # doubles the spatial size
        self.up_1 = nn.ConvTranspose2d(1024, 512, 2, 2)
        self.up_conv_1 = double_conv(1024, 512)

        self.up_2 = nn.ConvTranspose2d(512, 256, 2, 2)
        self.up_conv_2 = double_conv(512, 256)

        self.up_3 = nn.ConvTranspose2d(256, 128, 2, 2)
        self.up_conv_3 = double_conv(256, 128)

        self.up_4 = nn.ConvTranspose2d(128, 64, 2, 2)
        self.up_conv_4 = double_conv(128, 64)

        # Output head: the paper uses a 1x1 conv to num_classes; here a
        # 3x3 conv maps to 3 channels, followed by a Sigmoid.
        self.Th = torch.nn.Sigmoid()
        self.output = nn.Conv2d(64, 3, 3, 1, 1)

    def forward(self, x):
        R1 = self.down_conv_1(x)
        R2 = self.down_conv_2(self.down_sample1(R1))
        R3 = self.down_conv_3(self.down_sample2(R2))
        R4 = self.down_conv_4(self.down_sample3(R3))
        Y = self.down_conv_5(self.down_sample4(R4))

        # alternative nearest-neighbour upsampling, kept for reference:
        # m1 = F.interpolate(R4, size=32, mode='nearest')
        # m2 = F.interpolate(R3, size=64, mode='nearest')
        # m3 = F.interpolate(R2, size=128, mode='nearest')
        # m4 = F.interpolate(R1, size=256, mode='nearest')

        U1 = self.up_conv_1(torch.cat((R4, self.up_1(Y)), dim=1))
        U2 = self.up_conv_2(torch.cat((R3, self.up_2(U1)), dim=1))
        U3 = self.up_conv_3(torch.cat((R2, self.up_3(U2)), dim=1))
        U4 = self.up_conv_4(torch.cat((R1, self.up_4(U3)), dim=1))

        return self.Th(self.output(U4))

if __name__ == '__main__':
    # sanity check: output spatial size should match the input
    a = torch.randn(2, 3, 256, 256)
    net = UNet()
    print(net(a).shape)  # torch.Size([2, 3, 256, 256])
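
Since the network ends in a Sigmoid, nn.BCELoss is a natural pairing. Below is a minimal, hypothetical training step with random stand-in data, just to show the wiring; the batch size, learning rate, and optimizer choice are assumptions, not from the original post.

import torch
import torch.nn as nn

net = UNet()
criterion = nn.BCELoss()  # expects predictions and targets in [0, 1]
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)

images = torch.randn(2, 3, 256, 256)  # stand-in for an image batch
masks = torch.rand(2, 3, 256, 256)    # stand-in for masks in [0, 1]

optimizer.zero_grad()
loss = criterion(net(images), masks)
loss.backward()
optimizer.step()
print(loss.item())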
