一、前言
本篇总结unet、unet++及添加了deep supervision的unet++ 原理及代码pytorch实现
二、原理
unet 和 unet++结构图分别如左右所示:
上面可看出unet 是简单向下采样4次,最后缩放16倍,再向上采样4次还原到原图像
unet++,则将各支路的节点都尽可能搭配利用起来
三、unet 和 unet++ 代码
先实现基础 vgg block:
import torch
import torch.nn as nn
import torch.nn.functional as F
class VGGBlock(nn.Module):
    """VGG-style convolutional block: (Conv3x3 => BN => activation) * 2.

    Spatial size is preserved (kernel 3, padding 1); only the channel
    count changes: in_channels -> middle_channels -> out_channels.

    Args:
        in_channels: number of channels of the input feature map.
        middle_channels: channels after the first conv.
        out_channels: channels after the second conv.
        act_func: activation module applied after each BN. Defaults to
            ``nn.ReLU(inplace=True)``. A fresh instance is created per
            block when omitted — a module instance in the signature's
            default would be built once at definition time and shared by
            every ``VGGBlock`` (the mutable-default-argument pitfall).
    """

    def __init__(self, in_channels, middle_channels, out_channels, act_func=None):
        super().__init__()
        # Create the default activation per instance, not per class definition.
        self.act_func = nn.ReLU(inplace=True) if act_func is None else act_func
        self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(middle_channels)
        self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        """Apply conv->bn->act twice; returns a tensor with out_channels channels."""
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.act_func(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.act_func(out)
        return out
基本的下采样、上采样函数
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
"""
(convolution => [BN] => ReLU) * 2
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.C