接上一篇
接着聊聊 FCN.py 中的小细节吧……
先附上代码
# -*- coding: utf-8 -*-
"""
FCN.py
"""
import torch.nn as nn
from torchvision.models.vgg import VGG
#继承nn.Module,撰写自己的网络层
class FCNs(nn.Module):
'''
类FCNs:将最后一个特征图直接上采样32倍(5次步长为2、卷积核为3*3的反卷积操作)得到的最终
分割结果。
'''
def __init__(self, pretrained_net, n_class):
    """Build the FCN-32s decoder head.

    Five stride-2 transposed convolutions upsample the backbone's final
    feature map by 32x (2**5) along the channel path
    512 -> 512 -> 256 -> 128 -> 64 -> 32, each followed by batch norm;
    a final 1x1 convolution maps the 32 channels to per-class scores.

    Args:
        pretrained_net: backbone module (e.g. a VGG feature extractor);
            presumably its forward returns a dict of intermediate feature
            maps keyed 'x1'..'x5' — TODO confirm against the backbone.
        n_class: number of output segmentation classes.
    """
    super().__init__()
    self.n_class = n_class
    self.pretrained_net = pretrained_net
    self.relu = nn.ReLU(inplace=True)

    # Decoder: each stage doubles spatial resolution (see _upsample2x).
    self.deconv1 = self._upsample2x(512, 512)
    self.bn1 = nn.BatchNorm2d(512)
    self.deconv2 = self._upsample2x(512, 256)
    self.bn2 = nn.BatchNorm2d(256)
    self.deconv3 = self._upsample2x(256, 128)
    self.bn3 = nn.BatchNorm2d(128)
    self.deconv4 = self._upsample2x(128, 64)
    self.bn4 = nn.BatchNorm2d(64)
    self.deconv5 = self._upsample2x(64, 32)
    self.bn5 = nn.BatchNorm2d(32)

    # Classifier: 1x1 convolution reducing channels from 32 to n_class.
    self.classifier = nn.Conv2d(32, n_class, kernel_size=1)

@staticmethod
def _upsample2x(in_channels, out_channels):
    """3x3 transposed conv that exactly doubles spatial size.

    With stride=2, padding=1, output_padding=1 the output size is
    2*H x 2*W for an H x W input.
    """
    return nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3,
                              stride=2, padding=1, dilation=1,
                              output_padding=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5']
x4 = output['x4']
x3 = output['x3']
x2 = output['x