"""
网络结构
"""
self.conv_blocks_context = []       # encoder conv blocks
self.conv_blocks_localization = []  # decoder (localization) conv blocks
self.td = []                        # down: pooling layers for downsampling
self.tu = []                        # up: upsampling, either interpolation or transposed conv
self.seg_outputs = []               # per-resolution segmentation output heads
output_features = base_num_features
input_features = input_channels     # number of input channels

"""
Encoder
"""
for d in range(num_pool):
    # determine the first stride: with convolutional pooling, every stage after the
    # first one strides its first conv by the pooling kernel size of stage d - 1
    if d != 0 and self.convolutional_pooling:
        first_stride = pool_op_kernel_sizes[d - 1]
    else:
        first_stride = None

    self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]  # conv kernel size
    self.conv_kwargs['padding'] = self.conv_pad_sizes[d]         # conv padding
    # add convolutions: the stacked-conv helper appends num_conv_per_stage basic
    # blocks, each being Conv-Dropout-Norm-Nonlin
    self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
                                                      self.conv_op, self.conv_kwargs, self.norm_op,
                                                      self.norm_op_kwargs, self.dropout_op,
                                                      self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
                                                      first_stride, basic_block=basic_block))
    # downsampling: without convolutional pooling, pool_op is a max-pooling layer (e.g. nn.MaxPool2d)
    if not self.convolutional_pooling:
        self.td.append(pool_op(pool_op_kernel_sizes[d]))
    # compute the input and output sizes for the next stage
    input_features = output_features
    # output_features: the number of output feature maps grows by the downscale
    # multiplier, capped at self.max_num_features
    output_features = int(np.round(output_features * feat_map_mul_on_downscale))
    output_features = min(output_features, self.max_num_features)
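To make the channel bookkeeping concrete, here is a minimal standalone sketch of just this loop's arithmetic; the values 32 / 2 / 320 / 5 are assumed example settings, not taken from this code (the real ones are constructor arguments):

import numpy as np

base_num_features, feat_map_mul_on_downscale = 32, 2   # assumed example values
max_num_features, num_pool = 320, 5

output_features = base_num_features
for d in range(num_pool):
    print(f"encoder stage {d}: {output_features} feature maps")
    output_features = int(np.round(output_features * feat_map_mul_on_downscale))
    output_features = min(output_features, max_num_features)
# prints 32, 64, 128, 256, 320 -- the cap kicks in at the deepest stage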
"""
Bottleneck: the lowest level between encoder and decoder
"""
# now the bottleneck. "Bottleneck" refers to the change in channel count: the channels
# are first reduced and then expanded back to the original size, lowering the parameter count.
# determine the first stride
if self.convolutional_pooling:
    first_stride = pool_op_kernel_sizes[-1]
else:
    first_stride = None

# The output of the last conv must match the number of features from the skip connection
# if we are not using convolutional upsampling. If we use convolutional upsampling then
# the reduction in feature maps will be done by the transposed conv.
if self.convolutional_upsampling:
    final_num_features = output_features
else:
    final_num_features = self.conv_blocks_context[-1].output_channels

self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]
self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]
# append the bottleneck conv block: again num_conv_per_stage convolutions, split into
# two parts so that the last convolution outputs final_num_features channels
self.conv_blocks_context.append(nn.Sequential(
    StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
                      self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
                      self.nonlin_kwargs, first_stride, basic_block=basic_block),
    StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
                      self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
                      self.nonlin_kwargs, basic_block=basic_block)))

# if we don't want to do dropout in the localization pathway then we set the dropout
# probability to zero here (it is restored after the decoder is built)
if not dropout_in_localization:
    old_dropout_p = self.dropout_op_kwargs['p']
    self.dropout_op_kwargs['p'] = 0.0
"""
Decoder
"""
# now let's build the localization pathway
for u in range(num_pool):
    nfeatures_from_down = final_num_features
    # self.conv_blocks_context[-1] is the bottleneck, so start at -2 to read the
    # output channels of the encoder stage feeding the skip connection
    nfeatures_from_skip = self.conv_blocks_context[-(2 + u)].output_channels
    # after upsampling and concatenation the channel count doubles; this is the
    # input size of the decoder conv block (twice its output size)
    n_features_after_tu_and_concat = nfeatures_from_skip * 2

    # the first conv reduces the number of features to match those of the skip,
    # the following convs work on that number of features, and if we are not using
    # convolutional upsampling the final conv reduces the number of features again
    if u != num_pool - 1 and not self.convolutional_upsampling:
        # not the last block and not using convolutional upsampling
        final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
    else:
        final_num_features = nfeatures_from_skip
    if not self.convolutional_upsampling:
        # upsample by interpolation
        self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))
    else:
        # otherwise use a transposed convolution
        self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],
                                  pool_op_kernel_sizes[-(u + 1)], bias=False))

    self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[-(u + 1)]
    self.conv_kwargs['padding'] = self.conv_pad_sizes[-(u + 1)]
    # append the decoder's Conv-Dropout-Norm-Nonlin basic blocks; the input size is
    # n_features_after_tu_and_concat (upsampled features concatenated with the skip)
    self.conv_blocks_localization.append(nn.Sequential(
        StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,
                          self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,
                          self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),
        StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
                          self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
                          self.nonlin, self.nonlin_kwargs, basic_block=basic_block)))
"""
Segmentation output heads
"""
for ds in range(len(self.conv_blocks_localization)):
    # 1x1 convolution mapping the decoder features at each resolution to num_classes logits
    self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,
                                    1, 1, 0, 1, 1, seg_output_use_bias))
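The positional arguments here follow the conv constructor's signature (for nn.Conv2d: in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias), i.e. each head is a 1x1 convolution. A minimal sketch with assumed values (32 decoder channels, 3 classes, no bias):

import torch
import torch.nn as nn

head = nn.Conv2d(32, 3, 1, 1, 0, 1, 1, False)  # assuming conv_op = nn.Conv2d
print(head(torch.randn(1, 32, 64, 64)).shape)  # torch.Size([1, 3, 64, 64])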
self.upscale_logits_ops = []
# the cumulative product of the pooling kernel sizes gives the total upsampling factor
# needed to bring each auxiliary output back to full resolution
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_pool - 1):
    if self.upscale_logits:
        # if logits upscaling is enabled, upsample by the cumulative factor; otherwise identity
        self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
                                                mode=upsample_mode))
    else:
        self.upscale_logits_ops.append(lambda x: x)

if not dropout_in_localization:
    self.dropout_op_kwargs['p'] = old_dropout_p
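For intuition, a minimal sketch of cum_upsample, assuming five pooling stages with factor 2 each; entry usl + 1 is used because the coarsest decoder output already sits one upsampling step above the bottleneck:

import numpy as np

pool_op_kernel_sizes = [[2, 2]] * 5   # assumed example configuration
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
print(cum_upsample.tolist())
# [[32, 32], [16, 16], [8, 8], [4, 4], [2, 2]]
# upscale_logits_ops[usl] uses cum_upsample[usl + 1]: factors 16, 8, 4, 2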
5.2.3 Wrapping the network modules
"""
register all modules properly 正确注册所有模块,将模型封装在容器中
"""
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)  # decoder conv blocks
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)            # encoder and bottleneck conv blocks
self.td = nn.ModuleList(self.td)                                              # downsampling (max-pooling) layers
self.tu = nn.ModuleList(self.tu)                                              # upsampling (interpolation or transposed conv)
self.seg_outputs = nn.ModuleList(self.seg_outputs)                            # per-resolution output heads
if self.upscale_logits:
    self.upscale_logits_ops = nn.ModuleList(
        self.upscale_logits_ops)  # lambda x: x is not a Module so we need to distinguish here

if self.weightInitializer is not None:
    self.apply(self.weightInitializer)
    # self.apply(print_module_training_status)
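Why the wrapping matters: submodules kept in a plain Python list are not registered with the parent nn.Module, so their parameters are invisible to .parameters(), .to(device) and the optimizer. A minimal sketch:

import torch.nn as nn

class Plain(nn.Module):
    def __init__(self):
        super().__init__()
        self.blocks = [nn.Linear(4, 4)]                  # NOT registered

class Wrapped(nn.Module):
    def __init__(self):
        super().__init__()
        self.blocks = nn.ModuleList([nn.Linear(4, 4)])   # registered

print(len(list(Plain().parameters())))    # 0
print(len(list(Wrapped().parameters())))  # 2 (weight and bias)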
5.2.4 def forward(self, x)
def forward(self, x):
    skips = []        # feature maps saved for the skip connections
    seg_outputs = []  # predictions collected at every decoder resolution

    # encoder
    for d in range(len(self.conv_blocks_context) - 1):
        x = self.conv_blocks_context[d](x)   # encoder conv block
        skips.append(x)                      # store for the skip connection
        if not self.convolutional_pooling:
            x = self.td[d](x)                # downsampling

    # bottleneck
    x = self.conv_blocks_context[-1](x)      # [-1] is the bottleneck

    # decoder
    for u in range(len(self.tu)):
        x = self.tu[u](x)                           # upsampling
        x = torch.cat((x, skips[-(u + 1)]), dim=1)  # skip connection
        x = self.conv_blocks_localization[u](x)     # decoder conv block
        seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))

    if self._deep_supervision and self.do_ds:
        # deep supervision: return the full-resolution prediction plus the
        # (optionally upscaled) lower-resolution auxiliary predictions
        return tuple([seg_outputs[-1]] + [i(j) for i, j in
                                          zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
    else:
        return seg_outputs[-1]
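The double reversal in the deep-supervision return is easy to misread: seg_outputs is filled coarse-to-fine, so both lists are reversed before zipping so that each auxiliary head is paired with its upscaling op. A minimal sketch with strings in place of tensors:

seg_outputs = ["1/16", "1/8", "1/4", "1/2", "full"]   # decoder order: coarse -> fine
upscale_logits_ops = [lambda s: "up(" + s + ")"] * 4  # one op per auxiliary head

result = tuple([seg_outputs[-1]] + [op(s) for op, s in
                                    zip(upscale_logits_ops[::-1], seg_outputs[:-1][::-1])])
print(result)  # ('full', 'up(1/2)', 'up(1/4)', 'up(1/8)', 'up(1/16)')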