Semantic Segmentation 3: PSPNet


PSPNet is built around the Pyramid Pooling Module (PSP module). The PSP module aggregates contextual information from regions of different sizes, which improves the network's ability to capture global context. In the typical configuration, the input feature map is divided into 6x6, 3x3, 2x2 and 1x1 grids; each sub-region is average-pooled, the pooled results are upsampled back to a common size, and finally the features are concatenated to combine context from the different regions, as sketched in the example below.
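As a conceptual sketch (not the exact implementation shown later in this post), the snippet below builds the four pyramid levels with standard Keras layers; the 30x30x320 input shape, the 80 output channels per level, and the bilinear upsampling are assumptions chosen to match the shape comments used later.

from keras.layers import (Activation, AveragePooling2D, BatchNormalization,
                          Concatenate, Conv2D, Input, UpSampling2D)
from keras.models import Model

# Assumed example: a 30x30x320 feature map and pool factors 1, 2, 3, 6.
feats = Input(shape=(30, 30, 320))
levels = [feats]
for factor in [1, 2, 3, 6]:
    size = 30 // factor                                        # 30, 15, 10, 5
    x = AveragePooling2D(pool_size=size, strides=size)(feats)  # factor x factor grid
    x = Conv2D(80, 1, use_bias=False)(x)                       # 320 // 4 = 80 channels
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = UpSampling2D(size=size, interpolation='bilinear')(x)   # back to 30x30
    levels.append(x)
out = Concatenate()(levels)                                    # 30x30x(320 + 4*80) = 30x30x640
model = Model(feats, out)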

1 PSPNet Structure

1.1. Backbone: MobileNetV2

a. Structure

# 1. bottleneck (inverted residual block)
#    CBA = Conv + BN + ReLU6, DBA = DepthwiseConv + BN + ReLU6, CB = Conv + BN (linear)
input
↓
CBA(1*1, expand channels)
↓
DBA(3*3, s=1 or 2)
↓
CB(1*1, project channels back down)
if skip_connection: add(input)

# 2.MobileNetV2
input
↓
conv2d(f=32,s=2)
↓
bottleneck*1(expand=1,f=16,s=1)
↓
bottleneck*2(expand=6,f=24,s=2)
↓
bottleneck*3(expand=6,f=32,s=2)
↓
bottleneck*4(expand=6,f=64,s=2)
↓
bottleneck*3(expand=6,f=96,s=1)->f4
↓
bottleneck*3(expand=6,f=160,s=2)
↓
bottleneck*1(expand=6,f=320,s=1)->f5
↓
conv2d(1*1,f=1280,s=1)
↓
avgpool(7*7)
↓
conv2d(1*1,f=num_classes)

b. Code

from keras import layers
from keras.activations import relu
from keras.layers import (Activation, Add, BatchNormalization, Concatenate,
                          Conv2D, DepthwiseConv2D, Dropout,
                          GlobalAveragePooling2D, Input, Lambda, ZeroPadding2D)
from keras.models import Model


def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

def relu6(x):
    return relu(x, max_value=6)

def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    in_channels = int(inputs.shape[-1])
    pointwise_filters = _make_divisible(int(filters * alpha), 8)
    prefix = 'expanded_conv_{}_'.format(block_id)

    x = inputs

    #----------------------------------------------------#
    #   1x1 convolution to expand the channels
    #   (expansion * number of input channels)
    #----------------------------------------------------#
    if block_id:
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    #----------------------------------------------------#
    #   3x3 depthwise convolution for feature extraction
    #----------------------------------------------------#
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,name=prefix + 'depthwise_BN')(x)
    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    #----------------------------------------------------#
    #   1x1 convolution to project the channels back down
    #   (linear bottleneck, no activation)
    #----------------------------------------------------#
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same', use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'project_BN')(x)

    #----------------------------------------------------#
    #   Residual connection
    #----------------------------------------------------#
    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])
    return x

def get_mobilenet_encoder(inputs_size, downsample_factor=8):
    if downsample_factor == 16:  # downsample 4 times (width and height halved 4 times)
        block4_dilation = 1
        block5_dilation = 2
        block4_stride = 2
    elif downsample_factor == 8:  # downsample 3 times (width and height halved 3 times)
        block4_dilation = 2
        block5_dilation = 4
        block4_stride = 1
    else:
        raise ValueError('Unsupported factor - `{}`, Use 8 or 16.'.format(downsample_factor))
    
    # 473,473,3
    inputs = Input(shape=inputs_size)

    alpha=1.0
    first_block_filters = _make_divisible(32 * alpha, 8)

    # 473,473,3 -> 237,237,32
    x = Conv2D(first_block_filters,
                kernel_size=3,
                strides=(2, 2), padding='same',
                use_bias=False, name='Conv')(inputs)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
    x = Activation(relu6, name='Conv_Relu6')(x)

    # 237,237,32 -> 237,237,16
    x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
                            expansion=1, block_id=0, skip_connection=False)

    #---------------------------------------------------------------#
    # 237,237,16 -> 119,119,24
    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
                            expansion=6, block_id=1, skip_connection=False)
    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
                            expansion=6, block_id=2, skip_connection=True)
                            
    #---------------------------------------------------------------#
    # 119,119,24 -> 60,60,32
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
                            expansion=6, block_id=3, skip_connection=False)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                            expansion=6, block_id=4, skip_connection=True)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                            expansion=6, block_id=5, skip_connection=True)

    #---------------------------------------------------------------#
    # 60,60,32 -> 30,30,64
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=block4_stride,
                            expansion=6, block_id=6, skip_connection=False)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=block4_dilation,
                            expansion=6, block_id=7, skip_connection=True)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=block4_dilation,
                            expansion=6, block_id=8, skip_connection=True)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=block4_dilation,
                            expansion=6, block_id=9, skip_connection=True)

    # 30,30,64 -> 30,30,96
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=block4_dilation,
                            expansion=6, block_id=10, skip_connection=False)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=block4_dilation,
                            expansion=6, block_id=11, skip_connection=True)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=block4_dilation,
                            expansion=6, block_id=12, skip_connection=True)
    # f4 feeds the auxiliary training branch
    f4 = x

    #---------------------------------------------------------------#
    # 30,30,96 -> 30,30,160 -> 30,30,320
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=block4_dilation,
                            expansion=6, block_id=13, skip_connection=False)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=block5_dilation,
                            expansion=6, block_id=14, skip_connection=True)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=block5_dilation,
                            expansion=6, block_id=15, skip_connection=True)

    x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=block5_dilation,
                            expansion=6, block_id=16, skip_connection=False)
    f5 = x
    return inputs, f4, f5
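As a quick usage sketch (not part of the original code): building the encoder alone with a 473x473x3 input and downsample_factor=16 should yield a 30x30x96 feature map for f4 and a 30x30x320 feature map for f5.

# Build only the MobileNetV2 encoder and inspect its two output feature maps.
inputs, f4, f5 = get_mobilenet_encoder([473, 473, 3], downsample_factor=16)
print(f4.shape)  # expected: (None, 30, 30, 96)
print(f5.shape)  # expected: (None, 30, 30, 320)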


1.2. Neck: PSP

a. Structure

input->AveragePooling2D(k=30,s=30)->CBA->resize->[30,30,80]->p1
input->AveragePooling2D(k=15,s=15)->CBA->resize->[30,30,80]->p2
input->AveragePooling2D(k=10,s=10)->CBA->resize->[30,30,80]->p3
input->AveragePooling2D(k=5, s=5 )->CBA->resize->[30,30,80]->p4
↓
concatenate([input,p1,p2,p3,p4]) -> [30,30,640]

b. Code

import numpy as np
from keras import backend as K
from keras.layers import Activation, AveragePooling2D, BatchNormalization, Conv2D, Lambda

# IMAGE_ORDERING (the data_format string) and resize_images (a resize helper)
# are defined elsewhere in the project.
def pool_block(feats, pool_factor, out_channel):
    h = K.int_shape(feats)[1]
    w = K.int_shape(feats)[2]
    #-----------------------------------------------------#
    #   Average pool over sub-regions.
    #   For a 30x30 input and pool_factor = 1, 2, 3, 6:
    #   pool_size = strides = 30, 15, 10, 5
    #-----------------------------------------------------#
    pool_size = strides = [int(np.round(float(h)/pool_factor)),int(np.round(float(w)/pool_factor))]
    x = AveragePooling2D(pool_size , data_format=IMAGE_ORDERING , strides=strides, padding='same')(feats)

    #-----------------------------------------------------#
    #   1x1 convolution to adjust the channels (out_channel//4)
    #-----------------------------------------------------#
    x = Conv2D(out_channel//4, (1 ,1), data_format=IMAGE_ORDERING, padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    #-----------------------------------------------------#
    #   Resize back to the spatial size of the input feature map
    #-----------------------------------------------------#
    x = Lambda(resize_images)([x, feats])
    return x
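The resize_images helper is not shown in this post. Below is a minimal sketch of what it could look like, assuming it bilinearly resizes the first tensor to the spatial size of the second; the project's actual helper may differ.

import tensorflow as tf
from keras import backend as K

def resize_images(args):
    # args = [x, ref]: resize x to ref's height/width using bilinear interpolation.
    # (On TF 1.x, tf.image.resize_images would be used instead of tf.image.resize.)
    x, ref = args
    new_size = (K.int_shape(ref)[1], K.int_shape(ref)[2])
    return tf.image.resize(x, new_size, method='bilinear')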

1.3 Head

a. Structure

input (PSP output)
↓
CBA -> Dropout
↓
Conv(f=num_classes, k=1) -> resize -> softmax -> main output
↓
if aux_branch:
f4 -> CBA -> Dropout -> Conv(f=num_classes, k=1) -> resize -> softmax -> aux output

b. Model code

def pspnet(n_classes, inputs_size, downsample_factor=8, backbone='mobilenet', aux_branch=True):
    # IMAGE_ORDERING, MERGE_AXIS and resize_images are helpers defined elsewhere in the project.
    if backbone == "mobilenet":
        #----------------------------------#
        #   Obtain two feature maps:
        #   f4: auxiliary branch   [30,30,96]
        #   o : backbone output    [30,30,320]
        #----------------------------------#
        img_input, f4, o = get_mobilenet_encoder(inputs_size, downsample_factor=downsample_factor)
        out_channel = 320
    elif backbone == "resnet50":
        img_input, f4, o = get_resnet50_encoder(inputs_size, downsample_factor=downsample_factor)
        out_channel = 2048
    else:
        raise ValueError('Unsupported backbone - `{}`, Use mobilenet, resnet50.'.format(backbone))

    #--------------------------------------------------------------#
    #   PSP module: average pool over sub-regions, dividing the
    #   feature map into 1x1, 2x2, 3x3 and 6x6 grids
    #--------------------------------------------------------------#
    pool_factors = [1, 2, 3, 6]
    pool_outs = [o]
    for p in pool_factors:
        pooled = pool_block(o, p, out_channel)
        pool_outs.append(pooled)

    #--------------------------------------------------------------------------------#
    #   Concatenate the pooled feature maps with the original one
    #   30, 30, 320 + 30, 30, 80 + 30, 30, 80 + 30, 30, 80 + 30, 30, 80 = 30, 30, 640
    #--------------------------------------------------------------------------------#
    o = Concatenate(axis=MERGE_AXIS)(pool_outs)

    # 30, 30, 640 -> 30, 30, 80
    o = Conv2D(out_channel//4, (3,3), data_format=IMAGE_ORDERING, padding='same', use_bias=False)(o)
    o = BatchNormalization()(o)
    o = Activation('relu')(o)

    # Dropout to reduce overfitting
    o = Dropout(0.1)(o)

    #---------------------------------------------------#
    #   Produce the prediction
    #   30, 30, 80 -> 30, 30, 21 -> 473, 473, 21
    #---------------------------------------------------#
    o = Conv2D(n_classes,(1,1),data_format=IMAGE_ORDERING, padding='same')(o)
    o = Lambda(resize_images)([o, img_input])

    #---------------------------------------------------#
    #   Softmax: per-pixel probability of each class
    #---------------------------------------------------#
    o = Activation("softmax", name="main")(o)

    if aux_branch:
        # 30, 30, 96 -> 30, 30, 40 
        f4 = Conv2D(out_channel//8, (3,3), data_format=IMAGE_ORDERING, padding='same', use_bias=False, name="branch_conv1")(f4)
        f4 = BatchNormalization(name="branch_batchnor1")(f4)
        f4 = Activation('relu', name="branch_relu1")(f4)
        f4 = Dropout(0.1)(f4)
        #---------------------------------------------------#
        #   Produce the auxiliary prediction
        #   30, 30, 40 -> 30, 30, 21 -> 473, 473, 21
        #---------------------------------------------------#
        f4 = Conv2D(n_classes,(1,1),data_format=IMAGE_ORDERING, padding='same', name="branch_conv2")(f4)
        f4 = Lambda(resize_images, name="branch_resize")([f4, img_input])

        f4 = Activation("softmax", name="aux")(f4)
        model = Model(img_input,[f4,o])
        return model
    else:
        model = Model(img_input,[o])
        return model
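A minimal usage sketch (the 473x473 input size and 21 classes follow the shape comments above; downsample_factor=16 is an assumption): with aux_branch=True the model has two softmax outputs, "aux" and "main".

# Build PSPNet with the MobileNetV2 backbone and the auxiliary branch enabled.
model = pspnet(n_classes=21, inputs_size=[473, 473, 3],
               downsample_factor=16, backbone='mobilenet', aux_branch=True)
model.summary()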


2 Loss

a. Loss functions

# 1. Cross Entropy Loss
# 2. Dice Loss = 1 - Dice

Dice = 2|X ∩ Y| / (|X| + |Y|) = 2*tp / (2*tp + fp + fn)

With beta = 1, the F-score computed in the code below reduces to this Dice coefficient, so the Dice loss is 1 - score.
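A tiny worked example (toy masks, not from the original post) showing that the set form and the tp/fp/fn form of the Dice coefficient agree:

import numpy as np

# Toy binary masks: prediction X and ground truth Y.
X = np.array([1, 1, 1, 0, 0])   # prediction
Y = np.array([1, 1, 0, 1, 0])   # ground truth

tp = np.sum(X * Y)              # 2
fp = np.sum(X) - tp             # 1
fn = np.sum(Y) - tp             # 1

dice_sets   = 2 * np.sum(X * Y) / (np.sum(X) + np.sum(Y))  # 4 / 6 ≈ 0.667
dice_counts = 2 * tp / (2 * tp + fp + fn)                  # 4 / 6 ≈ 0.667
print(dice_sets, dice_counts, 1 - dice_counts)             # Dice loss ≈ 0.333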



b. Code

import tensorflow as tf
from keras import backend as K

def dice_loss_with_CE(beta=1, smooth=1e-5):
    def _dice_loss_with_CE(y_true, y_pred):
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())

        # y_true is assumed to carry one extra (last) channel, e.g. for ignored pixels, which is dropped here
        CE_loss = - y_true[...,:-1] * K.log(y_pred)
        CE_loss = K.mean(K.sum(CE_loss, axis = -1))

        tp = K.sum(y_true[...,:-1] * y_pred, axis=[0,1,2])
        fp = K.sum(y_pred         , axis=[0,1,2]) - tp
        fn = K.sum(y_true[...,:-1], axis=[0,1,2]) - tp

        score = ((1 + beta ** 2) * tp + smooth) / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + smooth)
        score = tf.reduce_mean(score)
        dice_loss = 1 - score
        # dice_loss = tf.Print(dice_loss, [dice_loss, CE_loss])
        return CE_loss + dice_loss
    return _dice_loss_with_CE

def CE():
    def _CE(y_true, y_pred):
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())

        CE_loss = - y_true[...,:-1] * K.log(y_pred)
        CE_loss = K.mean(K.sum(CE_loss, axis = -1))
        # dice_loss = tf.Print(CE_loss, [CE_loss])
        return CE_loss
    return _CE
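A usage sketch for training (the optimizer, learning rate and the 0.4 auxiliary weight are assumptions, not taken from this post): with aux_branch=True the model has two outputs, so one loss is assigned per output name.

from keras.optimizers import Adam

model = pspnet(n_classes=21, inputs_size=[473, 473, 3], aux_branch=True)
model.compile(
    optimizer=Adam(lr=1e-4),
    loss={'aux': CE(), 'main': dice_loss_with_CE()},
    loss_weights={'aux': 0.4, 'main': 1.0},
)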
