# mobilenet_v3

import math

import numpy as np
import tensorflow as tf
from keras import backend
from keras import backend as K
from keras.applications import imagenet_utils
from keras.applications.imagenet_utils import decode_predictions
from keras.layers import (Activation, Add, Conv2D, Dense, DepthwiseConv2D,
                          Dropout, GlobalAveragePooling2D, GlobalMaxPooling2D,
                          Input, Lambda, MaxPooling2D, ZeroPadding2D,Reshape,Multiply)
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.preprocessing import image
from keras.utils.data_utils import get_file

alpha = 1

def relu6(x):
    """ReLU capped at 6: relu6(x) = min(max(0, x), 6)."""
    capped = K.relu(x, max_value=6.0)
    return capped

def hard_swish(x):
    """Hard-swish activation: x * relu6(x + 3) / 6."""
    gate = K.relu(x + 3.0, max_value=6.0) / 6.0
    return x * gate

def return_activation(x, nl, block_id=0, conv_flag=True):
    """Apply the nonlinearity selected by `nl` to tensor `x`.

    nl == "HS" applies hard_swish, nl == "RE" applies relu6; any other
    value returns `x` unchanged (same silent pass-through as before).
    `conv_flag` only changes the layer-name prefix, mirroring conv_block:
    True  -> "cba_<id>_*"        (stem / head convolutions)
    False -> "bneck<id>_cba_*"   (bottleneck-internal convolutions)
    """
    prefix = "cba_%d" % block_id if conv_flag else "bneck%d_cba" % block_id
    if nl == "HS":
        x = Activation(hard_swish, name=prefix + "_hs")(x)
    if nl == "RE":
        x = Activation(relu6, name=prefix + "_re")(x)
    return x

def conv_block(inputs, filters, kernel, strides, nl, block_id=0, conv_flag=True):
    """Conv2D -> BatchNorm -> activation.

    `nl` selects the nonlinearity ("HS"/"RE", see return_activation).
    `conv_flag` only switches the layer-name prefix ("cba_<id>_*" vs
    "bneck<id>_cba_*"); the computation is identical either way.
    """
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
    prefix = "cba_%d_" % block_id if conv_flag else "bneck%d_cba_" % block_id
    x = Conv2D(filters, kernel, padding='same', strides=strides,
               kernel_initializer='random_uniform', name=prefix + "c")(inputs)
    x = BatchNormalization(axis=bn_axis, name=prefix + "b")(x)
    return return_activation(x, nl, block_id=block_id, conv_flag=conv_flag)

def squeeze(inputs, block_id=0):
    """Squeeze-and-excitation attention unit.

    Global-average-pools `inputs`, passes the channel vector through a
    bottlenecked pair of Dense layers (reduction ratio 4), and rescales
    the input feature map channel-wise with the resulting gate.
    Assumes channels-last input (reads channels from inputs.shape[-1]).
    """
    input_channels = int(inputs.shape[-1])
    x = GlobalAveragePooling2D(name="bneck%d_gap" % block_id)(inputs)
    # Reduce channels by 4x, then restore.
    x = Dense(int(input_channels / 4), kernel_initializer='random_uniform',
              name="bneck%d_dense" % block_id)(x)
    x = Activation(relu6, name="bneck%d_relu6" % block_id)(x)
    # Fix: name this layer too — it was the only unnamed layer in the SE
    # unit, inconsistent with every other "bneck%d_*" layer here.
    x = Dense(input_channels, kernel_initializer='random_uniform',
              name="bneck%d_dense_2" % block_id)(x)
    # NOTE(review): the MobileNetV3 paper gates SE with hard-sigmoid
    # (relu6(x+3)/6); hard_swish can produce values outside [0, 1].
    # Left unchanged here to preserve existing behavior — confirm intent.
    x = Activation(hard_swish, name="bneck%d_hard_swish" % block_id)(x)
    x = Reshape((1, 1, input_channels))(x)
    x = Multiply()([inputs, x])

    return x

def bottleneck(inputs, filters, kernel, up_dim, stride, sq, nl, block_id=1):
    """MobileNetV3 inverted-residual bottleneck.

    1x1 expand to `up_dim` -> depthwise `kernel` conv (stride `stride`)
    -> optional SE attention (`sq`) -> 1x1 project to `alpha * filters`.
    A residual Add is used when the spatial size and channel count are
    preserved. `nl` selects the nonlinearity ("HS"/"RE").
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    input_shape = K.int_shape(inputs)
    tchannel = int(up_dim)      # expanded (hidden) channels
    cchannel = int(alpha * filters)  # projected output channels
    # Fix: the residual is only valid when the OUTPUT channels (cchannel,
    # not `filters`) match the input channels on the configured channel
    # axis (not hard-coded axis 3). Identical result for the current
    # alpha == 1 / channels_last configuration; correct for the rest.
    r = stride == 1 and input_shape[channel_axis] == cchannel

    # 1x1 conv: expand the channel count.
    x = conv_block(inputs, tchannel, (1, 1), (1, 1), nl, block_id=block_id, conv_flag=False)

    # 3x3/5x5 depthwise convolution.
    x = DepthwiseConv2D(kernel, strides=(stride, stride), depth_multiplier=1, padding='same',
                        depthwise_initializer='random_uniform',
                        name="bneck%d_dw" % block_id)(x)
    x = BatchNormalization(axis=channel_axis, name="bneck%d_bn" % block_id)(x)
    x = return_activation(x, nl, block_id=block_id)

    # Optional squeeze-and-excitation attention.
    if sq:
        x = squeeze(x, block_id=block_id)

    # 1x1 conv: project back down to the output channel count.
    x = Conv2D(cchannel, (1, 1), strides=(1, 1), padding='same',
               kernel_initializer='random_uniform', name="bneck%d_se_conv" % block_id)(x)
    x = BatchNormalization(axis=channel_axis, name="bneck%d_se_bn" % block_id)(x)
    if r:
        x = Add(name="bneck%d_add" % block_id)([x, inputs])
    return x

def MobileNet_V3(inputs, embedding_size=128, dropout_keep_prob=0.4):
    """Build a MobileNetV3-Small-style embedding network.

    Args:
        inputs: Keras Input tensor, expected shape (160, 160, 3).
        embedding_size: length of the output embedding vector.
        dropout_keep_prob: keep probability; Dropout rate is 1 - this.

    Returns:
        keras Model mapping `inputs` to an (embedding_size,) vector.
    """
    # 160,160,3 -> 80,80,16
    x = conv_block(inputs, 16, (3, 3), strides=(2, 2), nl='HS', block_id=0, conv_flag=True)

    # 80,80,16 -> 40,40,16
    x = bottleneck(x, 16, (3, 3), up_dim=16, stride=2, sq=True, nl='RE', block_id=1)

    # 40,40,16 -> 20,20,24
    x = bottleneck(x, 24, (3, 3), up_dim=72, stride=2, sq=False, nl='RE', block_id=2)
    x = bottleneck(x, 24, (3, 3), up_dim=88, stride=1, sq=False, nl='RE', block_id=3)

    # 20,20,24 -> 10,10,40
    x = bottleneck(x, 40, (5, 5), up_dim=96, stride=2, sq=True, nl='HS', block_id=4)
    x = bottleneck(x, 40, (5, 5), up_dim=240, stride=1, sq=True, nl='HS', block_id=5)
    x = bottleneck(x, 40, (5, 5), up_dim=240, stride=1, sq=True, nl='HS', block_id=6)
    # 10,10,40 -> 10,10,48
    x = bottleneck(x, 48, (5, 5), up_dim=120, stride=1, sq=True, nl='HS', block_id=7)
    x = bottleneck(x, 48, (5, 5), up_dim=144, stride=1, sq=True, nl='HS', block_id=8)

    # 10,10,48 -> 5,5,96
    x = bottleneck(x, 96, (5, 5), up_dim=288, stride=2, sq=True, nl='HS', block_id=9)
    x = bottleneck(x, 96, (5, 5), up_dim=576, stride=1, sq=True, nl='HS', block_id=10)
    x = bottleneck(x, 96, (5, 5), up_dim=576, stride=1, sq=True, nl='HS', block_id=11)

    # 5,5,96 -> 5,5,576
    # Fix: was nl='hs' (lowercase), which matches neither 'HS' nor 'RE' in
    # return_activation, so this conv silently got NO activation. 'HS' is
    # clearly what was intended (h-swish, as in every other 'HS' block).
    x = conv_block(x, 576, (1, 1), strides=(1, 1), nl='HS', block_id=12, conv_flag=True)
    # 5,5,576 -> 576
    x = GlobalAveragePooling2D()(x)

    # Dropout for regularization (active only during training).
    x = Dropout(1.0 - dropout_keep_prob, name="Dropout")(x)
    # 576 -> 1,1,576
    x = Reshape((1, 1, 576))(x)
    # 1,1,576 -> 1,1,1024
    x = Conv2D(1024, (1, 1), padding='same', kernel_initializer='random_uniform')(x)
    x = return_activation(x, 'HS', block_id=13)
    # 1,1,1024 -> 1,1,embedding_size
    # NOTE(review): softmax on an embedding layer is unusual (forces a
    # probability simplex); typical embedding heads are linear. Left
    # unchanged to preserve behavior — confirm this is intentional.
    x = Conv2D(embedding_size, (1, 1), padding='same', activation='softmax',
               kernel_initializer='random_uniform')(x)
    x = Reshape((embedding_size,))(x)

    model = Model(inputs, x, name='mobilenet_v3')

    return model

if __name__ == "__main__":
    inputs=Input([160, 160, 3])
    model = MobileNet_V3(inputs)
    model.summary()





