"""MobileNetV2 backbone (Keras) producing a fixed-size embedding vector."""

import os
import warnings
import h5py
import numpy as np
from keras.models import Model
from keras.layers import Input
from keras.layers import Activation
from keras.layers import Dropout
from keras.layers import Reshape
from keras.layers import BatchNormalization
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import Conv2D
from keras.layers import AveragePooling2D
from keras.layers import Flatten
from keras.layers import Add, Multiply
from keras.layers import Dense
from keras.layers import DepthwiseConv2D
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.utils import conv_utils
from keras.utils.data_utils import get_file
from keras.engine.topology import get_source_inputs
from keras.engine import InputSpec
from keras.applications import imagenet_utils
from keras.applications.imagenet_utils import _obtain_input_shape
from keras.applications.imagenet_utils import decode_predictions
from keras import backend as K


def relu6(x):
    """ReLU activation clipped to a maximum value of 6 (MobileNet standard)."""
    capped = K.relu(x, max_value=6)
    return capped

def hard_swish(x):
    """Hard-swish activation: x * relu6(x + 3) / 6 (piecewise-linear swish)."""
    gate = K.relu(x + 3.0, max_value=6.0)
    return x * gate / 6.0

def preprocess_input(x):
    """Scale pixel values for the network: x / 128 - 1.

    Maps the usual [0, 255] image range to roughly [-1, 1).

    Args:
        x: array-like of pixel values (any numeric dtype).

    Returns:
        A new float32 ndarray; the caller's array is not modified.

    Note:
        The original implementation divided in place *before* the float cast,
        which mutated the caller's array and raised TypeError for integer
        (e.g. uint8 image) inputs, since in-place true-division cannot cast
        int -> float. Casting first fixes both problems.
    """
    x = np.asarray(x, dtype=np.float32)
    x = x / 128.0
    x -= 1.0
    return x

def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    """MobileNetV2 inverted residual block: expand -> depthwise -> project.

    Args:
        inputs: 4D Keras tensor, channels_last.
        expansion: channel expansion factor for the 1x1 expand conv.
        stride: stride of the depthwise conv (2 halves spatial size).
        alpha: width multiplier applied to `filters`.
        filters: base number of output channels (before alpha/rounding).
        block_id: integer used to build unique layer names.

    Returns:
        Output tensor; includes a residual shortcut when the input and
        output shapes match (stride 1, equal channel counts).
    """
    # Public API instead of the private `._keras_shape` attribute.
    # Assumes channels_last data format.
    in_channels = K.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)

    # Expand: 1x1 conv widening channels by `expansion`.
    x = Conv2D(expansion * in_channels, kernel_size=1, padding='same', use_bias=False, activation=None,
               name='mobl%d_conv_%d_expand' % (block_id, block_id))(inputs)
    x = BatchNormalization(epsilon=0.001, name='bn%d_conv_%d_bn_expand' %
                           (block_id, block_id))(x)
    x = Activation(relu6, name='conv_%d_relu' % block_id)(x)

    # Depthwise 3x3 conv; `stride` controls spatial downsampling.
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, use_bias=False, padding='same',
                        name='mobl%d_conv_%d_depthwise' % (block_id, block_id))(x)
    x = BatchNormalization(epsilon=0.001, name='bn%d_conv_%d_bn_depthwise' %
                           (block_id, block_id))(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    # Optional squeeze-and-excitation attention (currently disabled):
    # if stride == 2:
    #     x = squeeze_dw(x, block_id=block_id)

    # Project: linear 1x1 conv back down to `pointwise_filters`
    # (no activation -- the linear bottleneck of MobileNetV2).
    x = Conv2D(pointwise_filters, kernel_size=1, padding='same', use_bias=False,
               activation=None, name='mobl%d_conv_%d_project' % (block_id, block_id))(x)
    x = BatchNormalization(epsilon=0.001, name='bn%d_conv_%d_bn_project' %
                           (block_id, block_id))(x)

    # Residual shortcut only when input/output shapes match exactly.
    if in_channels == pointwise_filters and stride == 1:
        return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x

def _first_inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    """First MobileNetV2 block: depthwise -> project, with NO expand stage.

    Args:
        inputs: 4D Keras tensor, channels_last.
        expansion: accepted for signature symmetry with
            `_inverted_res_block` but unused -- the first block never expands.
        stride: stride of the depthwise conv.
        alpha: width multiplier applied to `filters`.
        filters: base number of output channels (before alpha/rounding).
        block_id: integer used to build unique layer names.

    Returns:
        Output tensor; residual shortcut added when shapes match.
    """
    # Public API instead of the private `._keras_shape` attribute.
    # Assumes channels_last data format.
    in_channels = K.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)

    # Depthwise 3x3 conv directly on the input (no expand conv first).
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, use_bias=False, padding='same',
                        name='mobl%d_conv_%d_depthwise' % (block_id, block_id))(inputs)
    x = BatchNormalization(epsilon=0.001, name='bn%d_conv_%d_bn_depthwise' %
                           (block_id, block_id))(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    # Project: linear 1x1 conv (no activation).
    x = Conv2D(pointwise_filters, kernel_size=1, padding='same', use_bias=False,
               activation=None, name='mobl%d_conv_%d_project' % (block_id, block_id))(x)
    x = BatchNormalization(epsilon=0.001, name='bn%d_conv_%d_bn_project' %
                           (block_id, block_id))(x)

    # Residual shortcut only when input/output shapes match exactly.
    if in_channels == pointwise_filters and stride == 1:
        return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x

def squeeze_dw(inputs, block_id=0):
    """Squeeze-and-excitation channel attention unit.

    Pools the feature map to a per-channel vector, passes it through a
    bottleneck MLP (reduction ratio 16) ending in a sigmoid gate, and
    rescales the input feature map channel-wise.
    """
    channels = int(inputs.shape[-1])
    # Squeeze: global spatial average -> (channels,) vector.
    se = GlobalAveragePooling2D(name="conv_dw_%d_se_gap" % block_id)(inputs)
    # Excite: reduce by 16x, ReLU, restore, sigmoid gate in [0, 1].
    se = Dense(int(channels / 16), kernel_initializer='random_uniform', name="conv_dw_%d_se_fc1" % block_id)(se)
    se = Activation(K.relu, name="conv_dw_%d_se_relu" % block_id)(se)
    se = Dense(channels, kernel_initializer='random_uniform', name="conv_dw_%d_se_fc2" % block_id)(se)
    se = Activation(K.sigmoid, name="conv_dw_%d_se_sigmoid" % block_id)(se)
    # Broadcastable (1, 1, C) gate multiplied into the input feature map.
    se = Reshape((1, 1, channels), name="conv_dw_%d_se_reshape" % block_id)(se)
    return Multiply()([inputs, se])

def MobileNet_V2(inputs,embedding_size=128,dropout_keep_prob=0.4, alpha=1.0, depth_multiplier=1):
    """Build a MobileNetV2 backbone mapping an image tensor to an embedding.

    Args:
        inputs: Keras input tensor, e.g. Input([160, 160, 3]) (channels_last;
            the shape comments below assume a 160x160 input).
        embedding_size: width of the final Dense bottleneck (default 128).
        dropout_keep_prob: keep probability; the Dropout rate is
            1 - dropout_keep_prob.
        alpha: width multiplier forwarded to every inverted residual block.
        depth_multiplier: unused in this implementation -- NOTE(review):
            kept, presumably, for signature compatibility; confirm.

    Returns:
        keras.models.Model named 'mobilenet_v2' whose output is the
        batch-normalized embedding (no final activation).
    """

    # Stem width, rounded to a multiple of 8: 32
    first_block_filters = _make_divisible(32 * alpha, 8)
    # 160,160,3 -> 80,80,32
    x = Conv2D(first_block_filters, kernel_size=3, strides=(2, 2), padding='same',
               use_bias=False, name='Conv1')(inputs)
    x = BatchNormalization(epsilon=1e-5, name='bn_Conv1')(x)
    x = Activation(relu6, name='Conv1_relu')(x)

    # 80,80,32 -> 80,80,16
    x = _first_inverted_res_block(x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0)

    # 80,80,16 -> 40,40,24
    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2, expansion=6, block_id=1)
    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2)

    # 40,40,24 -> 20,20,32
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,expansion=6, block_id=5)

    # 20,20,32 -> 10,10,64
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9)
    # 10,10,64 -> 10,10,96
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12)
    # 10,10,96 -> 5, 5, 160
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2, expansion=6, block_id=13)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15)
    # 5, 5, 160 -> 5, 5, 320
    x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16)

    # Final 1x1 conv head -- note its width is NOT scaled by alpha here.
    # 5, 5, 320 -> 5, 5, 1280
    x = Conv2D(1280, kernel_size=1, use_bias=False, name='Conv_1')(x)
    x = BatchNormalization(epsilon=0.001, name='Conv_1_bn')(x)
    x = Activation(relu6, name='out_relu')(x)

    # 5, 5, 1280 -> 1280
    # pool_size = tuple(x.get_shape().as_list()[1:3])
    # x = AveragePooling2D(pool_size=pool_size)(x)
    # x = Flatten()(x)
    x = GlobalAveragePooling2D()(x)
    # Dropout to reduce overfitting; only active during training.
    x = Dropout(1.0 - dropout_keep_prob, name='Dropout')(x)
    # Fully-connected projection down to the embedding size (default 128).
    x = Dense(embedding_size,use_bias=False, name='Bottleneck')(x)
    x = BatchNormalization(momentum=0.995, epsilon=0.001, scale=False,
                           name='BatchNorm_Bottleneck')(x)

    model = Model(inputs, x, name='mobilenet_v2')

    return model

if __name__ == '__main__':
    # Smoke test: build the model for a 160x160 RGB input and print its layers.
    image_input = Input([160, 160, 3])
    net = MobileNet_V2(image_input)
    net.summary()





