For an introduction to the network, see: blog post
For several ways to build deep learning models with Keras, see: blog post
Let's go straight to the network structure.
Transfer learning
As usual, let's first look at the reference answer:
import tensorflow as tf
from tensorflow import keras
base_model = keras.applications.MobileNetV2(weights='imagenet')
base_model.summary()
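The summary above lists the full ImageNet classifier. As a minimal transfer-learning sketch (the input size, number of classes, and classification head below are assumptions for illustration, not part of the reference code), the pretrained backbone can be reused by dropping the top, freezing its weights, and attaching a new classifier:

# Hypothetical transfer-learning setup; num_classes and input_shape are illustrative assumptions
base = keras.applications.MobileNetV2(weights='imagenet',
                                      include_top=False,         # drop the 1000-class ImageNet head
                                      input_shape=(224, 224, 3))
base.trainable = False                                           # freeze the pretrained backbone
num_classes = 10                                                 # assumed number of target classes
model = keras.Sequential([
    base,
    keras.layers.GlobalAveragePooling2D(),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(num_classes, activation='softmax'),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])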
Self-written implementation
Block
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers, models, Sequential, backend
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization, Activation, GlobalAveragePooling2D
from tensorflow.keras.layers import Concatenate, Lambda, Input, ZeroPadding2D, AveragePooling2D, DepthwiseConv2D, Reshape
def relu6(x):
    # ReLU capped at 6, the activation used throughout MobileNetV2
    return K.relu(x, max_value=6)

# Ensure the number of feature channels is a multiple of the divisor (8 here)
def make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    # round to the nearest multiple of divisor (// is floor division)
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # make sure rounding down does not drop the channel count by more than 10%
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
def conv_block(x, nb_filter, kernel=(1, 1), stride=(1, 1), name=None):
    x = Conv2D(nb_filter, kernel, strides=stride, padding='same', use_bias=False, name=name + '_conv1')(x)
    x = BatchNormalization(axis=3, name=name + '_bn1')(x)
    x = Activation(relu6, name=name + '_relu')(x)
    return x
def depthwise_res_block(x, nb_filter, kernel, stride, t, alpha, residual=False, name=None):
    input_tensor = x
    exp_channels = x.shape[-1] * t             # expanded channels
    alpha_channels = int(nb_filter * alpha)    # projected (compressed) channels
    # 1x1 expansion
    x = conv_block(x, exp_channels, (1, 1), (1, 1), name=name + '_expansion')
    # 3x3 depthwise convolution
    x = DepthwiseConv2D(kernel, padding='same', strides=stride, depth_multiplier=1, use_bias=False, name=name + '_dpconv')(x)
    x = BatchNormalization(axis=3, name=name + '_bn1')(x)
    x = Activation(relu6, name=name + '_relu1')(x)
    # 1x1 linear projection (no activation)
    x = Conv2D(alpha_channels, (1, 1), padding='same', use_bias=False, strides=(1, 1), name=name + '_conv_2')(x)
    x = BatchNormalization(axis=3, name=name + '_bn2')(x)
    if residual:
        x = layers.add([x, input_tensor])
    return x
def inverted_residual_layers(x, nb_filter, stride, t, alpha, n, name=None):
    # the first block may change stride/channels, so it has no residual connection
    x = depthwise_res_block(x, nb_filter, (3, 3), stride, t, alpha, False, name=name + '_dep1')
    # the remaining n-1 blocks keep the shape and add residual connections
    for i in range(1, n):
        x = depthwise_res_block(x, nb_filter, (3, 3), (1, 1), t, alpha, True, name=name + '_dep' + str(i + 1))
    return x
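The make_divisible helper above is not called in these blocks yet; it is what keeps channel counts hardware-friendly once the width multiplier alpha scales them. A quick illustration (the alpha values are just examples):

# Round alpha-scaled channel counts to multiples of 8
for alpha in (0.35, 0.5, 0.75, 1.0, 1.4):
    print(alpha, make_divisible(32 * alpha, 8))
# output: 0.35 -> 16, 0.5 -> 16, 0.75 -> 24, 1.0 -> 32, 1.4 -> 48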
Note that every convolution in this network is created with use_bias=False, i.e. the conv layers carry no bias term; since each convolution is immediately followed by BatchNormalization, a bias would be redundant (the BN beta offset plays that role).
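As a quick sanity check (the input size, filter counts, and hyperparameters below are assumptions for illustration, not the full MobileNetV2 definition), the blocks can be chained on a dummy input to confirm the expected downsampling:

# Illustrative smoke test of the building blocks
inputs = Input(shape=(224, 224, 3))
x = conv_block(inputs, 32, (3, 3), (2, 2), name='stem')                          # 112x112x32
x = inverted_residual_layers(x, 16, (1, 1), t=1, alpha=1.0, n=1, name='block1')  # 112x112x16
x = inverted_residual_layers(x, 24, (2, 2), t=6, alpha=1.0, n=2, name='block2')  # 56x56x24
demo = models.Model(inputs, x)
demo.summary()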