Building the VGG16 Network Model: A Code Walkthrough

1. Building VGG16

from __future__ import print_function

from keras import backend as K
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Flatten, Dropout, Activation, Permute
from keras.models import Sequential
K.set_image_data_format('channels_last')  # WARNING : important for images and tensors dimensions ordering

def convblock(cdim, nb, bits=3):  # cdim: number of filters (output channels); nb: block index; bits: number of conv layers in the block
    L = []  # list to collect the layers

    for k in range(1, bits + 1):  # closed on the left, open on the right: adds bits conv layers to the block
        convname = 'conv' + str(nb) + '_' + str(k)  # name the conv layer
        # L.append( Convolution2D(cdim, 3, 3, border_mode='same', activation='relu', name=convname) ) # Keras 1
        L.append(Convolution2D(cdim, kernel_size=(3, 3), padding='same', activation='relu', name=convname))  # Keras 2: add a conv layer

    L.append(MaxPooling2D((2, 2), strides=(2, 2)))  # each conv block ends with one pooling layer

    return L


def vgg_face_blank():
    withDO = True  # no effect during evaluation but useful for fine-tuning

    if True:
        mdl = Sequential()

        # First layer is a dummy-permutation = Identity to specify input shape
        mdl.add(Permute((1, 2, 3), input_shape=(224, 224, 3)))  # WARNING : 0 is the sample dim
        # The Permute layer reorders the input dimensions according to the given pattern; (1, 2, 3) leaves
        # the order unchanged. Permute is useful, for example, when connecting an RNN to a CNN.
        for l in convblock(64, 1, bits=2):  # first conv block, 64 output channels: two conv layers plus one pooling layer
            mdl.add(l)

        for l in convblock(128, 2, bits=2):  # second conv block, 128 output channels: two conv layers plus one pooling layer
            mdl.add(l)

        for l in convblock(256, 3, bits=3):  # third conv block, 256 output channels: three conv layers plus one pooling layer
            mdl.add(l)

        for l in convblock(512, 4, bits=3):  # fourth conv block, 512 output channels: three conv layers plus one pooling layer
            mdl.add(l)

        for l in convblock(512, 5, bits=3):  # fifth conv block, 512 output channels: three conv layers plus one pooling layer
            mdl.add(l)

        # mdl.add( Convolution2D(4096, 7, 7, activation='relu', name='fc6') ) # Keras 1
        mdl.add(Convolution2D(4096, kernel_size=(7, 7), activation='relu', name='fc6'))  # Keras 2: 14th conv layer; output shape is now (1, 1, 4096)
        if withDO:
            mdl.add(Dropout(0.5))  # add a dropout layer
        # mdl.add( Convolution2D(4096, 1, 1, activation='relu', name='fc7') ) # Keras 1
        mdl.add(Convolution2D(4096, kernel_size=(1, 1), activation='relu', name='fc7'))  # Keras 2: 15th conv layer, output shape (1, 1, 4096)
        if withDO:
            mdl.add(Dropout(0.5))
        # mdl.add( Convolution2D(2622, 1, 1, name='fc8') ) # Keras 1
        mdl.add(Convolution2D(2622, kernel_size=(1, 1), activation='relu', name='fc8'))  # Keras 2: 16th conv layer, output shape (1, 1, 2622); note the Keras 1 line applies no ReLU here
        mdl.add(Flatten())  # flatten to shape (None, 2622)
        mdl.add(Activation('softmax'))  # final softmax output layer

        return mdl

    else:
        # See following link for a version based on Keras functional API :
        # gist.github.com/EncodeTS/6bbe8cb8bebad7a672f0d872561782d9
        raise ValueError('not implemented')


facemodel = vgg_face_blank()
facemodel.summary()

The resulting network summary:
[figure: output of facemodel.summary(); the per-layer parameter counts sum to 145,002,878]
Code explanation:
(1) Permute(): reordering dimensions

model = Sequential()
model.add(Permute((3,2,1), input_shape=(224,224,3)))
print(model.output_shape)

This prints (None, 3, 224, 224): the pattern (3, 2, 1) reverses the three feature axes, while axis 0, the sample dimension, is untouched.
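As a cross-check of the dummy layer used in vgg_face_blank(), the identity pattern (1, 2, 3) leaves the shape unchanged; a minimal sketch:

from keras.models import Sequential
from keras.layers import Permute

model = Sequential()
model.add(Permute((1, 2, 3), input_shape=(224, 224, 3)))  # identity permutation
print(model.output_shape)  # (None, 224, 224, 3)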
(2) The list L that stores the network layers

model = Sequential()
L=convblock(64, 1, bits=2)
print(L)

What we get is a list of the object references (addresses) of the two convolutional layers and the one pooling layer, along the lines of [<keras.layers.convolutional.Conv2D object at 0x...>, ...].
Inspecting the output of one conv block:

model = Sequential()
for l in convblock(64, 1, bits=2):
    model.add(l)
print(model)

What we get is the address of the Sequential model object itself.
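To get something more informative than object addresses, give the model an input shape and print each layer's name and output shape; a minimal sketch (the pooling layer's auto-generated name may vary):

from keras.models import Sequential
from keras.layers import InputLayer

model = Sequential()
model.add(InputLayer(input_shape=(224, 224, 3)))
for l in convblock(64, 1, bits=2):
    model.add(l)
    print(l.name, l.output_shape)
# conv1_1 (None, 224, 224, 64)    ('same' padding keeps the spatial size)
# conv1_2 (None, 224, 224, 64)
# max_pooling2d_1 (None, 112, 112, 64)    (pooling halves it)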
2. Using the pretrained weights

from scipy.io import loadmat

def load_weight(from_vlfeat=False):  # two options, depending on where the weights were downloaded from
    l = None
    if from_vlfeat:  # INFO : use this if you downloaded weights from vlfeat.org
        data = loadmat('vgg-face.mat', matlab_compatible=False, struct_as_record=False)
        l = data['layers']
        description = data['meta'][0, 0].classes[0, 0].description
    else:  # INFO : use this if you downloaded weights from robots.ox.ac.uk
        data = loadmat('D:\\software\\code\\face-anti-spoofing\\PAD-LSTM-master(cnn-lstm)\\PAD-LSTM\\vgg_face_matconvnet\\vgg_face_matconvnet\\data\\vgg_face.mat', matlab_compatible=False, struct_as_record=False)
        net = data['net'][0, 0]
        l = net.layers
        description = net.classes[0, 0].description
    return l, description
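
# Quick sanity check (a sketch) on what load_weight() returns; the shapes below
# assume the robots.ox.ac.uk .mat file used above:
l, description = load_weight()
print(l.shape)            # (1, 39): the MatConvNet network stores 39 layer entries
print(description.shape)  # holds the 2622 identity names (exact layout depends on the .mat structure)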

def weight_compare(kmodel):  # compare the newly built network against the downloaded weights
    kerasnames = [lr.name for lr in kmodel.layers]

    # WARNING : important setting as 2 of the 4 axis have same size dimension
    # prmt = (3,2,0,1) # INFO : for 'th' setting of 'dim_ordering'
    prmt = (0, 1, 2, 3)  # INFO : for 'channels_last' setting of 'image_data_format'
    l, description = load_weight()

    for i in range(l.shape[1]):
        matname = l[0, i][0, 0].name[0]
        mattype = l[0, i][0, 0].type[0]
        if matname in kerasnames:
            kindex = kerasnames.index(matname)
            print(matname, mattype)  # weight name and weight type, e.g. name conv1_1, type conv
            # l[0, i][0, 0].weights[0, 0] holds the layer's weights, e.g. shape (3, 3, 3, 64) for the first layer;
            # l[0, i][0, 0].weights[0, 1] holds the layer's biases, with shape (64, 1)
            print(l[0, i][0, 0].weights[0, 0].transpose(prmt).shape, l[0, i][0, 0].weights[0, 1].shape)
            # kmodel.layers[kindex].get_weights()[0] likewise holds the layer's weights, e.g. (3, 3, 3, 64);
            # kmodel.layers[kindex].get_weights()[1] holds the layer's biases, with shape (64,)
            print(kmodel.layers[kindex].get_weights()[0].shape, kmodel.layers[kindex].get_weights()[1].shape)
            print('------------------------------------------')
        else:
            print('MISSING : ', matname, mattype)  # entries in the downloaded weights with no match in the built network, e.g. relu, pooling, dropout; only conv layers are matched here
            print('------------------------------------------')


def copy_mat_to_keras(kmodel):  # copy the pretrained weights into the new network
    kerasnames = [lr.name for lr in kmodel.layers]

    # WARNING : important setting as 2 of the 4 axis have same size dimension
    # prmt = (3,2,0,1) # INFO : for 'th' setting of 'dim_ordering'
    prmt = (0, 1, 2, 3)  # INFO : for 'channels_last' setting of 'image_data_format'
    l, description = load_weight()
    for i in range(l.shape[1]):
        matname = l[0, i][0, 0].name[0]
        if matname in kerasnames:
            kindex = kerasnames.index(matname)
            # print matname
            l_weights = l[0, i][0, 0].weights[0, 0]
            l_bias = l[0, i][0, 0].weights[0, 1]
            f_l_weights = l_weights.transpose(prmt)
            # f_l_weights = np.flip(f_l_weights, 2) # INFO : for 'th' setting in dim_ordering
            # f_l_weights = np.flip(f_l_weights, 3) # INFO : for 'th' setting in dim_ordering
            # get_weights() returns the layer's weight tensors as a list of numpy arrays
            assert (f_l_weights.shape == kmodel.layers[kindex].get_weights()[0].shape)  # the built layer's weight shape must match the original
            assert (l_bias.shape[1] == 1)  # biases are stored as a column vector
            assert (l_bias[:, 0].shape == kmodel.layers[kindex].get_weights()[1].shape)  # bias shapes must match
            assert (len(kmodel.layers[kindex].get_weights()) == 2)
            # set_weights() loads weights from numpy arrays; the arrays must have the same shapes as model.get_weights()
            kmodel.layers[kindex].set_weights([f_l_weights, l_bias[:, 0]])

weight_compare(facemodel)  # inspect the comparison
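
# Once all the shapes line up, the actual transfer is a single call:
copy_mat_to_keras(facemodel)  # load the pretrained VGG-Face weights into the Keras model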

Output of weight_compare:

conv1_1 conv
(3, 3, 3, 64) (64, 1) # weight shape and bias shape from the .mat file; the next line shows the Keras layer's shapes
(3, 3, 3, 64) (64,)
------------------------------------------
MISSING :  relu1_1 relu
------------------------------------------
conv1_2 conv
(3, 3, 64, 64) (64, 1)
(3, 3, 64, 64) (64,)
------------------------------------------
MISSING :  relu1_2 relu
------------------------------------------
MISSING :  pool1 pool
------------------------------------------
conv2_1 conv
(3, 3, 64, 128) (128, 1)
(3, 3, 64, 128) (128,)
------------------------------------------
MISSING :  relu2_1 relu
------------------------------------------
conv2_2 conv
(3, 3, 128, 128) (128, 1)
(3, 3, 128, 128) (128,)
------------------------------------------
MISSING :  relu2_2 relu
------------------------------------------
MISSING :  pool2 pool
------------------------------------------
conv3_1 conv
(3, 3, 128, 256) (256, 1)
(3, 3, 128, 256) (256,)
------------------------------------------
MISSING :  relu3_1 relu
------------------------------------------
conv3_2 conv
(3, 3, 256, 256) (256, 1)
(3, 3, 256, 256) (256,)
------------------------------------------
MISSING :  relu3_2 relu
------------------------------------------
conv3_3 conv
(3, 3, 256, 256) (256, 1)
(3, 3, 256, 256) (256,)
------------------------------------------
MISSING :  relu3_3 relu
------------------------------------------
MISSING :  pool3 pool
------------------------------------------
conv4_1 conv
(3, 3, 256, 512) (512, 1)
(3, 3, 256, 512) (512,)
------------------------------------------
MISSING :  relu4_1 relu
------------------------------------------
conv4_2 conv
(3, 3, 512, 512) (512, 1)
(3, 3, 512, 512) (512,)
------------------------------------------
MISSING :  relu4_2 relu
------------------------------------------
conv4_3 conv
(3, 3, 512, 512) (512, 1)
(3, 3, 512, 512) (512,)
------------------------------------------
MISSING :  relu4_3 relu
------------------------------------------
MISSING :  pool4 pool
------------------------------------------
conv5_1 conv
(3, 3, 512, 512) (512, 1)
(3, 3, 512, 512) (512,)
------------------------------------------
MISSING :  relu5_1 relu
------------------------------------------
conv5_2 conv
(3, 3, 512, 512) (512, 1)
(3, 3, 512, 512) (512,)
------------------------------------------
MISSING :  relu5_2 relu
------------------------------------------
conv5_3 conv
(3, 3, 512, 512) (512, 1)
(3, 3, 512, 512) (512,)
------------------------------------------
MISSING :  relu5_3 relu
------------------------------------------
MISSING :  pool5 pool
------------------------------------------
fc6 conv
(7, 7, 512, 4096) (4096, 1)
(7, 7, 512, 4096) (4096,)
------------------------------------------
MISSING :  relu6 relu
------------------------------------------
MISSING :  dropout6 dropout
------------------------------------------
fc7 conv
(1, 1, 4096, 4096) (4096, 1)
(1, 1, 4096, 4096) (4096,)
------------------------------------------
MISSING :  relu7 relu
------------------------------------------
MISSING :  dropout7 dropout
------------------------------------------
fc8 conv
(1, 1, 4096, 2622) (2622, 1)
(1, 1, 4096, 2622) (2622,)
------------------------------------------
MISSING :  softmax softmax
------------------------------------------

(1) Explanation of the original weights

data = loadmat('vgg_face_matconvnet\\vgg_face_matconvnet\\data\\vgg_face.mat', matlab_compatible=False, struct_as_record=False)
print("data",data)
net = data['net'][0, 0]
print("net",net)
l = net.layers
print("l",l)
description = net.classes[0, 0].description
print("des",description )

data:
[figure: the raw dict returned by loadmat]
net:
[figure: the top-level net mat_struct]
l:

l [[array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x000002643758F7F0>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x000002643758FFD0>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x000002643758F630>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x000002643758F7B8>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026475FEB630>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026475FEBAC8>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026475FEB828>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437568C50>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437568DA0>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437568080>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437568240>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x00000264375682E8>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437568438>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x00000264375684A8>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437568710>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x00000264375686A0>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437568828>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x00000264375689E8>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x00000264375689B0>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437525828>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437525438>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437525128>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437525C88>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026475E4AAC8>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026475E4A898>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026475E4A588>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437543F60>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437543F98>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437543550>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x00000264375435C0>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x00000264375437B8>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x00000264375438D0>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437543940>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437543A90>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437543BA8>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437543C50>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437543E48>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437543EF0>]],
      dtype=object)
  array([[<scipy.io.matlab.mio5_params.mat_struct object at 0x0000026437223860>]],
      dtype=object)]]

description:
[figure: an array holding the 2622 class (identity) names]
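Once the weights have been copied over with copy_mat_to_keras, description can be used to decode predictions. A hypothetical sketch: img is assumed to be an already-aligned 224x224 RGB face crop (preprocessing is not covered here), and the exact indexing into description may need adjusting to the .mat layout:

import numpy as np

im = np.expand_dims(img.astype(np.float32), axis=0)  # add the batch axis -> (1, 224, 224, 3)
probs = facemodel.predict(im)                        # shape (1, 2622)
best = int(np.argmax(probs))
print(description[best, 0])                          # predicted identity name (indexing is an assumption)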
3. Modifying VGG16 to attach an LSTM network

from __future__ import print_function

from keras import backend as K
from keras.layers import Convolution2D, MaxPooling2D, Dense, ConvLSTM2D, TimeDistributed, LSTM  # LSTM is needed for the alternative variant below
from keras.layers import Flatten, Dropout, Activation, Permute, InputLayer
from keras.models import Sequential, Model

K.set_image_data_format('channels_last')  # WARNING : important for images and tensors dimensions ordering


def convblock(cdim, nb, bits=3):
    L = []

    for k in range(1, bits + 1):
        convname = 'conv' + str(nb) + '_' + str(k)
        # L.append( Convolution2D(cdim, 3, 3, border_mode='same', activation='relu', name=convname) ) # Keras 1
        L.append(TimeDistributed(Convolution2D(cdim, kernel_size=(3, 3), padding='same', activation='relu', name=convname)))  # Keras 2

    L.append(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    return L


def vgg_face_blank():
    withDO = True  # no effect during evaluation but useful for fine-tuning

    if True:
        mdl = Sequential()

        # First layer is a dummy-permutation = Identity to specify input shape
        mdl.add(InputLayer(input_shape=(7, 224, 224, 3)))  # each sample must be 4-D (time, H, W, C); with the batch axis this matches the 5-D input ConvLSTM expects
   
        for l in convblock(64, 1, bits=2):
            mdl.add(l)

        for l in convblock(128, 2, bits=2):
            mdl.add(l)

        for l in convblock(256, 3, bits=3):
            mdl.add(l)

        for l in convblock(512, 4, bits=3):
            mdl.add(l)

        for l in convblock(512, 5, bits=3):
            mdl.add(l)

        # mdl.add( Convolution2D(4096, 7, 7, activation='relu', name='fc6') ) # Keras 1
        mdl.add(TimeDistributed(Convolution2D(4096, kernel_size=(7, 7), activation='relu', name='fc6')))  # Keras 2: must be wrapped with the TimeDistributed layer wrapper
        if withDO:
            mdl.add(Dropout(0.5))
        # mdl.add( Convolution2D(4096, 1, 1, activation='relu', name='fc7') ) # Keras 1
        mdl.add(TimeDistributed(Convolution2D(4096, kernel_size=(1, 1), activation='relu', name='fc7')))  # Keras 2
        if withDO:
            mdl.add(Dropout(0.5))
        # mdl.add( Convolution2D(2622, 1, 1, name='fc8') ) # Keras 1
        mdl.add(TimeDistributed(Convolution2D(2622, kernel_size=(1, 1), activation='relu', name='fc8')))  # Keras 2
        mdl.add(TimeDistributed(Flatten()))
        mdl.add(Activation('softmax'))

        return mdl

    else:
        # See following link for a version based on Keras functional API :
        # gist.github.com/EncodeTS/6bbe8cb8bebad7a672f0d872561782d9
        raise ValueError('not implemented')
        
facemodel = vgg_face_blank()      
last_layer = facemodel.layers[-4].output  # take the output of the 4th layer from the end (dropout_2); everything after it is replaced by the ConvLSTM2D
x = ConvLSTM2D(2049, kernel_size=(1,1), activation='relu', name='fc8')(last_layer)
x = Dropout(0.2)(x)
x = Flatten()(x)  # output vector
out = Dense(2, activation = 'softmax')(x)
custom_model = Model(facemodel.input, out)
custom_model.summary()

# An alternative way to attach an LSTM; last_layer is still the one defined above
#x = TimeDistributed(Convolution2D(1000,kernel_size=(1,1), activation='relu', name='fc8'))(last_layer)
#x = TimeDistributed(Flatten())(last_layer)
#x = LSTM(30, dropout = 0.2, return_sequences = False)(x)
#out = Dense(2, activation = 'softmax')(x)
#custom_model = Model(facemodel.input, out)
#custom_model.summary()


The model produced by the first method:

Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         (None, 7, 224, 224, 3)    0         
_________________________________________________________________
time_distributed_1 (TimeDist (None, 7, 224, 224, 64)   1792      
_________________________________________________________________
time_distributed_2 (TimeDist (None, 7, 224, 224, 64)   36928     
_________________________________________________________________
time_distributed_3 (TimeDist (None, 7, 112, 112, 64)   0         
_________________________________________________________________
time_distributed_4 (TimeDist (None, 7, 112, 112, 128)  73856     
_________________________________________________________________
time_distributed_5 (TimeDist (None, 7, 112, 112, 128)  147584    
_________________________________________________________________
time_distributed_6 (TimeDist (None, 7, 56, 56, 128)    0         
_________________________________________________________________
time_distributed_7 (TimeDist (None, 7, 56, 56, 256)    295168    
_________________________________________________________________
time_distributed_8 (TimeDist (None, 7, 56, 56, 256)    590080    
_________________________________________________________________
time_distributed_9 (TimeDist (None, 7, 56, 56, 256)    590080    
_________________________________________________________________
time_distributed_10 (TimeDis (None, 7, 28, 28, 256)    0         
_________________________________________________________________
time_distributed_11 (TimeDis (None, 7, 28, 28, 512)    1180160   
_________________________________________________________________
time_distributed_12 (TimeDis (None, 7, 28, 28, 512)    2359808   
_________________________________________________________________
time_distributed_13 (TimeDis (None, 7, 28, 28, 512)    2359808   
_________________________________________________________________
time_distributed_14 (TimeDis (None, 7, 14, 14, 512)    0         
_________________________________________________________________
time_distributed_15 (TimeDis (None, 7, 14, 14, 512)    2359808   
_________________________________________________________________
time_distributed_16 (TimeDis (None, 7, 14, 14, 512)    2359808   
_________________________________________________________________
time_distributed_17 (TimeDis (None, 7, 14, 14, 512)    2359808   
_________________________________________________________________
time_distributed_18 (TimeDis (None, 7, 7, 7, 512)      0         
_________________________________________________________________
time_distributed_19 (TimeDis (None, 7, 1, 1, 4096)     102764544 
_________________________________________________________________
dropout_1 (Dropout)          (None, 7, 1, 1, 4096)     0         
_________________________________________________________________
time_distributed_20 (TimeDis (None, 7, 1, 1, 4096)     16781312  
_________________________________________________________________
dropout_2 (Dropout)          (None, 7, 1, 1, 4096)     0         
_________________________________________________________________
fc8 (ConvLSTM2D)             (None, 1, 1, 2049)        50372616  
_________________________________________________________________
dropout_3 (Dropout)          (None, 1, 1, 2049)        0         
_________________________________________________________________
flatten_2 (Flatten)          (None, 2049)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 2)                 4100      
=================================================================
Total params: 184,637,260
Trainable params: 184,637,260
Non-trainable params: 0
_________________________________________________________________

The model produced by the second method:

Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 7, 224, 224, 3)    0         
_________________________________________________________________
time_distributed_1 (TimeDist (None, 7, 224, 224, 64)   1792      
_________________________________________________________________
time_distributed_2 (TimeDist (None, 7, 224, 224, 64)   36928     
_________________________________________________________________
time_distributed_3 (TimeDist (None, 7, 112, 112, 64)   0         
_________________________________________________________________
time_distributed_4 (TimeDist (None, 7, 112, 112, 128)  73856     
_________________________________________________________________
time_distributed_5 (TimeDist (None, 7, 112, 112, 128)  147584    
_________________________________________________________________
time_distributed_6 (TimeDist (None, 7, 56, 56, 128)    0         
_________________________________________________________________
time_distributed_7 (TimeDist (None, 7, 56, 56, 256)    295168    
_________________________________________________________________
time_distributed_8 (TimeDist (None, 7, 56, 56, 256)    590080    
_________________________________________________________________
time_distributed_9 (TimeDist (None, 7, 56, 56, 256)    590080    
_________________________________________________________________
time_distributed_10 (TimeDis (None, 7, 28, 28, 256)    0         
_________________________________________________________________
time_distributed_11 (TimeDis (None, 7, 28, 28, 512)    1180160   
_________________________________________________________________
time_distributed_12 (TimeDis (None, 7, 28, 28, 512)    2359808   
_________________________________________________________________
time_distributed_13 (TimeDis (None, 7, 28, 28, 512)    2359808   
_________________________________________________________________
time_distributed_14 (TimeDis (None, 7, 14, 14, 512)    0         
_________________________________________________________________
time_distributed_15 (TimeDis (None, 7, 14, 14, 512)    2359808   
_________________________________________________________________
time_distributed_16 (TimeDis (None, 7, 14, 14, 512)    2359808   
_________________________________________________________________
time_distributed_17 (TimeDis (None, 7, 14, 14, 512)    2359808   
_________________________________________________________________
time_distributed_18 (TimeDis (None, 7, 7, 7, 512)      0         
_________________________________________________________________
time_distributed_19 (TimeDis (None, 7, 1, 1, 4096)     102764544 
_________________________________________________________________
dropout_1 (Dropout)          (None, 7, 1, 1, 4096)     0         
_________________________________________________________________
time_distributed_20 (TimeDis (None, 7, 1, 1, 4096)     16781312  
_________________________________________________________________
dropout_2 (Dropout)          (None, 7, 1, 1, 4096)     0         
_________________________________________________________________
time_distributed_24 (TimeDis (None, 7, 4096)           0         
_________________________________________________________________
lstm_1 (LSTM)                (None, 30)                495240    
_________________________________________________________________
dense_1 (Dense)              (None, 2)                 62        
=================================================================
Total params: 134,755,846
Trainable params: 134,755,846
Non-trainable params: 0
_________________________________________________________________
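
Either variant is then compiled and trained like any Keras model. A minimal sketch, assuming video clips X_train of shape (n, 7, 224, 224, 3) and one-hot labels y_train of shape (n, 2); both names are illustrative:

custom_model.compile(optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
custom_model.fit(X_train, y_train, batch_size=4, epochs=10, validation_split=0.1)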

4. Modifying VGG to change the final classification output
In short, we modify everything after the dropout that follows fc7, i.e. the layers after index -4.
There are several possible modifications, and it is not clear which one fits best.
(1) Keep a convolutional layer at the end, trying 4096 or 2048 filters; the Dropout can also be removed as an experiment.
last_layer = facemodel.layers[-4].output
x = Convolution2D(4096, kernel_size=(1,1), activation='relu', name='fc8')(last_layer)  # 4096 can be changed to an intermediate value such as 2048
x = Dropout(0.2)(x)
x = Flatten()(x)  # output vector
out = Dense(2, activation='softmax')(x)
With 4096 filters, the output is:

fc7 (Conv2D)                 (None, 1, 1, 4096)        16781312  
_________________________________________________________________
dropout_8 (Dropout)          (None, 1, 1, 4096)        0         
_________________________________________________________________
fc8 (Conv2D)                 (None, 1, 1, 4096)        16781312  
_________________________________________________________________
dropout_9 (Dropout)          (None, 1, 1, 4096)        0         
_________________________________________________________________
flatten_6 (Flatten)          (None, 4096)              0         
_________________________________________________________________
dense_2 (Dense)              (None, 2)                 8194      
=================================================================
Total params: 151,050,050
Trainable params: 151,050,050
Non-trainable params: 0

With 2048 filters, the output is:

fc7 (Conv2D)                 (None, 1, 1, 4096)        16781312  
_________________________________________________________________
dropout_11 (Dropout)         (None, 1, 1, 4096)        0         
_________________________________________________________________
fc8 (Conv2D)                 (None, 1, 1, 2048)        8390656   
_________________________________________________________________
dropout_12 (Dropout)         (None, 1, 1, 2048)        0         
_________________________________________________________________
flatten_8 (Flatten)          (None, 2048)              0         
_________________________________________________________________
dense_3 (Dense)              (None, 2)                 4098      
=================================================================
Total params: 142,655,298
Trainable params: 142,655,298
Non-trainable params: 0

(2) Attach fully connected layers directly: first a Dense layer of 2048 or 4096 units, then the final 2-way classifier.
last_layer = facemodel.layers[-4].output
x = Flatten()(last_layer)
x = Dense(2048, activation='softmax')(x)  # note: 'relu' would be the more usual choice for a hidden layer
x = Dropout(0.2)(x)
out = Dense(2, activation='softmax')(x)
The output is:

fc7 (Conv2D)                 (None, 1, 1, 4096)        16781312  
_________________________________________________________________
dropout_16 (Dropout)         (None, 1, 1, 4096)        0         
_________________________________________________________________
flatten_12 (Flatten)         (None, 4096)              0         
_________________________________________________________________
dense_5 (Dense)              (None, 2048)              8390656   
_________________________________________________________________
dropout_17 (Dropout)         (None, 2048)              0         
_________________________________________________________________
dense_6 (Dense)              (None, 2)                 4098      
=================================================================
Total params: 142,655,298
Trainable params: 142,655,298
Non-trainable params: 0

The complete code:

from __future__ import print_function

from keras import backend as K
from keras.layers import Convolution2D, MaxPooling2D, Dense
from keras.layers import Flatten, Dropout, Activation, InputLayer
from keras.models import Sequential,Model
K.set_image_data_format('channels_last')  # WARNING : important for images and tensors dimensions ordering

def convblock(cdim, nb, bits=3):  # cdim: number of filters (output channels); nb: block index; bits: number of conv layers in the block
    L = []  # list to collect the layers

    for k in range(1, bits + 1):  # closed on the left, open on the right: adds bits conv layers to the block
        convname = 'conv' + str(nb) + '_' + str(k)  # name the conv layer
        # L.append( Convolution2D(cdim, 3, 3, border_mode='same', activation='relu', name=convname) ) # Keras 1
        L.append(Convolution2D(cdim, kernel_size=(3, 3), padding='same', activation='relu', name=convname))  # Keras 2: add a conv layer

    L.append(MaxPooling2D((2, 2), strides=(2, 2)))  # each conv block ends with one pooling layer

    return L


def vgg_face_blank():
    withDO = True  # no effect during evaluation but useful for fine-tuning

    if True:
        mdl = Sequential()

        mdl.add(InputLayer(input_shape=(224, 224, 3)))
        for l in convblock(64, 1, bits=2):  # first conv block, 64 output channels: two conv layers plus one pooling layer
            mdl.add(l)

        for l in convblock(128, 2, bits=2):  # second conv block, 128 output channels: two conv layers plus one pooling layer
            mdl.add(l)

        for l in convblock(256, 3, bits=3):  # third conv block, 256 output channels: three conv layers plus one pooling layer
            mdl.add(l)

        for l in convblock(512, 4, bits=3):  # fourth conv block, 512 output channels: three conv layers plus one pooling layer
            mdl.add(l)

        for l in convblock(512, 5, bits=3):  # fifth conv block, 512 output channels: three conv layers plus one pooling layer
            mdl.add(l)

        # mdl.add( Convolution2D(4096, 7, 7, activation='relu', name='fc6') ) # Keras 1
        mdl.add(Convolution2D(4096, kernel_size=(7, 7), activation='relu', name='fc6'))  # Keras 2: 14th conv layer; output shape is now (1, 1, 4096)
        if withDO:
            mdl.add(Dropout(0.5))  # add a dropout layer
        # mdl.add( Convolution2D(4096, 1, 1, activation='relu', name='fc7') ) # Keras 1
        mdl.add(Convolution2D(4096, kernel_size=(1, 1), activation='relu', name='fc7'))  # Keras 2: 15th conv layer, output shape (1, 1, 4096)
        if withDO:
            mdl.add(Dropout(0.5))
        # mdl.add( Convolution2D(2622, 1, 1, name='fc8') ) # Keras 1
        mdl.add(Convolution2D(2622, kernel_size=(1, 1), activation='relu', name='fc8'))  # Keras 2: 16th conv layer, output shape (1, 1, 2622)
        mdl.add(Flatten())  # flatten to shape (None, 2622)
        mdl.add(Activation('softmax'))  # final softmax output layer

        return mdl

    else:
        # See following link for a version based on Keras functional API :
        # gist.github.com/EncodeTS/6bbe8cb8bebad7a672f0d872561782d9
        raise ValueError('not implemented')

facemodel = vgg_face_blank()
last_layer = facemodel.layers[-4].output
#x = Convolution2D(2048, kernel_size=(1,1), activation='relu', name='fc8')(last_layer)  # variant (1): a conv layer instead; 4096 can be reduced to an intermediate value such as 2048
x = Flatten()(last_layer)
x = Dense(2048, activation='softmax')(x)
x = Dropout(0.2)(x)
#x = Flatten()(x)  # output vector
out = Dense(2, activation='softmax')(x)  # try sigmoid if softmax does not work; it suits binary classification better
custom_model = Model(facemodel.input, out)  # assemble the Model from the input tensor and the output vector
custom_model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 224, 224, 3)       0         
_________________________________________________________________
conv1_1 (Conv2D)             (None, 224, 224, 64)      1792      
_________________________________________________________________
conv1_2 (Conv2D)             (None, 224, 224, 64)      36928     
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 112, 112, 64)      0         
_________________________________________________________________
conv2_1 (Conv2D)             (None, 112, 112, 128)     73856     
_________________________________________________________________
conv2_2 (Conv2D)             (None, 112, 112, 128)     147584    
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 56, 56, 128)       0         
_________________________________________________________________
conv3_1 (Conv2D)             (None, 56, 56, 256)       295168    
_________________________________________________________________
conv3_2 (Conv2D)             (None, 56, 56, 256)       590080    
_________________________________________________________________
conv3_3 (Conv2D)             (None, 56, 56, 256)       590080    
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 28, 28, 256)       0         
_________________________________________________________________
conv4_1 (Conv2D)             (None, 28, 28, 512)       1180160   
_________________________________________________________________
conv4_2 (Conv2D)             (None, 28, 28, 512)       2359808   
_________________________________________________________________
conv4_3 (Conv2D)             (None, 28, 28, 512)       2359808   
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 14, 14, 512)       0         
_________________________________________________________________
conv5_1 (Conv2D)             (None, 14, 14, 512)       2359808   
_________________________________________________________________
conv5_2 (Conv2D)             (None, 14, 14, 512)       2359808   
_________________________________________________________________
conv5_3 (Conv2D)             (None, 14, 14, 512)       2359808   
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 7, 7, 512)         0         
_________________________________________________________________
fc6 (Conv2D)                 (None, 1, 1, 4096)        102764544 
_________________________________________________________________
dropout_1 (Dropout)          (None, 1, 1, 4096)        0         
_________________________________________________________________
fc7 (Conv2D)                 (None, 1, 1, 4096)        16781312  
_________________________________________________________________
dropout_2 (Dropout)          (None, 1, 1, 4096)        0         
_________________________________________________________________
flatten_2 (Flatten)          (None, 4096)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 2048)              8390656   
_________________________________________________________________
dropout_3 (Dropout)          (None, 2048)              0         
_________________________________________________________________
dense_2 (Dense)              (None, 2)                 4098      
=================================================================
Total params: 142,655,298
Trainable params: 142,655,298
Non-trainable params: 0
_________________________________________________________________
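
For fine-tuning, the pretrained base is usually frozen first so that only the new head trains. A sketch, assuming copy_mat_to_keras was run on facemodel before the head was replaced; the last four layers of custom_model are the new Flatten/Dense/Dropout/Dense head:

for layer in custom_model.layers[:-4]:  # freeze the pretrained VGG base
    layer.trainable = False
custom_model.compile(optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])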