Continuing from the previous section, we now walk through the concrete network structure. The starting code is as follows:
prelogits, _ = network.inference(image_batch, args.keep_probability,
    phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
    weight_decay=args.weight_decay)
As introduced in the previous chapter, what actually executes is network = importlib.import_module('models.inception_resnet_v1'), i.e. inception_resnet_v1.py under the models/ folder is imported dynamically and bound to the name network, so the architecture can be selected by a string at run time instead of a hard-coded import statement (a small sketch of this pattern follows).
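A minimal sketch of the dynamic-import pattern (the module path below is the default; in facenet's training script it is taken from a command-line argument):

import importlib

# Equivalent to "import models.inception_resnet_v1 as network", except that
# the module path is ordinary data and can be supplied at run time.
model_def = 'models.inception_resnet_v1'
network = importlib.import_module(model_def)
# All model modules expose the same interface, so the training script can
# call network.inference(...) without knowing which architecture it got.

We therefore look at the corresponding inference function in inception_resnet_v1.py: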
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
The with slim.arg_scope block registers default arguments for the commonly used layer functions, so every slim.conv2d and slim.fully_connected call inside the scope automatically inherits the same weight initializer, L2 regularizer, and batch-normalization settings.
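A minimal sketch of how arg_scope defaults propagate (the placeholder shape and layer sizes here are invented for illustration):

import tensorflow as tf
import tensorflow.contrib.slim as slim

images = tf.placeholder(tf.float32, [None, 160, 160, 3])
with slim.arg_scope([slim.conv2d],
                    weights_regularizer=slim.l2_regularizer(5e-4),
                    normalizer_fn=slim.batch_norm):
    # Both layers inherit the regularizer and batch norm from the scope;
    # only the layer-specific arguments need to be spelled out.
    net = slim.conv2d(images, 32, 3, scope='Conv_a')
    net = slim.conv2d(net, 64, 3, scope='Conv_b')

With those defaults in place, the actual network structure is built by the function inception_resnet_v1: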
def inception_resnet_v1(inputs, is_training=True,
                        dropout_keep_prob=0.8,
                        bottleneck_layer_size=128,
                        reuse=None,
                        scope='InceptionResnetV1'):
    """Creates the Inception Resnet V1 model.
    Args:
      inputs: a 4-D tensor of size [batch_size, height, width, 3].
      is_training: whether is training or not.
      dropout_keep_prob: float, the fraction to keep before final layer.
      bottleneck_layer_size: int, the size of the bottleneck (embedding) layer.
      reuse: whether or not the network and its variables should be reused. To be
        able to reuse 'scope' must be given.
      scope: Optional variable_scope.
    Returns:
      net: the bottleneck features (prelogits) of the model.
      end_points: the set of end_points from the inception model.
    """
    end_points = {}

    with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                stride=1, padding='SAME'):
                # 149 x 149 x 32
                net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
                                  scope='Conv2d_1a_3x3')
                end_points['Conv2d_1a_3x3'] = net
                # 147 x 147 x 32
                net = slim.conv2d(net, 32, 3, padding='VALID',
                                  scope='Conv2d_2a_3x3')
                end_points['Conv2d_2a_3x3'] = net
                # 147 x 147 x 64
                net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
                end_points['Conv2d_2b_3x3'] = net
                # 73 x 73 x 64
                net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                      scope='MaxPool_3a_3x3')
                end_points['MaxPool_3a_3x3'] = net
                # 73 x 73 x 80
                net = slim.conv2d(net, 80, 1, padding='VALID',
                                  scope='Conv2d_3b_1x1')
                end_points['Conv2d_3b_1x1'] = net
                # 71 x 71 x 192
                net = slim.conv2d(net, 192, 3, padding='VALID',
                                  scope='Conv2d_4a_3x3')
                end_points['Conv2d_4a_3x3'] = net
                # 35 x 35 x 256
                net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',
                                  scope='Conv2d_4b_3x3')
                end_points['Conv2d_4b_3x3'] = net

                # 5 x Inception-resnet-A
                net = slim.repeat(net, 5, block35, scale=0.17)
                # Reduction-A
                with tf.variable_scope('Mixed_6a'):
                    net = reduction_a(net, 192, 192, 256, 384)
                end_points['Mixed_6a'] = net
                # 10 x Inception-Resnet-B
                net = slim.repeat(net, 10, block17, scale=0.10)
                # Reduction-B
                with tf.variable_scope('Mixed_7a'):
                    net = reduction_b(net)
                end_points['Mixed_7a'] = net
                # 5 x Inception-Resnet-C
                net = slim.repeat(net, 5, block8, scale=0.20)
                net = block8(net, activation_fn=None)

                with tf.variable_scope('Logits'):
                    end_points['PrePool'] = net
                    # pylint: disable=no-member
                    net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
                                          scope='AvgPool_1a_8x8')
                    net = slim.flatten(net)
                    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                                       scope='Dropout')
                    end_points['PreLogitsFlatten'] = net

                net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None,
                                           scope='Bottleneck', reuse=False)

    return net, end_points
The overall structure of inception_resnet_v1 is shown in the figure below.
A few points to note: net = slim.repeat(net, 5, block35, scale=0.17) stacks five residual (Inception-ResNet-A) blocks, as sketched below, while reduction_a and reduction_b implement the reduction modules that shrink the spatial resolution (not detailed here). The function finally returns the extracted feature net together with the dictionary end_points, which records the output of every intermediate layer.
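For intuition, here is a simplified sketch of a block35-style residual unit (branches abbreviated; this is not the verbatim code from inception_resnet_v1.py). The key idea is that the inception branches are projected back to the input depth and added to the input, damped by scale:

def block35_sketch(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Simplified Inception-ResNet-A residual unit."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        # Parallel convolution branches computed from the same input ...
        branch_0 = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        branch_1 = slim.conv2d(slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1'),
                               32, 3, scope='Conv2d_0b_3x3')
        mixed = tf.concat([branch_0, branch_1], 3)
        # ... projected back to the input depth, without batch norm or activation ...
        up = slim.conv2d(mixed, net.get_shape()[3], 1,
                         normalizer_fn=None, activation_fn=None, scope='Conv2d_up')
        # ... and added to the input as a residual, scaled by `scale` (0.17 above).
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net

slim.repeat(net, 5, block35, scale=0.17) simply applies such a block five times in sequence, passing scale=0.17 to each call.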
Next, slim.fully_connected is called to build the fully connected classification layer:
logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
    weights_regularizer=slim.l2_regularizer(args.weight_decay),
    scope='Logits', reuse=False)
The second argument of slim.fully_connected is the number of output units, len(train_set); as explained in the data-loading section above, this is the number of distinct identities in the training set. Next, the prelogits are also processed: they are L2-normalized to obtain the embeddings:
embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
This normalizes each output vector (each row of prelogits) to unit length; the epsilon of 1e-10 guards against division by zero for near-zero vectors.
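A quick numerical check of what l2_normalize does (toy 3-dimensional vectors instead of the real 128-dimensional embeddings):

import tensorflow as tf

prelogits = tf.constant([[3.0, 4.0, 0.0],
                         [1.0, 1.0, 1.0]])
# Divide each row by its L2 norm so that every embedding has length 1.
embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

with tf.Session() as sess:
    print(sess.run(embeddings))
    # [[0.6        0.8        0.        ]
    #  [0.57735026 0.57735026 0.57735026]]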