# Recommended blog (推荐博客): see link in the module notes below.
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import losses
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
import numpy as np
from collections import namedtuple
from nets.network import Network
from model.config import cfg
'''
https://www.cnblogs.com/hellcat/p/9726528.html
'''
# 分离卷积模块
def separable_conv2d_same(inputs, kernel_size, stride, rate=1, scope=None):
    """Strided 2-D depthwise separable convolution with 'SAME'-style padding.

    Passing ``None`` as the pointwise filter count to
    ``slim.separable_conv2d`` produces only the depth-wise convolution stage
    (no 1x1 pointwise projection), with ``depth_multiplier=1`` so the channel
    count is preserved.

    Args:
      inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
      kernel_size: An int, the spatial size of the (square) filter.
      stride: An integer, the output stride.
      rate: An integer, rate for atrous (dilated) convolution.
      scope: Optional variable scope name.

    Returns:
      output: A 4-D tensor of size [batch, height_out, width_out, channels]
        with the convolution output.
    """
    if stride == 1:
        # Unstrided case: built-in 'SAME' padding already gives the
        # desired output size, so no explicit padding is needed.
        return slim.separable_conv2d(inputs, None, kernel_size,
                                     depth_multiplier=1, stride=1, rate=rate,
                                     padding='SAME', scope=scope)

    # Strided case: TF's 'SAME' padding depends on the input's spatial
    # parity, so pad explicitly (split as evenly as possible, extra pixel on
    # the trailing side) and run the convolution with 'VALID' padding to make
    # the output size deterministic.
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_before = (effective_kernel - 1) // 2
    pad_after = (effective_kernel - 1) - pad_before
    padded = tf.pad(inputs, [[0, 0],
                             [pad_before, pad_after],
                             [pad_before, pad_after],
                             [0, 0]])
    return slim.separable_conv2d(padded, None, kernel_size,
                                 depth_multiplier=1, stride=stride, rate=rate,
                                 padding='VALID', scope=scope)
# The following is adapted from:
# https://gi