[Deep Learning] Implementing FCN in TensorFlow

Reference: https://blog.csdn.net/MOU_IT/article/details/81073149 . The code comes from https://github.com/shekkizh/FCN.tensorflow . I have been trying it out bit by bit in Jupyter; on 2018-08-13 I experimented with the visualize mode.

Data: the model is applied to the Scene Parsing Challenge dataset provided by MIT ( http://sceneparsing.csail.mit.edu/ ); the dataset is documented at https://github.com/CSAILVision/sceneparsing#overview-of-scene-parsing-benchmark . If the data and the VGG model are not downloaded in advance, the program will download them itself.

Shared on Baidu Cloud:

  • Training set / validation set link: https://pan.baidu.com/s/1hDGlYIiCDlbi4VK_37FarQ  password: gwhe
  • Test set link: https://pan.baidu.com/s/1BZJM9ccrgtNLz0xT43nqUA  password: x5hi
  • VGG network weights link: https://pan.baidu.com/s/1kl4CnXc8xcPawQf0WoOZ4Q  password: du1h
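If you want to check the data pipeline on its own before running the full script, the repo's helper modules can be driven directly. The sketch below assumes the files from shekkizh/FCN.tensorflow (read_MITSceneParsingData.py, BatchDatsetReader.py) are importable and that data/ is the dataset directory; the option values are illustrative, not prescribed by the post.

import read_MITSceneParsingData as scene_parsing
import BatchDatsetReader as dataset

# read_dataset() downloads and unpacks the SceneParsing data into data_dir if it
# is missing, then returns lists of training/validation records (image path,
# annotation path, filename).
train_records, valid_records = scene_parsing.read_dataset("data/")
print(len(train_records), len(valid_records))

# BatchDatset loads the images, resizes them to 224x224 and serves mini-batches:
# images has shape (2, 224, 224, 3), annotations has shape (2, 224, 224, 1).
image_options = {'resize': True, 'resize_size': 224}
train_reader = dataset.BatchDatset(train_records, image_options)
images, annotations = train_reader.next_batch(2)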
FCN.py
from __future__ import print_function
import tensorflow as tf
import numpy as np

import TensorflowUtils as utils
import read_MITSceneParsingData as scene_parsing
import datetime
import BatchDatsetReader as dataset
from six.moves import xrange

FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", 2, "batch size for training")
tf.flags.DEFINE_string("logs_dir", "logs/", "path to logs directory")
tf.flags.DEFINE_string("data_dir", "data/", "path to dataset")
tf.flags.DEFINE_float("learning_rate", 1e-4, "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", "model/", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', False, "Debug mode: True/ False")
# Select the mode; train and visualize are available (no code below handles test)
# train processes the training set and saves the trained model to logs/
# visualize processes the validation set using the model saved in logs/; this part of
# the code can serve as a template for your own segmentation program
tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize")
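# Example command-line invocations (flag values here are only illustrative):
#   python FCN.py --mode=train --data_dir=data/ --logs_dir=logs/ --batch_size=2
#   python FCN.py --mode=visualize --logs_dir=logs/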

MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'

# Maximum number of training iterations
MAX_ITERATION = int(1e3 + 1)
# Number of classes (the 150 scene-parsing categories plus background)
NUM_OF_CLASSESS = 151
# Input image size
IMAGE_SIZE = 224

# vgg_net: build the VGG-19 network from the pre-trained weights
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        # Convolution layer
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        # Activation (ReLU)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        # Pooling (2x2 average pooling)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net

# Define the semantic segmentation network, built on the VGG structure
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print("setting up vgg initialized conv layers ...")
    # Download the model; downloading it in advance is recommended so it is not fetched again here
    # For the structure of the model file, see
    model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)

    # Get the mean pixel value of the training images
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
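    # (for VGG this is roughly the ImageNet per-channel RGB mean, on the order of
    #  [123.68, 116.78, 103.94])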

    # All the weights live in the 'layers' field
    # For numpy's squeeze, see https://blog.csdn.net/zenghaitao0128/article/details/78512715
    weights = np.squeeze(model_data['layers'])
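    # (np.squeeze drops axes of size 1, e.g. shape (1, N, 1) -> (N,); here it unwraps
    #  the MATLAB-style nesting around the list of layer weights)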

    # image - mean_pixel: subtract the per-channel mean from the image
    processed_image = utils.process_image(image, mean_pixel)

    # Variable scope named 'inference'
    with tf.variable_scope("inference"):
        # Build the VGG network
        image_net = vgg_net(weights, processed_image)
        # The last convolution layer taken from VGG (conv5_3)
        conv_final_layer = image_net["conv5_3"]
        # Add a 2x2 max pooling layer on top
        pool5 = utils.max_pool_2x2(conv_final_layer)

        # Then add three more convolution layers conv6, conv7 and conv8, each followed by ReLU
        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
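
The listing breaks off here at the start of the scoring layer. As a rough sketch of how inference() continues in the upstream shekkizh/FCN.tensorflow code (conv2d_transpose_strided is a helper in that repo's TensorflowUtils; the shapes and arguments below are taken from the upstream code and should be treated as assumptions rather than part of this post): conv8 maps the 4096 features to NUM_OF_CLASSESS score channels, and three transposed convolutions upsample the coarse scores back to the input resolution, fusing pool4 and pool3 as skip connections in the FCN-8s style.

        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)  # 1x1 conv -> per-class scores

        # Upsample x2 and fuse with pool4
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1,
                                                 output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        # Upsample x2 again and fuse with pool3
        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2,
                                                 output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        # Final x8 upsampling back to the input image size
        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3,
                                                 output_shape=deconv_shape3, stride=8)

        # Per-pixel class prediction
        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3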