Residual Network Implementation

Residual networks address the degradation problem that appears as neural networks get deeper. Here we implement a residual network on the MNIST dataset.
The network model is as follows:

[Figure: network architecture diagram]

This code lets you insert a residual layer (ResNet(x1, x2, layer_name)) at any point in a plain VGG network, making it easy to build your own model.
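For example, a residual layer can wrap any pair of convolution outputs (a minimal sketch with hypothetical tensor names `inputs`, `conv_a`, `conv_b`; `con2d_layer` and `ResNet` are defined in the listing below):

```python
# Hypothetical usage sketch: add a skip connection between two conv outputs.
conv_a = con2d_layer(inputs, 64, 3, 'conv_a')   # earlier feature map
conv_b = con2d_layer(conv_a, 64, 3, 'conv_b')   # later feature map
out = ResNet(conv_a, conv_b, 'residual_a')      # out = relu(conv_a + conv_b)
```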

Code:

```python
# A VGG11-style network with residual connections on the MNIST dataset,
# built with TensorFlow 1.x; the script can be run directly.
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
import os


def maxPoolLayer(x, layer_name, ksize=2, strides=2):
    '''Max-pooling layer.

    Pooling window: ksize=[1, height, width, 1]
    Strides: strides=[batch stride, height stride, width stride, channels stride]
    '''
    return tf.nn.max_pool(x, ksize=[1, ksize, ksize, 1], strides=[1, strides, strides, 1], padding='SAME', name=layer_name)


def con2d_layer(x, out_chs, ksize, layer_name, strides=1):
    '''Convolution layer.'''
    d_in = x.get_shape()[-1].value
    with tf.variable_scope(layer_name):
        # Kernel: shape=[filter_height, filter_width, in_channels, out_channels]
        w = tf.get_variable('weights', shape=[ksize, ksize, d_in, out_chs],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))  # initialize the weights
        # Bias: shape=[out_channels]
        b = tf.get_variable('bias', shape=[out_chs],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))  # initialize the biases
        y = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding='SAME'), b))  # convolution + bias + ReLU
    return y


def fc_layer(x, out_kernels, layer_name):
    '''Fully connected layer.'''
    n_in = x.get_shape()[-1].value
    with tf.variable_scope(layer_name):
        w = tf.get_variable('weights', shape=[n_in, out_kernels], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable('bias', shape=[out_kernels], dtype=tf.float32, trainable=True,
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        y = tf.nn.relu(tf.nn.bias_add(tf.matmul(x, w), b))
    return y


def ResNet(x1, x2, layer_name):
    '''Residual connection.

    x1: output of the earlier convolution layer
    x2: output of the later convolution layer
    '''
    with tf.variable_scope(layer_name):
        if x1.shape[1] == x2.shape[1] and x1.shape[3] == x2.shape[3]:
            # Shapes match: add the two feature maps directly
            conv_add = tf.nn.relu(tf.add(x1, x2))
        else:
            # Shapes differ: project x1 with a 1x1 convolution so it matches x2
            w = tf.get_variable('weights', shape=[1, 1, x1.shape[3], x2.shape[3]], dtype=tf.float32,
                                initializer=tf.contrib.layers.xavier_initializer_conv2d())
            # A feature map never grows after convolution, so the stride that maps
            # x1's spatial size onto x2's is simply their ratio
            stride = x1.shape[1].value // x2.shape[1].value
            conv = tf.nn.conv2d(x1, w, strides=[1, stride, stride, 1], padding='VALID')
            conv_add = tf.nn.relu(tf.add(conv, x2))
    return conv_add
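# Example from the model below: residual2 adds pool1 (14*14*64) to conv2_3
# (14*14*256). The spatial sizes already match, so the 1x1 projection uses
# stride 14 // 14 = 1 and only lifts the channel count from 64 to 256.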


def VGG11(x, _dropout, n_cls):
    # input: 28*28*1, output: 14*14*64
    conv1_1 = con2d_layer(x, 64, 2, 'conv1_1', strides=2)
    pool1 = maxPoolLayer(conv1_1, 'pool1', ksize=3, strides=1)
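    # Note: with ksize=3, strides=1 and SAME padding, pool1 keeps the 14*14
    # spatial size; the stride-2 convolution above already did the downsampling.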

    # input: 14*14*64, output: 14*14*64
    conv2_1 = con2d_layer(pool1, 64, 1, 'conv2_1', strides=1)

    # input: 14*14*64, output: 14*14*256
    conv2_2 = con2d_layer(conv2_1, 64, 3, 'conv2_2', strides=1)
    conv2_3 = con2d_layer(conv2_2, 256, 1, 'conv2_3', strides=1)

    # input: x1=14*14*64, x2=14*14*256, output: 14*14*256
    residual2 = ResNet(pool1, conv2_3, 'residual2')

    # input: 14*14*256, output: 14*14*64
    conv3_1 = con2d_layer(residual2, 64, 1, 'conv3_1', strides=1)

    # input: 14*14*64, output: 14*14*64
    conv3_2 = con2d_layer(conv3_1, 64, 3, 'conv3_2', strides=1)

    # input: 14*14*64, output: 14*14*256
    conv3_3 = con2d_layer(conv3_2, 256, 1, 'conv3_3', strides=1)

    # input: x1=14*14*256, x2=14*14*256, output: 14*14*256
    residual3 = ResNet(residual2, conv3_3, 'residual3')

    # input: 14*14*256, output: 14*14*64
    conv4_1 = con2d_layer(residual3, 64, 1, 'conv4_1', strides=1)

    # input: 14*14*64, output: 14*14*64
    conv4_2 = con2d_layer(conv4_1, 64, 3, 'conv4_2', strides=1)

    # input: 14*14*64, output: 14*14*256
    conv4_3 = con2d_layer(conv4_2, 256, 1, 'conv4_3', strides=1)

    # input: x1=14*14*256, x2=14*14*256, output: 14*14*256
    residual4 = ResNet(residual3, conv4_3, 'residual4')

    # input: 14*14*256, output: 7*7*256
    pool4 = maxPoolLayer(residual4, 'pool4', ksize=2, strides=2)

    # Flatten pool4 into a 1-D vector
    pool4_flatten_dims = int(np.prod(pool4.get_shape().as_list()[1:]))
    pool4_flatten = tf.reshape(pool4, [-1, pool4_flatten_dims])

    # (7*7*256) -> 1024 fully connected layer
    fc5 = fc_layer(pool4_flatten, 1024, 'fc5')
    dropout5 = tf.nn.dropout(fc5, _dropout)
    # 1024 -> n_cls fully connected layer; raw logits, so no ReLU here
    w = tf.Variable(tf.truncated_normal(shape=[1024, n_cls], mean=0, stddev=0.1))
    b = tf.Variable(tf.constant(0.1, shape=[n_cls]))
    fc6 = tf.matmul(dropout5, w) + b
    return fc6


# Train the VGG11 network with residual connections on MNIST
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
n_cls = 10              # number of output classes

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, n_cls])
sess = tf.InteractiveSession()

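# Reshape flat 784-pixel rows into 28*28 single-channel images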
x1 = tf.reshape(x, [-1, 28, 28, 1])
keep_prob = tf.placeholder(tf.float32)
y_conv = VGG11(x1, keep_prob, n_cls)

# Build the loss function; cross-entropy is used here
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_conv))
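# Note: softmax_cross_entropy_with_logits applies softmax internally, which
# is why VGG11 returns raw logits without a final activation.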

train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)

# Initialize variables
sess.run(tf.global_variables_initializer())
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./log", sess.graph)
for i in range(3001):
    batch = mnist.train.next_batch(10)
    if i % 100 == 0:
        result = sess.run(merged, feed_dict={
            x: batch[0], y: batch[1], keep_prob: 1.0})
        writer.add_summary(result, i)
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})

# print("test accuracy %g"%accuracy.eval(feed_dict={
#     x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0}))
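# A batched alternative (a sketch): feeding the whole test set at once can
# exhaust memory, so accumulate accuracy over fixed-size chunks instead.
# n_batches = 100
# batch_size = len(mnist.test.images) // n_batches
# test_acc = 0.0
# for j in range(n_batches):
#     xs = mnist.test.images[j * batch_size:(j + 1) * batch_size]
#     ys = mnist.test.labels[j * batch_size:(j + 1) * batch_size]
#     test_acc += accuracy.eval(feed_dict={x: xs, y: ys, keep_prob: 1.0})
# print("test accuracy %g" % (test_acc / n_batches))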

writer.close()
sess.close()
```

Source code download link: https://download.csdn.net/download/OEMT_301/12571313
