搭建神经网络流程:
1.加载训练数据,并预处理(对于图像等数据,可以直接转化为矩阵,或者通过tf.convert_to_tensor()将其转换为tensor数据类型处理);
2.构建网络层,如conv,pool,relu,lrn,fc等,在此处需要设置相应层的权重和偏置;
比较喜欢的构建方式如下(以定义AlexNet为例):
import tensorflow as tf
BATCH_SIZE = 200
def bias(name, shape, bias_start=0.0, trainable=True):
    """Create (or reuse, under scope reuse) a float32 bias variable.

    The variable is constant-initialized to ``bias_start``.
    """
    dtype = tf.float32
    init = tf.constant_initializer(bias_start, dtype=dtype)
    return tf.get_variable(name, shape, tf.float32,
                           trainable=trainable, initializer=init)
def weight(name, shape, stddev=0.02, trainable=True):
    """Create (or reuse, under scope reuse) a float32 weight variable.

    The variable is drawn from N(0, stddev^2) at initialization.
    """
    dtype = tf.float32
    init = tf.random_normal_initializer(stddev=stddev, dtype=dtype)
    return tf.get_variable(name, shape, tf.float32,
                           trainable=trainable, initializer=init)
def fully_connected(value, output_shape, name='fully_connected', with_w=False):
    """Flatten `value` to [BATCH_SIZE, -1] and apply an affine (FC) layer.

    Returns the pre-activation output; with ``with_w=True`` also returns
    the weight and bias variables.

    NOTE(review): the batch dimension is pinned to the module-level
    BATCH_SIZE constant, so inputs must have exactly that many rows.
    """
    flat = tf.reshape(value, [BATCH_SIZE, -1])
    in_dim = flat.get_shape().as_list()[1]
    with tf.variable_scope(name):
        weights = weight('weights', [in_dim, output_shape], 0.02)
        biases = bias('biases', [output_shape], 0.0)
        out = tf.matmul(flat, weights) + biases
        if with_w:
            return out, weights, biases
        return out
def relu(value, name='relu'):
    """Apply element-wise ReLU inside a named variable scope."""
    with tf.variable_scope(name):
        activated = tf.nn.relu(value)
        return activated
def conv2d(value, output_dim, k_h=5, k_w=5,
           strides=[1, 1, 1, 1], name='conv2d'):
    """2-D convolution (SAME padding) with a learned bias.

    Args:
        value: input tensor; the last dimension is taken as the channel
            count for the kernel shape.
        output_dim: number of output channels.
        k_h, k_w: kernel height and width.
        strides: NHWC stride list passed straight to tf.nn.conv2d.
            (List default is never mutated here, so sharing is harmless.)
        name: variable scope holding the 'weights' and 'biases' variables.

    Returns:
        The biased convolution output, same spatial layout as tf.nn.conv2d
        produces with padding='SAME'.
    """
    with tf.variable_scope(name):
        weights = weight('weights',
                         [k_h, k_w, value.get_shape()[-1], output_dim])
        conv = tf.nn.conv2d(value, weights, strides=strides, padding='SAME')
        biases = bias('biases', [output_dim])
        # Fix: the original wrapped bias_add in
        # tf.reshape(..., conv.get_shape()), a no-op round-trip —
        # tf.nn.bias_add already preserves the conv output shape.
        return tf.nn.bias_add(conv, biases)
def pool(value, k_size=[1, 3, 3, 1], strides=[1, 2, 2, 1], name='pool1'):
    """Max-pool `value` with VALID padding inside a named variable scope."""
    with tf.variable_scope(name):
        pooled = tf.nn.max_pool(value, ksize=k_size, strides=strides,
                                padding='VALID')
    return pooled
def pool_avg(value, k_size=[1, 3, 3, 1], strides=[1, 2, 2, 1], name='pool1'):
    """Average-pool `value` with VALID padding inside a named variable scope.

    NOTE(review): the default scope name 'pool1' matches pool()'s default —
    presumably intentional in the original; verify callers pass explicit names.
    """
    with tf.variable_scope(name):
        pooled = tf.nn.avg_pool(value, ksize=k_size, strides=strides,
                                padding='VALID')
    return pooled
def lrn(value, depth_radius=1, alpha=5e-05, beta=0.75, name='lrn1'):
with tf.variable_scope(name):
n