Shape changes (as printed inside get_net):
(?, 784)         # flat MNIST input
(?, 28, 28, 1)   # after reshape
(?, 14, 14, 8)   # after conv2d: 8 filters, 3x3 kernel, stride 2, SAME padding
(?, 7, 7, 8)     # after conv2d: 8 filters, 3x3 kernel, stride 2, SAME padding
(?, 1, 1, 8)     # after conv2d: 8 filters, 5x5 kernel, stride 7, SAME padding
(?, 8)           # after flatten
(?, 10)          # after dense (10 logits)
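With "SAME" padding, the spatial output size of a strided convolution depends only on the input size and the stride: out = ceil(in / stride), independent of kernel size. That is exactly where 28 → 14 → 7 → 1 comes from. A minimal sketch of the arithmetic (plain Python, not part of the training script):

import math

def same_out_size(in_size, stride):
    # TensorFlow "SAME" padding: output = ceil(input / stride)
    return math.ceil(in_size / stride)

print(same_out_size(28, 2))  # 14  (first conv, stride 2)
print(same_out_size(14, 2))  # 7   (second conv, stride 2)
print(same_out_size(7, 7))   # 1   (third conv, stride 7)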
Results comparison: batch normalization does help a bit. Each row is one evaluation point; the columns match the print order acc_train1, acc_test1, acc_train2, acc_test2:

train acc (no BN)   test acc (no BN)   train acc (with BN)   test acc (with BN)
0.91725457          0.9232             0.9548                0.9553
0.9177273           0.9234             0.9556909             0.9555
0.9190364           0.9245             0.95681816            0.9557
Reference code (tf.layers)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)


def get_net(x):
    print(x.shape)
    x = tf.reshape(x, (-1, 28, 28, 1))
    print(x.shape)
    net = tf.layers.conv2d(x, 8, 3, 2, "SAME", activation=tf.nn.leaky_relu)
    print(net.shape)
    net = tf.layers.conv2d(net, 8, 3, 2, "SAME", activation=tf.nn.leaky_relu)
    print(net.shape)
    net = tf.layers.conv2d(net, 8, 5, 7, "SAME", activation=tf.nn.leaky_relu)
    print(net.shape)
    net = tf.layers.flatten(net)
    print(net.shape)
    net = tf.layers.dense(net, 10, activation=tf.nn.leaky_relu)
    print(net.shape)
    return net


def get_net2(x):
    # Same network as get_net, with batch normalization after the input and each conv layer
    print(x.shape)
    x = tf.reshape(x, (-1, 28, 28, 1))
    x = tf.layers.batch_normalization(x, training=True)
    print(x.shape)
    net = tf.layers.conv2d(x, 8, 3, 2, "SAME", activation=tf.nn.leaky_relu)
    net = tf.layers.batch_normalization(net, training=True)
    print(net.shape)
    net = tf.layers.conv2d(net, 8, 3, 2, "SAME", activation=tf.nn.leaky_relu)
    net = tf.layers.batch_normalization(net, training=True)
    print(net.shape)
    net = tf.layers.conv2d(net, 8, 5, 7, "SAME", activation=tf.nn.leaky_relu)
    net = tf.layers.batch_normalization(net, training=True)
    print(net.shape)
    net = tf.layers.flatten(net)
    print(net.shape)
    net = tf.layers.dense(net, 10, activation=tf.nn.leaky_relu)
    print(net.shape)
    return net


train_num = 10000
batch_size = 100
show_num = 200
learning_rate = .0001

in_x = tf.placeholder(tf.float32, (None, 784))
in_y = tf.placeholder(tf.float32, (None, 10))

# Network 1: without batch normalization
out_y = get_net(in_x)
loss = tf.nn.softmax_cross_entropy_with_logits(labels=in_y, logits=out_y)
train_opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
predict = tf.equal(tf.argmax(out_y, 1), tf.argmax(in_y, 1))
accuracy = tf.reduce_mean(tf.cast(predict, tf.float32))

# Network 2: with batch normalization
out_y2 = get_net2(in_x)
loss2 = tf.nn.softmax_cross_entropy_with_logits(labels=in_y, logits=out_y2)
train_opt2 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss2)
predict2 = tf.equal(tf.argmax(out_y2, 1), tf.argmax(in_y, 1))
accuracy2 = tf.reduce_mean(tf.cast(predict2, tf.float32))

print(tf.trainable_variables())
print(len(tf.trainable_variables()))
print(tf.global_variables())
print(len(tf.global_variables()))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(train_num):
        batch = mnist.train.next_batch(batch_size)
        sess.run(
            [train_opt, train_opt2], feed_dict={
                in_x: batch[0],
                in_y: batch[1],
            })
        if not (i + 1) % show_num:
            acc_train1, acc_train2 = sess.run(
                [accuracy, accuracy2], feed_dict={
                    in_x: mnist.train.images,
                    in_y: mnist.train.labels,
                }
            )
            acc_test1, acc_test2 = sess.run(
                [accuracy, accuracy2], feed_dict={
                    in_x: mnist.test.images,
                    in_y: mnist.test.labels,
                }
            )
            print(acc_train1, acc_test1, acc_train2, acc_test2)
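Note that both networks run batch normalization with training=True even when evaluating, so the moving-average statistics are never consumed and no extra update ops are needed here. In the more common setup, where a boolean placeholder switches between training and inference, tf.layers.batch_normalization also requires running the UPDATE_OPS collection alongside the training step. A minimal sketch of that pattern, with an illustrative is_training placeholder that is not part of the script above:

# Hedged sketch: typical batch-norm wiring with tf.layers (not the original script)
is_training = tf.placeholder(tf.bool)  # illustrative name; fed False at eval time
net = tf.layers.batch_normalization(net, training=is_training)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # moving mean/variance updates
with tf.control_dependencies(update_ops):
    train_opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)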
slim function comparison
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow.contrib.slim as slim

# Load the MNIST dataset
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)


def get_net(x):
    # Same architecture as before; each tf.layers call is kept as a comment above its slim equivalent
    print(x.shape)
    x = tf.reshape(x, (-1, 28, 28, 1))
    print(x.shape)
    # net = tf.layers.conv2d(x, 16, 3, 2, "SAME", activation=tf.nn.leaky_relu)
    net = slim.conv2d(x, 16, 3, 2, "SAME", activation_fn=tf.nn.leaky_relu)
    print(net.shape)
    # net = tf.layers.conv2d(net, 16, 3, 2, "SAME", activation=tf.nn.leaky_relu)
    net = slim.conv2d(net, 16, 3, 2, "SAME", activation_fn=tf.nn.leaky_relu)
    print(net.shape)
    # net = tf.layers.conv2d(net, 16, 7, 7, "SAME", activation=tf.nn.leaky_relu)
    net = slim.conv2d(net, 16, 5, 7, "SAME", activation_fn=tf.nn.leaky_relu)
    print(net.shape)
    # net = tf.layers.flatten(net)
    net = slim.flatten(net)
    print(net.shape)
    # net = tf.layers.dense(net, 10, activation=tf.nn.leaky_relu)
    net = slim.fully_connected(net, 10, activation_fn=tf.nn.leaky_relu)
    print(net.shape)
    return net


train_num = 10000
batch_size = 100
show_num = 200
learning_rate = .0001

in_x = tf.placeholder(tf.float32, (None, 784))
in_y = tf.placeholder(tf.float32, (None, 10))

out_y = get_net(in_x)
loss = tf.nn.softmax_cross_entropy_with_logits(labels=in_y, logits=out_y)
train_opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
predict = tf.equal(tf.argmax(out_y, 1), tf.argmax(in_y, 1))
accuracy = tf.reduce_mean(tf.cast(predict, tf.float32))

print(tf.trainable_variables())
print(len(tf.trainable_variables()))
print(tf.global_variables())
print(len(tf.global_variables()))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(train_num):
        batch = mnist.train.next_batch(batch_size)
        sess.run(
            [train_opt], feed_dict={
                in_x: batch[0],
                in_y: batch[1],
            })
        if not (i + 1) % show_num:
            acc_train1 = sess.run(
                accuracy, feed_dict={
                    in_x: mnist.train.images,
                    in_y: mnist.train.labels,
                }
            )
            acc_test1 = sess.run(
                accuracy, feed_dict={
                    in_x: mnist.test.images,
                    in_y: mnist.test.labels,
                }
            )
            print(acc_train1, acc_test1)
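Beyond the one-to-one replacements shown above, slim also provides arg_scope, which lets shared arguments such as activation_fn be declared once for a group of layer functions instead of being repeated on every call. A minimal sketch of how the same network could be written that way (same layer parameters as above; the arg_scope version is an illustration, not from the original post):

# Hedged sketch: the slim network rewritten with arg_scope
def get_net_with_arg_scope(x):
    x = tf.reshape(x, (-1, 28, 28, 1))
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.leaky_relu):
        net = slim.conv2d(x, 16, 3, 2, "SAME")
        net = slim.conv2d(net, 16, 3, 2, "SAME")
        net = slim.conv2d(net, 16, 5, 7, "SAME")
        net = slim.flatten(net)
        net = slim.fully_connected(net, 10)
    return net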
Reposted from: https://my.oschina.net/ahaoboy/blog/1929819