I recently learned the basics of TensorFlow, and this post is a summary of what I picked up.
Fully Connected Deep Neural Network (FC-DNN)
In a fully connected deep neural network, the neurons in adjacent layers are all connected to one another, and no weights are shared. This architecture works well for ordinary classification problems, but for grid-structured data such as images a CNN (convolutional neural network) is usually the better choice, while for sequential data such as NLP (natural language processing) and text analysis an RNN (recurrent neural network) tends to perform better.
A TensorFlow implementation of the DNN follows.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
# Load the MNIST dataset (labels encoded as one-hot vectors)
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
LOG_DIR = "logs"  # directory for TensorBoard logs (used by the commented-out FileWriter below)
IMAGE_NUM = 10    # number of test images to predict at the end
# Size of each mini-batch, and the number of batches per epoch
batch_size = 100
n_batch = mnist.train.num_examples // batch_size
print(n_batch)
# Define placeholders for the inputs, the one-hot labels, and the dropout keep probability
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
dropout = tf.placeholder(tf.float32)
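# Store the learning rate in a Variable so it can be decayed with tf.assign during training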
learning_rate = tf.Variable(1e-3)
hidden_num = 20  # number of neurons in the hidden layer
# Input layer to hidden layer
w_hidden = tf.Variable(tf.truncated_normal([784, hidden_num], stddev=0.2))
b_hidden = tf.Variable(tf.zeros([hidden_num]) + 0.1)
o_hidden = tf.nn.sigmoid(tf.matmul(x, w_hidden) + b_hidden)
h_dropout = tf.nn.dropout(o_hidden, dropout)  # in TF1, the second argument is the keep probability
# Hidden layer to output layer
w = tf.Variable(tf.truncated_normal([hidden_num, 10], stddev=0.1))
b = tf.Variable(tf.zeros([10])+0.1)
o = tf.matmul(h_dropout, w) + b
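# Keep the raw logits as the prediction; softmax is applied inside the loss below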
prediction = o
# Define the cost function: softmax cross-entropy between labels and logits
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=prediction))
# Minimize the loss with the Adam optimizer
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
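# Accuracy: fraction of examples whose predicted class matches the label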
correct_predict = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))
with tf.Session() as sess:
    # writer = tf.summary.FileWriter('logs/', sess.graph)
    sess.run(tf.global_variables_initializer())
    # The original snippet was truncated here; a standard completion follows,
    # with an assumed epoch count of 21 and a 5% learning-rate decay per epoch.
    for epoch in range(21):
        sess.run(tf.assign(learning_rate, learning_rate * 0.95))
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # keep 70% of hidden activations during training (assumed value)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, dropout: 0.7})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, dropout: 1.0})
        print("Epoch " + str(epoch) + ", test accuracy: " + str(acc))
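    # Usage sketch (my addition, not from the original post): run inference on the
    # first IMAGE_NUM test images with the trained network, disabling dropout by
    # feeding a keep probability of 1.0, and compare against the true labels.
    logits = sess.run(prediction, feed_dict={x: mnist.test.images[:IMAGE_NUM], dropout: 1.0})
    print("Predicted digits:", np.argmax(logits, axis=1))
    print("True digits:     ", np.argmax(mnist.test.labels[:IMAGE_NUM], axis=1))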