# Port of the previous article's code (a hand-rolled NumPy neural net) to TensorFlow.
# Original article: https://blog.csdn.net/seTaire/article/details/93760032
# Training results: (see TensorBoard summaries written below)
import numpy as np
import tensorflow as tf
def randomdata(classes, numberperclass, dimension):
    """Generate the classic "spiral" toy classification dataset.

    Each class is one arm of a spiral: radius grows linearly from 0 to 1
    while the angle sweeps a 4-radian arc (plus Gaussian jitter).

    Args:
        classes: number of spiral arms / classes.
        numberperclass: number of points per class.
        dimension: feature dimension of x. NOTE(review): the spiral fill
            below writes exactly 2 columns (sin, cos), so this effectively
            assumes dimension == 2 — confirm before generalizing.

    Returns:
        Tuple (x, y_onehot) where x has shape (classes * numberperclass,
        dimension) and y_onehot has shape (classes * numberperclass, classes).
    """
    x = np.zeros((classes * numberperclass, dimension))
    y = np.zeros(classes * numberperclass, dtype='uint8')
    for j in range(classes):
        # Row slice owned by class j.
        ix = range(numberperclass * j, numberperclass * (j + 1))
        # Radius from center, angle along the arm with noise.
        r = np.linspace(0.0, 1, numberperclass)
        t = np.linspace(j * 4, (j + 1) * 4, numberperclass) \
            + np.random.randn(numberperclass) * 0.2
        x[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        y[ix] = j
    # One-hot encode the integer labels via identity-matrix row lookup.
    return x, np.eye(classes)[y]
if __name__ == '__main__':
    # Hyperparameters for a 2-layer fully-connected classifier on the
    # spiral dataset (TensorFlow 1.x graph-mode API).
    classes = 3
    numberperclass = 100
    dimension = 2
    hidden_number = 100   # hidden-layer width
    step_size = 1         # SGD learning rate
    reg = 0.001           # L2 regularization strength
    totalnumber = classes * numberperclass
    train_x, train_y = randomdata(classes, numberperclass, dimension)

    # Full-batch placeholders: the whole dataset is fed every step.
    x = tf.placeholder("float", [totalnumber, dimension])
    y = tf.placeholder("float", [totalnumber, classes])

    # Layer 1: affine + ReLU.
    W = tf.Variable(tf.random_normal([dimension, hidden_number]), trainable=True)
    b = tf.Variable(tf.random_normal([1, hidden_number]), trainable=True)
    hidden_layer = tf.nn.relu(tf.matmul(x, W) + b)

    # Layer 2: affine + softmax -> class probabilities.
    W2 = tf.Variable(tf.random_normal([hidden_number, classes]), trainable=True)
    b2 = tf.Variable(tf.random_normal([1, classes]), trainable=True)
    probs = tf.nn.softmax(tf.matmul(hidden_layer, W2) + b2)

    # NOTE: despite the original variable name, this is a mean-squared-error
    # on the softmax probabilities, not a cross-entropy loss.
    mse_loss = tf.reduce_mean(tf.square(probs - y))
    tf.add_to_collection('loss', mse_loss)
    # L2 penalties on all weights and biases, gathered into the same
    # collection and summed into the total loss.
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(W))
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(b))
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(W2))
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(b2))
    loss = tf.add_n(tf.get_collection("loss"))

    train_step = tf.train.GradientDescentOptimizer(step_size).minimize(loss)

    # Training-set accuracy (no held-out split in this toy example).
    correct_prediction = tf.equal(tf.argmax(probs, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    # Group the loss summary under its own name scope for TensorBoard.
    with tf.name_scope("loss-model"):
        # Scalar summary to watch the loss convergence curve.
        tf.summary.scalar("loss", loss)
    with tf.name_scope("accuracy-model"):
        tf.summary.scalar("accuracy", accuracy)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # merge_all() collects every summary op registered above.
        merged = tf.summary.merge_all()
        # All run data goes to /tmp/tensorflow for TensorBoard.
        writer = tf.summary.FileWriter('/tmp/tensorflow', sess.graph)
        for i in range(1000):
            _, summary = sess.run([train_step, merged],
                                  feed_dict={x: train_x, y: train_y})
            writer.add_summary(summary, i)
            # if i % 100 == 0:
            #     print("test accuracy %g" % accuracy.eval(feed_dict={x: train_x, y: train_y}))
            #     print(loss.eval(feed_dict={x: train_x, y: train_y}))