Notes on lesson 3 of Cao Jian's public TensorFlow course.
The code is simple, but it walks through the complete workflow for building and training a neural network, which is worth remembering.
In particular, the key functions at each step (generating the dataset, defining forward propagation, defining the loss and backward propagation, running a session to train) are worth memorizing.
#coding:utf-8
# 1. Import modules and generate the dataset
import tensorflow as tf
import numpy as np
BATCH_SIZE = 8
seed = 23455
# Random number generator based on the seed (for reproducibility)
rng = np.random.RandomState(seed)
# rand returns a 32x2 matrix: 32 samples, each with (volume, weight) as the input features
X = rng.rand(32,2)
# Manually generate the labels Y from X for training: a list comprehension that
# unpacks each row of X into (X0, X1) and labels the sample 1 if X0 + X1 < 1, otherwise 0
Y = [[int(X0 + X1 < 1)] for (X0, X1) in X]
print("X:\n",X)
print("Y:\n",Y)
# 2. Define the forward propagation of the neural network: inputs, parameters, output
x = tf.placeholder(tf.float32,shape=(None,2))
y_ = tf.placeholder(tf.float32,shape=(None,1))
w1 = tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))
w2 = tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))
a = tf.matmul(x,w1)
y = tf.matmul(a,w2)
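# Shape check: x is (None, 2) and w1 is (2, 3), so the hidden layer a is (None, 3);
# w2 is (3, 1), so the output y is (None, 1), matching the label placeholder y_.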
# 3. Define the backward propagation: mean squared error loss and the training step
loss = tf.reduce_mean(tf.square(y-y_))
# A mistake I made when writing the loss: typing Y - y_ instead of y - y_.
# Y is the NumPy array holding the training labels, not a node in the computation graph;
# y is the graph's output and y_ is the placeholder for the labels.
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
# Alternative optimizers:
# train_step = tf.train.MomentumOptimizer(0.001,0.9).minimize(loss)
# train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
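# In all three variants the first argument is the learning rate (0.001 here);
# MomentumOptimizer also takes a momentum coefficient (0.9 here). minimize(loss)
# builds the op that computes gradients of the loss and updates the trainable
# variables (w1, w2).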
# 4. Create a session and train for STEPS iterations
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Print the parameter values before any training
    print("w1:\n", sess.run(w1))
    print("w2:\n", sess.run(w2))
    print("\n")
    # Train the model
    STEPS = 3000
    for i in range(STEPS):
        # Cycle through the 32 samples in batches of BATCH_SIZE
        start = (i * BATCH_SIZE) % 32
        end = start + BATCH_SIZE
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 500 == 0:
            total_loss = sess.run(loss, feed_dict={x: X, y_: Y})
            print("After %d training steps, loss on all data is %g" % (i, total_loss))
    # Print the parameter values after training
    print("\n")
    print("w1:\n", sess.run(w1))
    print("w2:\n", sess.run(w2))