import tensorflow as tf
import numpy as np  # NOTE(review): unused in this chunk; kept in case later code needs it
# zhonglihao 2019.04
# Train a tiny 2-4-1 sigmoid network to learn XNOR with TensorFlow 1.x.
# NOTE(review): this uses the TF 1.x graph API (placeholder/Session);
# under TF 2.x it needs `tf.compat.v1` + `disable_v2_behavior()` — confirm target version.

# Training set: XNOR truth table (output 1 when both inputs are equal).
x_train = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
y_train = [[1], [0], [0], [1]]

# Network shape: 2 inputs -> 4 hidden sigmoid cells -> 1 output node.
# 2 hidden cells can also learn this, but training often gets stuck;
# 4 cells converge reliably.
Input_Node = 2
Layer1_Node = 4
Output_Node = 1

# Placeholders with a dynamic batch dimension.
x = tf.placeholder(tf.float32, [None, Input_Node])
y_ = tf.placeholder(tf.float32, [None, Output_Node])

# Trainable parameters: random-normal weights, zero biases.
w1 = tf.Variable(tf.random_normal([Input_Node, Layer1_Node]))
w2 = tf.Variable(tf.random_normal([Layer1_Node, Output_Node]))
b1 = tf.Variable(tf.zeros([Layer1_Node]))
b2 = tf.Variable(tf.zeros([Output_Node]))

# Forward pass: two dense layers with sigmoid activation.
Layer1_Output = tf.sigmoid(tf.matmul(x, w1) + b1)
pred = tf.sigmoid(tf.matmul(Layer1_Output, w2) + b2)

# Loss: mean squared error between target and prediction.
cost = tf.reduce_mean(tf.square(y_ - pred))

# Optimizer.
learning_rate = 0.01
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    max_step = 10000  # number of training steps
    # The feed is the same every step; build it once instead of per call.
    feed = {x: x_train, y_: y_train}
    for i in range(max_step + 1):
        sess.run(train_step, feed_dict=feed)
        # Fix: the original evaluated `cost` on every step but never used the
        # result. Evaluate it only when reporting, and print it alongside the
        # predictions so training progress is actually visible.
        if i % 100 == 0:
            loss, p = sess.run([cost, pred], feed_dict=feed)
            print('step %d  loss %.6f' % (i, loss))
            print(p)
    print('end')