import tensorflow as tf
from numpy.random import RandomState

# Training data: 10000 samples, each a 3-feature vector with values in [0, 1).
rdm = RandomState(1)
IN = rdm.rand(10000, 3)

# Binary labels: 1 when the three features sum to less than 1, else 0.
OUT = [[int(x1 + x2 + x3 < 1)] for (x1, x2, x3) in IN]

# Placeholders for a mini-batch of inputs and labels (TF1-style graph API).
x = tf.placeholder(tf.float32, shape=(None, 3), name='x_input')
y = tf.placeholder(tf.float32, shape=(None, 1), name='y_output')

# Input -> hidden weights (3x3: hidden layer of 3 units).
in_to_hd_w1 = tf.Variable(tf.random_normal([3, 3], stddev=1, seed=1))
# Hidden -> output weights (3x1: single output unit).
hd_to_out_w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))

# Forward pass. NOTE(review): there is no activation anywhere, so `yy` is an
# unbounded linear output even though the loss below treats it as a
# probability; this mirrors the original tutorial code — confirm intent
# before adding a sigmoid, as it changes training behavior.
hd_layer = tf.matmul(x, in_to_hd_w1)
yy = tf.matmul(hd_layer, hd_to_out_w2)

# Cross-entropy-style loss; the clip guards against log(0).
loss = -tf.reduce_mean(y * tf.log(tf.clip_by_value(yy, 1e-10, 1.0)))
learning_rate = 0.001
training = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# Training configuration.
steps = 10000        # number of SGD steps
batch = 10           # mini-batch size
dataset_size = 10000

with tf.Session() as sess:
    # Initialize all graph variables before training.
    sess.run(tf.global_variables_initializer())
    for i in range(steps):
        # Cycle through the data set in fixed-size mini-batches.
        # (The original sliced IN[i:(i+1)*10], which produced overlapping
        # batches of ever-growing size — fixed to proper windows here.)
        start = (i * batch) % dataset_size
        end = min(start + batch, dataset_size)
        sess.run(training, feed_dict={x: IN[start:end], y: OUT[start:end]})
        if i % 1000 == 0:
            # Report the loss over the full data set every 1000 steps.
            print("loss is:")
            print(sess.run(loss, feed_dict={x: IN, y: OUT}))
# Run output:
# loss is:
# 1.44677
# loss is:
# 1.02309
# loss is:
# 0.737368
# loss is:
# 0.49516
# loss is:
# 0.037638
# loss is:
# 0.0149277
# loss is:
# 0.0121234
# loss is:
# 0.010189
# loss is:
# 0.00865641
# loss is:
# 0.00752897