Implementing an MLP in TensorFlow

```python
#coding=utf-8
import numpy as np
from numpy.random import RandomState
import tensorflow as tf

# Parameters of a 2-3-1 MLP (TensorFlow 1.x graph API)
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
b1 = tf.Variable(tf.constant(0.1, shape=[3]))   # hidden-layer bias: one value per hidden unit
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
b2 = tf.Variable(tf.constant(0.1, shape=[1]))   # output-layer bias

# Placeholders for the input features and the binary labels
x = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(dtype=tf.float32, shape=(None, 1), name='y-output')
print(x.shape)  # static shape of the input placeholder: (?, 2)

# Forward pass: biases are added after the matrix multiplications,
# and a sigmoid squashes the output into (0, 1) for binary classification
a = tf.matmul(x, w1) + b1
a_out = tf.nn.relu(a)
y = tf.sigmoid(tf.matmul(a_out, w2) + b2)

# Synthetic dataset: the label is 1 when x1 + x2 > 1, otherwise 0
dataset_size = 128
rdm = RandomState(1)
X = rdm.rand(dataset_size, 2)
Y = [[int(x1 + x2 > 1)] for (x1, x2) in X]

# Binary cross-entropy, with predictions clipped to avoid log(0)
cross_entropy = -tf.reduce_mean(
    y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
    + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy)

init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)

print('w1 w2:')
print(sess.run(w1))
print(sess.run(w2))
print('b1 b2:')
print(sess.run(b1))
print(sess.run(b2))

# Ten full-batch training steps over the 128 samples
for i in range(10):
    sess.run(train_step, feed_dict={x: X, y_: Y})
    loss = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
    print(loss)

print('w1 w2:')
print(sess.run(w1))
print(sess.run(w2))
print('b1 b2:')
print(sess.run(b1))
print(sess.run(b2))
sess.close()
```
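The script above uses the TensorFlow 1.x graph-and-session API (placeholders, `tf.Session`). For readers on TensorFlow 2.x, a rough Keras sketch of the same 2-3-1 network and training setup is shown below; the layer sizes, optimizer, learning rate, and number of full-batch steps mirror the script above, but weight initialization is not seeded identically, so exact loss values will differ.

```python
import numpy as np
import tensorflow as tf
from numpy.random import RandomState

# Same synthetic dataset as above: label is 1 when x1 + x2 > 1
rdm = RandomState(1)
X = rdm.rand(128, 2)
Y = np.array([[int(x1 + x2 > 1)] for (x1, x2) in X], dtype=np.float32)

# 2-3-1 MLP: hidden ReLU layer, sigmoid output
model = tf.keras.Sequential([
    tf.keras.layers.Dense(3, activation='relu', input_shape=(2,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Full-batch training, mirroring the 10 full-batch steps above
model.fit(X, Y, epochs=10, batch_size=128, verbose=2)
print(model.evaluate(X, Y, verbose=0))
```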