TensorFlow 实现 MLP(多层感知机,2→3→1,二分类示例)

# coding=utf-8
"""Train a tiny MLP (2 -> 3 -> 1) on a synthetic binary task with TF 1.x.

Inputs are 128 random points in [0, 1)^2; the label is 1 when x1 + x2 > 1,
else 0.  The script prints the parameters before and after 10 Adam steps,
plus the loss after each step.
"""

import numpy as np
from numpy.random import RandomState
import tensorflow as tf

# Layer parameters.  Each bias is sized to the *output* dimension of its
# layer (3 hidden units, 1 output unit) and added after the matmul,
# giving the standard affine form W·x + b.
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
b1 = tf.Variable(tf.constant(0.1, shape=[3]))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
b2 = tf.Variable(tf.constant(0.1, shape=[1]))

# None in the batch dimension lets us feed any batch size.
x = tf.placeholder(dtype=tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(dtype=tf.float32, shape=(None, 1), name='y-output')

# Forward pass: hidden ReLU layer, then a sigmoid output so y is a
# probability in (0, 1) and the log-loss below is well defined.
a = tf.nn.relu(tf.matmul(x, w1) + b1)
logits = tf.matmul(a, w2) + b2
y = tf.sigmoid(logits)

# Synthetic dataset: fixed seed for reproducibility.
dataset_size = 128
rdm = RandomState(1)
X = rdm.rand(dataset_size, 2)
Y = [[int(x1 + x2 > 1)] for (x1, x2) in X]

# Full binary cross-entropy (both the y_=1 and y_=0 terms); clipping
# keeps log() away from 0.
cross_entropy = -tf.reduce_mean(
    y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
    + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('w1 w2:')
    print(sess.run(w1))
    print(sess.run(w2))
    print('b1 b2:')
    print(sess.run(b1))
    print(sess.run(b2))
    # Full-batch training: the whole 128-sample dataset each step.
    for _ in range(10):
        sess.run(train_step, feed_dict={x: X, y_: Y})
        loss = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
        print(loss)
    print('w1 w2:')
    print(sess.run(w1))
    print(sess.run(w2))
    print('b1 b2:')
    print(sess.run(b1))
    print(sess.run(b2))
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值