BP神经网络 + Tensorflow

将上一篇文章的代码翻译为Tensorflow。

链接:https://blog.csdn.net/seTaire/article/details/93760032

训练结果:(原文此处附有训练结果截图,本文本版本未能保留该图片)

import numpy as np
import tensorflow as tf

def randomdata(classes, numberperclass, dimension):
    """Generate a toy 2-D spiral dataset for multi-class classification.

    Each class occupies one arm of a spiral: the radius grows linearly from
    0 to 1 while the angle sweeps a class-specific range with Gaussian noise.

    Args:
        classes: number of spiral arms / classes.
        numberperclass: points generated per class.
        dimension: width of the feature matrix. NOTE: the generator always
            writes exactly 2 columns (sin/cos coordinates), so this must be 2;
            other values raise a broadcast error — kept for interface
            compatibility with the original.

    Returns:
        Tuple ``(x, y_onehot)`` where ``x`` has shape
        ``(classes * numberperclass, dimension)`` and ``y_onehot`` has shape
        ``(classes * numberperclass, classes)``.
    """
    x = np.zeros((classes * numberperclass, dimension))
    y = np.zeros(classes * numberperclass, dtype='uint8')
    for j in range(classes):
        # Contiguous row range for class j — a slice avoids building an
        # intermediate index list.
        ix = slice(numberperclass * j, numberperclass * (j + 1))
        r = np.linspace(0.0, 1, numberperclass)  # radius: 0 -> 1
        # Angle sweeps [4j, 4(j+1)] radians plus small Gaussian jitter.
        t = np.linspace(j * 4, (j + 1) * 4, numberperclass) \
            + np.random.randn(numberperclass) * 0.2
        x[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        y[ix] = j
    # np.eye(classes)[y] turns integer labels into one-hot rows.
    return x, np.eye(classes)[y]

if __name__ == '__main__':
    # Hyper-parameters for the 2-D spiral classification demo.
    classes = 3
    numberperclass = 100
    dimension = 2
    hidden_number = 100   # hidden-layer width
    step_size = 1         # SGD learning rate
    reg = 0.001           # L2 regularization strength
    totalnumber = classes * numberperclass
    train_x, train_y = randomdata(classes, numberperclass, dimension)

    # NOTE(review): this is TensorFlow 1.x code (placeholders, Session,
    # tf.contrib) and will not run on TF 2.x without tf.compat.v1.
    x = tf.placeholder("float", [totalnumber, dimension])
    y = tf.placeholder("float", [totalnumber, classes])

    # Hidden layer: fully connected + ReLU.
    W = tf.Variable(tf.random_normal([dimension, hidden_number]), trainable=True)
    b = tf.Variable(tf.random_normal([1, hidden_number]), trainable=True)
    hidden_layer = tf.nn.relu(tf.matmul(x, W) + b)

    # Output layer: softmax class probabilities.
    W2 = tf.Variable(tf.random_normal([hidden_number, classes]), trainable=True)
    b2 = tf.Variable(tf.random_normal([1, classes]), trainable=True)
    probs = tf.nn.softmax(tf.matmul(hidden_layer, W2) + b2)

    # Renamed from the original's misleading "cross_entropy": this is the
    # mean-squared error between the softmax output and the one-hot labels,
    # not a cross-entropy loss.
    mse_loss = tf.reduce_mean(tf.square(probs - y))
    tf.add_to_collection('loss', mse_loss)
    # L2 penalties on every weight/bias, gathered into the same collection
    # and summed into the total training loss.
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(W))
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(b))
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(W2))
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(b2))
    loss = tf.add_n(tf.get_collection("loss"))

    train_step = tf.train.GradientDescentOptimizer(step_size).minimize(loss)

    # Training-set accuracy: fraction of rows where the predicted class
    # matches the one-hot label.
    correct_prediction = tf.equal(tf.argmax(probs, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    # Scope the loss summary under "loss-model" so TensorBoard groups it;
    # the scalar lets us watch the loss curve converge.
    with tf.name_scope("loss-model"):
        tf.summary.scalar("loss", loss)

    with tf.name_scope("accuracy-model"):
        tf.summary.scalar("accuracy", accuracy)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # merge_all() collects every summary op defined above.
        merged = tf.summary.merge_all()
        # Event files go to /tmp/tensorflow for TensorBoard consumption.
        writer = tf.summary.FileWriter('/tmp/tensorflow', sess.graph)
        try:
            # Full-batch gradient descent: every step feeds the whole
            # training set (placeholders are shaped to totalnumber rows).
            for i in range(1000):
                _, summary = sess.run([train_step, merged],
                                      feed_dict={x: train_x, y: train_y})
                writer.add_summary(summary, i)
        finally:
            # Fix: the original never closed the writer, so buffered
            # summaries could be lost; close() flushes the event file.
            writer.close()

 

  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值