Building a Simple Neural Network with TensorFlow

import numpy as np
import tensorflow as tf

Define an operation that adds a fully connected layer

def addLayer(inputs, in_c, output_size, activation_function=None):
    # Fully connected layer: out = inputs * W + b, with an optional activation.
    W = tf.Variable(tf.random_normal([in_c, output_size], seed=1))
    b = tf.Variable(tf.zeros([1, output_size]))
    xW_plus_b = tf.matmul(inputs, W) + b
    if activation_function is not None:
        out = activation_function(xW_plus_b)
    else:
        out = xW_plus_b
    return out
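
A quick shape check (a sketch, assuming only the definition above) confirms that the layer maps a [batch, in_c] input to a [batch, output_size] output:

# Shape check: a [None, 3] input through a 3 -> 5 layer yields [None, 5].
x_check = tf.placeholder(tf.float32, [None, 3])
h_check = addLayer(x_check, 3, 5, tf.nn.relu)
print(h_check.get_shape())  # (?, 5)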

Generate the training data

x_data = np.linspace(-1, 1, 200).reshape(-1, 1)
noise = np.random.normal(0, 1, [200, 1])  # note: generated but never added to y_data
y_data = np.square(x_data) + 0.5  # noiseless quadratic target y = x^2 + 0.5
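
Since no noise is actually added, the two probe points evaluated during training have exact ground-truth targets:

# Ground truth at the probe inputs used below: 0.5^2 + 0.5 and 0.3^2 + 0.5.
print(np.square(np.array([[0.5], [0.3]])) + 0.5)  # [[0.75], [0.59]]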

Create the input placeholders

# The leading None leaves the batch size unspecified, so the same graph
# accepts both 80-row mini-batches and the full 200-row evaluation feed.
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

Build a two-layer network with sigmoid activation

L1 = addLayer(xs, 1, 20, tf.sigmoid)  # hidden layer: 1 -> 20, sigmoid
out = addLayer(L1, 20, 1)             # linear output layer: 20 -> 1
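
As a sanity check (a sketch, assuming the default graph holds only these two layers), the network has (1*20 + 20) + (20*1 + 1) = 61 trainable parameters:

# Count trainable parameters across both layers; expect 61.
n_params = sum(int(np.prod(v.get_shape().as_list())) for v in tf.trainable_variables())
print(n_params)  # 61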

Define the loss function and the training op

# Mean over the batch of the per-sample squared error; since each target
# has a single dimension, this is ordinary mean squared error.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - out), axis=1))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
batch = 80
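
Because each target is one-dimensional, the inner reduce_sum is effectively a no-op, so the TF 1.x built-in helper would compute the same value (an equivalent shown for comparison, not what this post uses):

# Equivalent formulation via the TF 1.x losses helper.
mse = tf.losses.mean_squared_error(labels=ys, predictions=out)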

Start training

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100000):
        # Slide an 80-sample window over the 200 samples, wrapping around.
        start = batch * i % x_data.shape[0]
        end = np.minimum(start + batch, x_data.shape[0])
        feed = {xs: x_data[start:end, :], ys: y_data[start:end, :]}
        _, printloss = sess.run([train_op, loss], feed_dict=feed)
        # print('loss', printloss)  # per-step mini-batch loss; very verbose
        if i % 10000 == 0:
            # Every 10000 steps, report the full-dataset loss and the
            # predictions at the probe points x = 0.5 and x = 0.3.
            pre = sess.run([out], feed_dict={xs: np.array([[0.5], [0.3]])})
            printloss = sess.run(loss, feed_dict={xs: x_data, ys: y_data})
            print('total_loss', printloss)
            print('predict in 0.5, 0.3', pre)
total_loss 31.1016
predict in 0.5, 0.3 [array([[ 8.13314533],
       [ 7.31019783]], dtype=float32)]
total_loss 0.000601287
predict in 0.5, 0.3 [array([[ 0.75571096],
       [ 0.57558763]], dtype=float32)]
total_loss 0.000264009
predict in 0.5, 0.3 [array([[ 0.7544269 ],
       [ 0.58114523]], dtype=float32)]
total_loss 0.000148942
predict in 0.5, 0.3 [array([[ 0.75366187],
       [ 0.5838505 ]], dtype=float32)]
total_loss 9.5837e-05
predict in 0.5, 0.3 [array([[ 0.75286418],
       [ 0.58528596]], dtype=float32)]
total_loss 6.60314e-05
predict in 0.5, 0.3 [array([[ 0.75213754],
       [ 0.58617651]], dtype=float32)]
total_loss 4.73021e-05
predict in 0.5, 0.3 [array([[ 0.75153911],
       [ 0.58682084]], dtype=float32)]
total_loss 3.47811e-05
predict in 0.5, 0.3 [array([[ 0.75105989],
       [ 0.58733034]], dtype=float32)]
total_loss 2.60982e-05
predict in 0.5, 0.3 [array([[ 0.75067317],
       [ 0.58774734]], dtype=float32)]
total_loss 1.9936e-05
predict in 0.5, 0.3 [array([[ 0.75036156],
       [ 0.58810079]], dtype=float32)]
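
With batch = 80 and 200 samples, the start index in the loop above cycles with period 5, so every sample is visited regularly; a standalone check of that indexing:

# Reproduce the first few batch windows of the training loop.
for i in range(6):
    start = 80 * i % 200
    print(start, min(start + 80, 200))
# -> (0, 80), (80, 160), (160, 200), (40, 120), (120, 200), (0, 80), ...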

Rebuild the network with ReLU activation

L1 = addLayer(xs, 1, 20, tf.nn.relu)  # hidden layer now uses ReLU
out = addLayer(L1, 20, 1)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - out), axis=1))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
batch = 80
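
Note that these calls add a second pair of layers to the same default graph; the sigmoid network's variables still exist and are merely re-initialized below. A cleaner variant (an assumption, not part of the original post) would start from a fresh graph:

# Hypothetical cleanup: discard the old graph before rebuilding, so only
# the ReLU network's ops and variables remain.
tf.reset_default_graph()
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
L1 = addLayer(xs, 1, 20, tf.nn.relu)
out = addLayer(L1, 20, 1)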

Start training

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100000):
        # Slide an 80-sample window over the 200 samples, wrapping around.
        start = batch * i % x_data.shape[0]
        end = np.minimum(start + batch, x_data.shape[0])
        feed = {xs: x_data[start:end, :], ys: y_data[start:end, :]}
        _, printloss = sess.run([train_op, loss], feed_dict=feed)
        # print('loss', printloss)  # per-step mini-batch loss; very verbose
        if i % 10000 == 0:
            # Every 10000 steps, report the full-dataset loss and the
            # predictions at the probe points x = 0.5 and x = 0.3.
            pre = sess.run([out], feed_dict={xs: np.array([[0.5], [0.3]])})
            printloss = sess.run(loss, feed_dict={xs: x_data, ys: y_data})
            print('total_loss', printloss)
            print('predict in 0.5, 0.3', pre)
total_loss 26.647
predict in 0.5, 0.3 [array([[ 7.41234827],
       [ 5.20346737]], dtype=float32)]
total_loss 0.0585625
predict in 0.5, 0.3 [array([[ 0.73315507],
       [ 0.71946353]], dtype=float32)]
total_loss 0.0585625
predict in 0.5, 0.3 [array([[ 0.73315299],
       [ 0.71946359]], dtype=float32)]
total_loss 0.0585625
predict in 0.5, 0.3 [array([[ 0.73315114],
       [ 0.71946341]], dtype=float32)]
total_loss 0.0585625
predict in 0.5, 0.3 [array([[ 0.73314917],
       [ 0.71946341]], dtype=float32)]
total_loss 0.0585625
predict in 0.5, 0.3 [array([[ 0.7331475 ],
       [ 0.71946341]], dtype=float32)]
total_loss 0.0585625
predict in 0.5, 0.3 [array([[ 0.73314548],
       [ 0.71946317]], dtype=float32)]
total_loss 0.0585624
predict in 0.5, 0.3 [array([[ 0.73314357],
       [ 0.71946317]], dtype=float32)]
total_loss 0.0585624
predict in 0.5, 0.3 [array([[ 0.73314178],
       [ 0.71946317]], dtype=float32)]
total_loss 0.0585624
predict in 0.5, 0.3 [array([[ 0.73314029],
       [ 0.71946293]], dtype=float32)]

The experiment shows that, on this regression problem with this initialization and learning rate, tf.sigmoid works much better than tf.nn.relu: the sigmoid network's total loss keeps decreasing (down to about 2e-05), while the ReLU network plateaus at about 0.0586 from the very first checkpoint.
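
One plausible explanation (an assumption, not verified in the original experiment) is that the stddev-1 random_normal initialization pushes many hidden units into the region where ReLU outputs zero gradient, so they never recover ("dead ReLUs"). A smaller initialization scale inside addLayer would be the natural thing to try:

# Hypothetical variant of the weight init inside addLayer: a smaller
# stddev keeps pre-activations near zero, where ReLU units stay trainable.
W = tf.Variable(tf.random_normal([in_c, output_size], stddev=0.1, seed=1))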
