7 多层全连接神经网络

单层网络的模型

7.1 idcard1
程序:

# TF1-style single-layer network for the "ID card" toy problem:
# given a vector of 4 random digits, learn whether the 3rd digit is
# odd (target 1) or even (target 0).
import tensorflow as tf
import random


random.seed()

x = tf.placeholder(tf.float32)       # 4-digit input vector
yTrain = tf.placeholder(tf.float32)  # scalar target: 1 = odd, 0 = even

# One weight per input digit plus a single scalar bias.
w = tf.Variable(tf.random_normal([4], mean=0.5, stddev=0.1), dtype=tf.float32)
b = tf.Variable(0, dtype=tf.float32)

n1 = w * x + b

# Collapse the 4 weighted inputs to one value and squash it into (0, 1).
y = tf.nn.sigmoid(tf.reduce_sum(n1))

loss = tf.abs(y - yTrain)

optimizer = tf.train.RMSPropOptimizer(0.01)

train = optimizer.minimize(loss)

sess = tf.Session()

sess.run(tf.global_variables_initializer())

lossSum = 0.0  # running total of the per-step loss, used for the average

for i in range(5000):

    # Four uniformly random digits in [0, 9]; randint(0, 9) is the
    # idiomatic equivalent of int(random.random() * 10).
    xDataRandom = [random.randint(0, 9) for _ in range(4)]
    # The label depends only on the parity of the 3rd digit.
    if xDataRandom[2] % 2 == 0:
        yTrainDataRandom = 0
    else:
        yTrainDataRandom = 1

    result = sess.run([train, x, yTrain, y, loss], feed_dict={x: xDataRandom, yTrain: yTrainDataRandom})

    # loss is the last fetched tensor; result[-1] is clearer than
    # result[len(result) - 1].
    lossSum = lossSum + float(result[-1])

    print("i: %d, loss: %10.10f, avgLoss: %10.10f" % (i, float(result[-1]), lossSum / (i + 1)))

结果:

...
i: 4993, loss: 0.9928429127, avgLoss: 0.4535896228
i: 4994, loss: 0.0246321186, avgLoss: 0.4535037454
i: 4995, loss: 0.9965691566, avgLoss: 0.4536124455
i: 4996, loss: 0.0063358545, avgLoss: 0.4535229365
i: 4997, loss: 0.1029482484, avgLoss: 0.4534527935
i: 4998, loss: 0.0965303779, avgLoss: 0.4533813947
i: 4999, loss: 0.9565555453, avgLoss: 0.4534820295

身份证问题新模型的代码实现

7.2 idcard2
程序:

# TF1-style two-layer network for the same "ID card" parity problem,
# now framed as 2-class classification with a softmax output:
# [1, 0] = 3rd digit odd, [0, 1] = 3rd digit even.
import tensorflow as tf
import random


random.seed()

x = tf.placeholder(tf.float32)       # 4-digit input vector
yTrain = tf.placeholder(tf.float32)  # one-hot target of length 2

# Hidden layer: 4 inputs -> 8 tanh units (scalar bias broadcast over all units).
w1 = tf.Variable(tf.random_normal([4, 8], mean=0.5, stddev=0.1), dtype=tf.float32)
b1 = tf.Variable(0, dtype=tf.float32)

# Reshape the flat input into a [1, 4] matrix so it can be matmul'ed.
xr = tf.reshape(x, [1, 4])

n1 = tf.nn.tanh(tf.matmul(xr, w1) + b1)

# Output layer: 8 hidden units -> 2 class scores.
w2 = tf.Variable(tf.random_normal([8, 2], mean=0.5, stddev=0.1), dtype=tf.float32)
b2 = tf.Variable(0, dtype=tf.float32)

n2 = tf.matmul(n1, w2) + b2

# Flatten to shape [2] and normalize the scores into class probabilities.
y = tf.nn.softmax(tf.reshape(n2, [2]))

loss = tf.reduce_mean(tf.square(y - yTrain))

optimizer = tf.train.RMSPropOptimizer(0.01)

train = optimizer.minimize(loss)

sess = tf.Session()

sess.run(tf.global_variables_initializer())

lossSum = 0.0  # running total of the per-step loss, used for the average

for i in range(5):

    # Four uniformly random digits in [0, 9]; randint(0, 9) is the
    # idiomatic equivalent of int(random.random() * 10).
    xDataRandom = [random.randint(0, 9) for _ in range(4)]
    # One-hot label from the parity of the 3rd digit.
    if xDataRandom[2] % 2 == 0:
        yTrainDataRandom = [0, 1]
    else:
        yTrainDataRandom = [1, 0]

    result = sess.run([train, x, yTrain, y, loss], feed_dict={x: xDataRandom, yTrain: yTrainDataRandom})

    # loss is the last fetched tensor; result[-1] is clearer than
    # result[len(result) - 1].
    lossSum = lossSum + float(result[-1])

    print("i: %d, loss: %10.10f, avgLoss: %10.10f" % (i, float(result[-1]), lossSum / (i + 1)))

结果:

i: 0, loss: 0.1806790531, avgLoss: 0.1806790531
i: 1, loss: 0.3404206634, avgLoss: 0.2605498582
i: 2, loss: 0.3263188601, avgLoss: 0.2824728588
i: 3, loss: 0.1949076355, avgLoss: 0.2605815530
i: 4, loss: 0.1858262867, avgLoss: 0.2456304997

进一步优化模型和代码

7.3 idcard3
程序:

# TF1-style three-layer network for the "ID card" parity problem:
# two hidden layers (tanh, then sigmoid) of 32 units each, followed by
# a 2-way softmax. Labels: [1, 0] = 3rd digit odd, [0, 1] = even.
import tensorflow as tf
import random


random.seed()

x = tf.placeholder(tf.float32)       # 4-digit input vector
yTrain = tf.placeholder(tf.float32)  # one-hot target of length 2

# First hidden layer: 4 inputs -> 32 tanh units.
w1 = tf.Variable(tf.random_normal([4, 32], mean=0.5, stddev=0.1), dtype=tf.float32)
b1 = tf.Variable(0, dtype=tf.float32)

# Reshape the flat input into a [1, 4] matrix so it can be matmul'ed.
xr = tf.reshape(x, [1, 4])

n1 = tf.nn.tanh(tf.matmul(xr, w1) + b1)

# Second hidden layer: 32 -> 32 sigmoid units.
w2 = tf.Variable(tf.random_normal([32, 32], mean=0.5, stddev=0.1), dtype=tf.float32)
b2 = tf.Variable(0, dtype=tf.float32)

n2 = tf.nn.sigmoid(tf.matmul(n1, w2) + b2)

# Output layer: 32 hidden units -> 2 class scores.
w3 = tf.Variable(tf.random_normal([32, 2], mean=0.5, stddev=0.1), dtype=tf.float32)
b3 = tf.Variable(0, dtype=tf.float32)

n3 = tf.matmul(n2, w3) + b3

# Flatten to shape [2] and normalize the scores into class probabilities.
y = tf.nn.softmax(tf.reshape(n3, [2]))

loss = tf.reduce_mean(tf.square(y - yTrain))

optimizer = tf.train.RMSPropOptimizer(0.01)

train = optimizer.minimize(loss)

sess = tf.Session()

sess.run(tf.global_variables_initializer())

lossSum = 0.0  # running total of the per-step loss, used for the average

for i in range(500000):

    # Four uniformly random digits in [0, 9]; randint(0, 9) is the
    # idiomatic equivalent of int(random.random() * 10).
    xDataRandom = [random.randint(0, 9) for _ in range(4)]
    # One-hot label from the parity of the 3rd digit.
    if xDataRandom[2] % 2 == 0:
        yTrainDataRandom = [0, 1]
    else:
        yTrainDataRandom = [1, 0]

    result = sess.run([train, x, yTrain, y, loss], feed_dict={x: xDataRandom, yTrain: yTrainDataRandom})

    # loss is the last fetched tensor; result[-1] is clearer than
    # result[len(result) - 1].
    lossSum = lossSum + float(result[-1])

    print("i: %d, loss: %10.10f, avgLoss: %10.10f" % (i, float(result[-1]), lossSum / (i + 1)))

结果:

...
i: 499990, loss: 0.1229834557, avgLoss: 0.1387792120
i: 499991, loss: 0.0000002130, avgLoss: 0.1387789344
i: 499992, loss: 0.0725839064, avgLoss: 0.1387788020
i: 499993, loss: 0.0478919595, avgLoss: 0.1387786202
i: 499994, loss: 0.0000000044, avgLoss: 0.1387783427
i: 499995, loss: 0.0000000061, avgLoss: 0.1387780651
i: 499996, loss: 0.0000002700, avgLoss: 0.1387777876
i: 499997, loss: 0.0000000061, avgLoss: 0.1387775100
i: 499998, loss: 0.0347102024, avgLoss: 0.1387773019
i: 499999, loss: 0.0250975303, avgLoss: 0.1387770745
  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值