TensorFlow Learning Notes: Building a Basic Neural Network

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST data set (downloaded into ./data) with one-hot encoded labels
mnist = input_data.read_data_sets('data', one_hot=True)

num_classes = 10           # ten digit classes (0-9)
input_size = 784           # 28x28 images flattened into 784 pixels
hidden_units_size = 30
batch_size = 100
training_iterations = 10000


# Define the input placeholders
x = tf.placeholder(tf.float32, [None, input_size])
y = tf.placeholder(tf.float32, [None, num_classes])

# Hidden layer: fully connected, ReLU activation
w1 = tf.Variable(tf.random_normal([input_size, hidden_units_size], stddev=0.1))
b1 = tf.Variable(tf.constant(0.1, shape=[hidden_units_size]))
hidden_opt = tf.matmul(x, w1) + b1
hidden_opt = tf.nn.relu(hidden_opt)

# Output layer: produce raw logits; no activation here, because the softmax
# is applied inside the cross-entropy loss below
w2 = tf.Variable(tf.random_normal([hidden_units_size, num_classes], stddev=0.1))
b2 = tf.Variable(tf.constant(0.1, shape=[num_classes]))
out_opt = tf.matmul(hidden_opt, w2) + b2

# Loss: mean softmax cross-entropy between the labels and the logits
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=out_opt))
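# For reference, for a single example with logits z and one-hot label y this computes
#   L = -sum_k y_k * log( exp(z_k) / sum_j exp(z_j) )
# and tf.reduce_mean then averages L over the batch.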

# Training step: plain gradient descent with learning rate 0.05
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
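# Each run of train_step applies one SGD update, theta <- theta - 0.05 * dL/dtheta,
# to every trainable variable (w1, b1, w2, b2).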

# Accuracy: fraction of examples whose predicted class matches the label
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(out_opt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Initialize variables and run the graph in a session
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(training_iterations):
    batch_input, batch_labels = mnist.train.next_batch(batch_size)
    _, training_loss = sess.run([train_step, loss], feed_dict={x: batch_input, y: batch_labels})
    if i % 1000 == 0:
        train_accuracy = accuracy.eval(session=sess, feed_dict={x: batch_input, y: batch_labels})
        print("step : %d, training accuracy = %g" % (i, train_accuracy))
sess.close()