## 神经网络的框架

一个神经网络由三部分组成:

1. 输入端(自带比照)
2. 传播层,也就是参数传播
3. 输出层

没了,这只是个工具。
以下是手写数字识别网络的训练代码:
```
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset with one-hot encoded labels.
# NOTE(review): the path is machine-specific; adjust to your local copy.
shujuji = input_data.read_data_sets(r"D:\超算\手写数字的数据集合\MNIST_data", one_hot=True)

data_len = 100                                     # mini-batch size
data_n = shujuji.train.num_examples // data_len    # number of batches per epoch

# Placeholders: flattened 28x28 grayscale images and one-hot digit labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Hidden layer 1: 784 -> 500, sigmoid activation. ("pian" = bias)
W1 = tf.Variable(tf.random.normal([784, 500]))
pian1 = tf.Variable(tf.zeros([500]))
result1 = tf.nn.sigmoid(tf.matmul(x, W1) + pian1)

# Hidden layer 2: 500 -> 500.
W2 = tf.Variable(tf.random.normal([500, 500]))
pian2 = tf.Variable(tf.zeros([500]))
result2 = tf.nn.sigmoid(tf.matmul(result1, W2) + pian2)

# Hidden layer 3: 500 -> 500.
W3 = tf.Variable(tf.random.normal([500, 500]))
pian3 = tf.Variable(tf.zeros([500]))
result3 = tf.nn.sigmoid(tf.matmul(result2, W3) + pian3)

# Hidden layer 4: 500 -> 500.
W4 = tf.Variable(tf.random.normal([500, 500]))
pian4 = tf.Variable(tf.zeros([500]))
result4 = tf.nn.sigmoid(tf.matmul(result3, W4) + pian4)

# Output layer: 500 -> 10, softmax over the ten digit classes.
W = tf.Variable(tf.zeros([500, 10]))
pian = tf.Variable(tf.zeros([10]))
result = tf.nn.softmax(tf.matmul(result4, W) + pian)

init = tf.global_variables_initializer()

# Mean-squared-error loss on the softmax output, minimized by plain SGD.
# NOTE(review): cross-entropy (tf.nn.softmax_cross_entropy_with_logits) is the
# standard choice for classification and usually trains much faster — confirm
# before changing, as it alters training behavior.
# Fixed: the original split "tf.train." and "GradientDescentOptimizer" across
# two lines, which is a SyntaxError.
loss = tf.reduce_mean(tf.square(result - y))
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Accuracy: fraction of samples whose argmax prediction matches the label.
acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(result, 1)), tf.float32))

# Fixed: the original lost all indentation inside the `with` block and the two
# `for` loops (a SyntaxError); restored from the evident loop structure.
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(100000):
        for _ in range(data_n):
            shuju, label = shujuji.train.next_batch(data_len)
            sess.run(train, feed_dict={x: shuju, y: label})
        # Report test-set accuracy after every epoch.
        # (The original `if _ % 1 == 0` guard was always true and is removed.)
        print("I:", epoch, sess.run(acc, feed_dict={x: shujuji.test.images, y: shujuji.test.labels}))
```