MNIST Handwritten Digit Recognition


Handwritten digit recognition based on TensorFlow


import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)  # downloads the data on first run

# Size of each mini-batch
batch_size = 100
n_batch = mnist.train.num_examples // batch_size  # number of batches per epoch
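
For orientation, a quick sanity check of the loaded splits (sizes as documented for the TF 1.x MNIST helper: 55000 train / 5000 validation / 10000 test; images are flattened 28*28 floats in [0, 1], labels are one-hot):

print(mnist.train.images.shape)  # (55000, 784)
print(mnist.train.labels.shape)  # (55000, 10), one-hot
print(mnist.test.num_examples)   # 10000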


# Initialize weights
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)  # samples from a truncated normal distribution
    return tf.Variable(initial)


# Initialize biases
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)  # small positive bias to avoid dead ReLUs
    return tf.Variable(initial)


# Convolution layer: stride 1 in both directions; 'SAME' padding keeps the spatial size
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


# Pooling layer: 2x2 max pooling with stride 2 halves each spatial dimension
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
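
A minimal shape check of the two helpers above: with 'SAME' padding and stride 1 the convolution preserves the spatial size, while the 2x2 pooling halves it (this sketch assumes weight_variable, conv2d and max_pool_2x2 are defined as above):

demo = tf.zeros([1, 28, 28, 1])  # dummy batch of one 28*28 grayscale image
w_demo = weight_variable([5, 5, 1, 32])
print(conv2d(demo, w_demo).shape)                # (1, 28, 28, 32)
print(max_pool_2x2(conv2d(demo, w_demo)).shape)  # (1, 14, 14, 32)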


# Define two placeholders
# dtype, [number of rows, number of columns]
x = tf.placeholder(tf.float32, [None, 784])  # 28*28
y = tf.placeholder(tf.float32, [None, 10])

# Reshape x into a 4-D tensor [batch, in_height, in_width, in_channels]
x_image = tf.reshape(x, [-1, 28, 28, 1])  # -1 lets TensorFlow infer the batch dimension

# Initialize the weights and biases of the first convolution layer
W_conv1 = weight_variable([5, 5, 1, 32])  # 5*5 window, 32 kernels extracting features from 1 input channel
b_conv1 = bias_variable([32])  # one bias per kernel

# Convolve x_image with the weight tensor, add the bias, then apply the ReLU activation
conv1 = conv2d(x_image, W_conv1) + b_conv1
h_conv1 = tf.nn.relu(conv1)
h_pool1 = max_pool_2x2(h_conv1)  # max pooling

# Initialize the weights and biases of the second convolution layer
W_conv2 = weight_variable([5, 5, 32, 64])  # 5*5 window, 64 kernels over 32 input channels
b_conv2 = bias_variable([64])

# Convolve h_pool1 with the weight tensor, add the bias, then apply ReLU
conv2 = conv2d(h_pool1, W_conv2) + b_conv2
h_conv2 = tf.nn.relu(conv2)
h_pool2 = max_pool_2x2(h_conv2)

# A 28*28 image stays 28*28 after the first convolution ('SAME' padding) and becomes 14*14 after the first pooling.
# The second convolution keeps it 14*14; the second pooling reduces it to 7*7.
# After these operations we have 64 feature maps of size 7*7.

# Initialize the weights of the first fully connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])  # the previous layer has 7*7*64 neurons; this layer has 1024
b_fc1 = bias_variable([1024])  # 1024 nodes

# Flatten the output of the second pooling layer to 1-D
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

# Compute the output of the first fully connected layer
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# keep_prob is the probability that a neuron's output is kept during dropout
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
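
As an aside, tf.nn.dropout in TF 1.x scales the surviving activations by 1/keep_prob so that the expected output stays unchanged; a standalone illustration:

v_demo = tf.ones([10])
drop_demo = tf.nn.dropout(v_demo, keep_prob=0.5)  # kept entries become 2.0, dropped entries 0.0
with tf.Session() as demo_sess:
    print(demo_sess.run(drop_demo))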

# Initialize the second fully connected layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

# Compute the output: keep the raw logits for the loss, and softmax them for prediction
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
prediction = tf.nn.softmax(logits)

# Cross-entropy cost function
# Note: softmax_cross_entropy_with_logits expects raw logits, not softmaxed probabilities,
# so we pass `logits` here rather than `prediction`
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# Optimize with AdamOptimizer
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
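
As a sanity check of that loss (standalone, with made-up values), the built-in op matches the manual -sum(y * log(softmax(logits))) along the class axis:

logits_demo = tf.constant([[2.0, 1.0, 0.1]])
labels_demo = tf.constant([[1.0, 0.0, 0.0]])
manual = -tf.reduce_sum(labels_demo * tf.log(tf.nn.softmax(logits_demo)), axis=1)
builtin = tf.nn.softmax_cross_entropy_with_logits(labels=labels_demo, logits=logits_demo)
with tf.Session() as demo_sess:
    print(demo_sess.run([manual, builtin]))  # both come out to roughly [0.417]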

# Store the results in a list of booleans
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))  # argmax returns the index of the largest value in a 1-D tensor

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
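
To see what the accuracy op computes, here is a toy batch of two examples (illustrative values only):

p_demo = tf.constant([[0.1, 0.9], [0.8, 0.2]])  # two predicted distributions
y_demo = tf.constant([[0.0, 1.0], [1.0, 0.0]])  # the matching one-hot labels
acc_demo = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(p_demo, 1), tf.argmax(y_demo, 1)), tf.float32))
with tf.Session() as demo_sess:
    print(demo_sess.run(acc_demo))  # 1.0: both predictions are correct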

"""
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            testSet = mnist.test.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})

        acc = sess.run(accuracy, feed_dict={x: testSet[0], y: testSet[1], keep_prob: 1.0})
        print("Iter" + str(epoch) + ", Testing Accuracy=" + str(acc))
   """保存模型"""
     model_filepath = "./model2/gesture.ckpt"
     saver = tf.train.Saver()
     save_path = saver.save(sess, model_filepath)
"""

# Load a test image, convert it to grayscale, and bring it to MNIST format
file_name = 'F:/Deep Learning/testImages/testImages/testIM6.jpg'
im = Image.open(file_name).convert('L')  # 'L' = 8-bit grayscale
im = im.resize((28, 28))  # the network expects 28*28 input
img = np.array(im)
new_im = Image.fromarray(img)
new_im.show()

# Flatten to a 784-vector and scale to [0, 1], matching the training data
result = img.reshape(784, ) / 255.0
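
One caveat that depends on the source photo: MNIST digits are light strokes on a dark background, so a dark-on-light image usually needs inverting before flattening. A small heuristic sketch (the 127 threshold is an assumption to tune per image):

if img.mean() > 127:   # mostly-light image: assume a dark digit on a light background
    img = 255 - img    # invert so the stroke is bright, matching MNIST
result = img.reshape(784, ) / 255.0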

saver = tf.train.Saver()

with tf.Session() as sess:
    # restore() loads every variable from the checkpoint, so no initializer run is needed
    saver.restore(sess, "./model2/gesture.ckpt")
    prediction2 = tf.argmax(prediction, 1)
    predint = prediction2.eval(feed_dict={x: [result], keep_prob: 1.0}, session=sess)
    print('recognize result:')
    print(predint)

