Getting Started with Neural Networks: MNIST Recognition Examples

Introductory neural network examples:
1. A single-layer neural network for MNIST handwritten digit recognition

The mnist_data dataset can be downloaded here:
Link: https://pan.baidu.com/s/1ujUNC2_xbTnfxsKNHj6p0A
Extraction code: nccu
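Once the downloaded archive files are placed in ./data/mnist_data/, read_data_sets will load them from disk instead of re-downloading. A quick sanity check of the loaded data (a minimal sketch, assuming TensorFlow 1.x with the bundled tutorial input_data module):

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("./data/mnist_data/", one_hot=True)
print(mnist.train.num_examples)   # 55000 training images
print(mnist.test.num_examples)    # 10000 test images
print(mnist.train.images.shape)   # (55000, 784): flattened 28*28 grayscale pixels
print(mnist.train.labels.shape)   # (55000, 10): one-hot encoded digit labels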

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer("is_train", 0, "1: train and save the model, 0: load the model and predict")

def fullconnected():
    # Read the MNIST data from ./data/mnist_data, with labels in one-hot encoding
    mnist = input_data.read_data_sets("./data/mnist_data/", one_hot=True)


    # 1. Create placeholders for the data: x [None, 784], y_true [None, 10]
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32, [None, 784])
        # one-hot labels must be float32 to match the logits dtype in the loss below
        y_true = tf.placeholder(tf.float32, [None, 10])

    # 2. Build a single fully connected layer
    with tf.variable_scope("fc_model"):
        # Randomly initialize the weights and biases
        weight = tf.Variable(tf.random_normal([784, 10], mean=0.0, stddev=1.0), name="w")
        bias = tf.Variable(tf.constant(0.0, shape=[10]))

        # Predicted output (logits)
        y_predict = tf.matmul(x, weight) + bias
    # 3. Compute the loss: softmax cross entropy, averaged over the batch
    with tf.variable_scope("soft_cross"):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_predict))

    # 4. Minimize the loss with gradient descent
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # 5. Compute the accuracy
    with tf.variable_scope("acc"):
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))

    # Collect variables for display in TensorBoard
    tf.summary.scalar("losses", loss)
    tf.summary.scalar("acc", accuracy)
    tf.summary.histogram("weight", weight)
    tf.summary.histogram("biases", bias)

    init_op = tf.global_variables_initializer()

    # Merge all collected summaries into a single op
    merge = tf.summary.merge_all()

    # Saver for persisting and restoring the trained model
    saver = tf.train.Saver()

    test = []
    predict = []
    # 6. Run the training in a session
    with tf.Session() as sess:
        sess.run(init_op)

        # Create the events file; the logs are written to ./data/mnist_data
        filewriter = tf.summary.FileWriter("./data/mnist_data", graph=sess.graph)

        if FLAGS.is_train == 1:
            # Iterate training steps
            for i in range(2000):

                mnist_x, mnist_y = mnist.train.next_batch(50)

                sess.run(train_op, feed_dict={
                    x: mnist_x,
                    y_true: mnist_y})
                # Record the summaries for this step
                summary = sess.run(merge, feed_dict={x: mnist_x, y_true: mnist_y})
                filewriter.add_summary(summary, i)
                print("step %d, batch accuracy: %f" % (i, sess.run(accuracy, feed_dict={x: mnist_x, y_true: mnist_y})))

            # Save the trained model
            saver.save(sess, "./data/mnist_data/mnist_model")
        else:
            # Load the saved model
            saver.restore(sess, "./data/mnist_data/mnist_model")
            for i in range(100):
                x_test, y_test = mnist.test.next_batch(1)
                # np.argmax avoids adding new argmax ops to the graph on every iteration
                target = np.argmax(y_test, 1)[0]
                prediction = np.argmax(sess.run(y_predict, feed_dict={x: x_test, y_true: y_test}), 1)[0]
                print("image %d, target: %d, prediction: %d" % (i, target, prediction))
                test.append(target)
                predict.append(prediction)
            np.save("./data/mnist_data/test", test)
            np.save("./data/mnist_data/predict", predict)
    return None


if __name__ == "__main__":
    fullconnected()
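
The is_train flag selects the mode when launching the script. Assuming the code above is saved as mnist_fc.py (the file name is just a placeholder):

python mnist_fc.py --is_train=1   # train for 2000 steps and save the model
python mnist_fc.py --is_train=0   # restore the saved model and predict 100 test images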

TensorBoard visualization: first locate the log directory, here ./data/mnist_data, then open a command window (on Windows: Win+R > cmd) and run

tensorboard --logdir=./data/mnist_data

Once it starts, copy the printed URL (by default http://localhost:6006) into a browser to see the TensorBoard page.
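In the TensorBoard page, the scalars collected above ("losses" and "acc") appear under the SCALARS tab, the weight and bias distributions under HISTOGRAMS, and the computation graph under GRAPHS.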

Designing a convolutional neural network

conv1 + conv2 + fc

Convolution layer 1: convolution + activation + pooling. 32 filters of size 5*5, stride 1, padding=SAME (the spatial size is preserved; input [None, 28, 28, 1]), followed by 2*2 max pooling.

Convolution layer 2: convolution + activation + pooling. 64 filters of size 5*5, stride 1, padding=SAME, followed by 2*2 max pooling.
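
The shape flow through the network, traced step by step (with stride 1 and SAME padding a convolution preserves the spatial size, and each 2*2 max pool with stride 2 halves it), which is where the 7*7*64 input size of the fc layer comes from:

[None, 28, 28, 1]  --conv1: 5*5, 32 filters, stride 1, SAME-->  [None, 28, 28, 32]
[None, 28, 28, 32] --max pool: 2*2, stride 2-->                 [None, 14, 14, 32]
[None, 14, 14, 32] --conv2: 5*5, 64 filters, stride 1, SAME-->  [None, 14, 14, 64]
[None, 14, 14, 64] --max pool: 2*2, stride 2-->                 [None, 7, 7, 64]
[None, 7, 7, 64]   --reshape-->                                 [None, 7*7*64]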

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


# Initialize weights from a normal distribution
def weight_variables(shape):
    w = tf.Variable(tf.random_normal(shape=shape, mean=0.0, stddev=1.0))
    return w


# Initialize biases to zero
def bias_variables(shape):
    b = tf.Variable(tf.constant(0.0, shape=shape))
    return b


def model():
    """Build the CNN model: conv1 + conv2 + fc"""
    # Placeholders for the data: x [None, 784], y_true [None, 10]
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32, [None, 784])
        # one-hot labels must be float32 to match the logits dtype in the loss
        y_true = tf.placeholder(tf.float32, [None, 10])
    # Convolution layer 1
    with tf.variable_scope("conv_1"):
        # Initialize weights: 5*5 filters, 1 input channel, 32 output channels
        w_conv1 = weight_variables([5, 5, 1, 32])
        # Initialize biases
        b_conv1 = bias_variables([32])
        # Reshape the flat input back into images
        x_reshape = tf.reshape(x, [-1, 28, 28, 1])
        # [None, 28, 28, 1] ---> [None, 28, 28, 32]
        x_relu = tf.nn.relu(tf.nn.conv2d(x_reshape, w_conv1, strides=[1, 1, 1, 1], padding="SAME") + b_conv1)
        # 2*2 max pooling: [None, 28, 28, 32] ---> [None, 14, 14, 32]
        x_pool1 = tf.nn.max_pool(x_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    # Convolution layer 2
    with tf.variable_scope("conv2"):
        # Initialize weights: 5*5 filters, 32 input channels, 64 output channels
        w_conv2 = weight_variables([5, 5, 32, 64])
        # Initialize biases
        b_conv2 = bias_variables([64])
        # [None, 14, 14, 32] ---> [None, 14, 14, 64]
        x_relu2 = tf.nn.relu(tf.nn.conv2d(x_pool1, w_conv2, strides=[1, 1, 1, 1], padding="SAME") + b_conv2)
        # 2*2 max pooling: [None, 14, 14, 64] ---> [None, 7, 7, 64]
        x_pool2 = tf.nn.max_pool(x_relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    # Fully connected layer
    with tf.variable_scope("fc"):
        # Initialize weights: [7*7*64, 10]
        w_fc = weight_variables([7*7*64, 10])
        # Initialize biases
        b_fc = bias_variables([10])
        # Flatten the pooled feature maps: [None, 7, 7, 64] ---> [None, 7*7*64]
        x_fc_reshape = tf.reshape(x_pool2, [-1, 7*7*64])
        y_predict = tf.matmul(x_fc_reshape, w_fc) + b_fc
    return x, y_true, y_predict


def conv_fc():
    mnist = input_data.read_data_sets("./data/mnist_data", one_hot=True)
    x, y_true, y_predict = model()
    # Compute the loss: softmax cross entropy, averaged over the batch
    with tf.variable_scope("soft_cross"):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_predict))
    # Minimize the loss with gradient descent
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.0001).minimize(loss)
    # Compute the accuracy
    with tf.variable_scope("acc"):
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))
    init_op = tf.global_variables_initializer()
    # Run the training in a session
    with tf.Session() as sess:
        sess.run(init_op)
        for i in range(1000):
            mnist_x, mnist_y = mnist.train.next_batch(50)
            sess.run(train_op, feed_dict={x: mnist_x, y_true: mnist_y})
            print("step %d, batch accuracy: %f" % (i, sess.run(accuracy, feed_dict={x: mnist_x, y_true: mnist_y})))
    return None


if __name__ == "__main__":
    conv_fc()

Here a different architecture, a convolutional neural network with two convolution layers, is used to test the same MNIST dataset.

Implementing different algorithms on the same dataset and comparing the results is a good way to better understand and master the underlying ideas.
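
One caveat for the comparison: both scripts print accuracy on the current training batch, which is noisy and optimistic. For a fairer comparison, evaluate on the held-out test set after the training loop finishes. A minimal sketch, reusing the x, y_true, mnist, and accuracy names from the scripts above, to be run inside the session:

        # Accuracy over all 10000 held-out test images
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                 y_true: mnist.test.labels})
        print("test accuracy: %f" % test_acc)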
