Training a CNN on fashion_mnist, with TensorBoard visualization

CNN architecture:

1. convolutional layer 1 + max pooling: input [?,28,28,1], output [?,14,14,32];
2. convolutional layer 2 + max pooling: input [?,14,14,32], output [?,7,7,64];
3. fully connected layer 1 + dropout: input [?,7*7*64], output [?,1024];
4. fully connected layer 2 to prediction: input [?,1024], output [?,10];
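As a quick sanity check on the shapes listed above (a minimal sketch, not part of the training script): with padding='SAME', a stride-1 convolution keeps the spatial size, and a stride-s pooling produces ceil(input/s).

import math

def same_padding_out(size, stride):
    # With SAME padding, output size = ceil(input size / stride)
    return math.ceil(size / stride)

print(same_padding_out(28, 2))  # 14 -> after pool1
print(same_padding_out(14, 2))  # 7  -> after pool2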

CNN implementation code

# Import all required packages
import tensorflow as tf
from tensorflow import keras
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

# Optional: use the MNIST dataset (digits 0-9) instead of fashion_mnist
# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Label-conversion helper: fashion_mnist labels are the digits 0-9, so each
# label must be converted into a one-hot vector of length 10
def hanshu(list_1):
    list_3 = []
    for i in list_1:
        list_2 = [0,0,0,0,0,0,0,0,0,0]
        list_2[i] = 1
        list_3.append(list_2)
    return list_3
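# Note: an equivalent vectorized one-hot encoding (a sketch; assumes the
# NumPy import above) would be:
#     def hanshu(labels):
#         return np.eye(10, dtype=np.float32)[labels]
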
# Load the fashion_mnist dataset
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_labels_ts = hanshu(train_labels)
test_labels_ts = hanshu(test_labels)
train_images = train_images/255
test_images = test_images/255

# Define input and target placeholders
xs = tf.placeholder(tf.float32, [None, 28, 28], name='xs')
ys = tf.placeholder(tf.float32, [None,10], name='ys')

# Dropout placeholder, to mitigate overfitting
keep_drop = tf.placeholder(tf.float32)
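# Note: in TF1, tf.nn.dropout takes a *keep* probability, so keep_drop=0.5
# drops half the activations during training and keep_drop=1.0 disables
# dropout at evaluation time (see the feed_dicts below).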

# Initial input: add the channel dimension (NHWC); -1 infers the batch size
x_image = tf.reshape(xs, [-1, 28, 28, 1])

# Convolutional layer 1
# output is [?,28,28,32]
with tf.name_scope('conv1'):
    W_conv1 = tf.Variable(tf.truncated_normal(shape=[5,5,1,32],stddev=0.1), name='W')
    b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]), name='b')
    h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1,1,1,1], padding='SAME')+b_conv1, name='h')
    
# Pooling layer 1
# output is [?,14,14,32]
with tf.name_scope('pool1'):
    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME', name='h')
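# Note: ksize/strides are in [batch, height, width, channels] order, so
# [1,2,2,1] pools over 2x2 spatial windows with stride 2, halving H and W.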
    
# Convolutional layer 2
# output is [?,14,14,64]
with tf.name_scope('conv2'):
    W_conv2 = tf.Variable(tf.truncated_normal(shape=[5,5,32,64],stddev=0.1), name='W')
    b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]), name='b')
    h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2, name='h')

# Pooling layer 2
# output is [?,7,7,64]
with tf.name_scope('pool2'):
    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME', name='h')

# Flatten!!! [?,7*7*64]
h_pool2_flat = tf.reshape(h_pool2, [-1,7*7*64])
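# 7*7*64 = 3136 features per image; -1 again infers the batch size.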

# Fully connected layer 1
# output is [?,1024]
with tf.name_scope('fc1'):
    W_fc1 = tf.Variable(tf.truncated_normal(shape=[7*7*64, 1024], stddev=0.1), name='W')
    b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]), name='b')
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1, name='h')
# Apply dropout to prevent overfitting
h_fc1_drop = tf.nn.dropout(h_fc1, keep_drop)

# Fully connected layer 2 to prediction, output is [?,10]
with tf.name_scope('fc2'):
    W_fc2 = tf.Variable(tf.truncated_normal(shape=[1024, 10], stddev=0.1), name='W')
    b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]), name='b')
    prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name='prediction')

# Loss function: cross-entropy between the one-hot labels and the softmax output
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),reduction_indices=[1]))
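# Note: a more numerically stable alternative (a sketch; it would require
# keeping the pre-softmax logits instead of `prediction`):
#     logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
#     cross_entropy = tf.reduce_mean(
#         tf.nn.softmax_cross_entropy_with_logits_v2(labels=ys, logits=logits))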

# Training step: Adam optimizer with learning rate 1e-4
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Accuracy
correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(ys,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Define TensorBoard summaries
tf.summary.scalar("loss", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
merged_summary_op = tf.summary.merge_all()
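# Optional extras (assumption: same TF1 summary API): weight distributions
# can be logged as well, e.g.
#     tf.summary.histogram('W_fc2', W_fc2)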

sess = tf.Session()
# Write the graph and summaries for TensorBoard
writer = tf.summary.FileWriter('logs_cnn', sess.graph)
sess.run(tf.global_variables_initializer())
for epoch in range(5):
    m = 0
    # 600 batches of 100 images cover the full 60000-image training set
    for n in range(1, 601):
        sess.run([train_step], feed_dict={xs: train_images[m:m+100], ys: train_labels_ts[m:m+100], keep_drop: 0.5})
        m += 100
        if n % 100 == 0:
            # Evaluate on a test slice with dropout disabled (keep_drop=1)
            acc, loss = sess.run([accuracy, cross_entropy], feed_dict={xs: test_images[:100], ys: test_labels_ts[:100], keep_drop: 1})
            print(acc, loss)
            result = sess.run(merged_summary_op, feed_dict={xs: test_images, ys: test_labels_ts, keep_drop: 1})
            writer.add_summary(result, n*100 + epoch*60000)
    print('Finished epoch {}!!!'.format(epoch+1))
# Optional: train/test with MNIST instead
# for i in range(1000):
#     input_batch, labels_batch = mnist.train.next_batch(100)
#     sess.run([train_step],feed_dict={xs:input_batch,ys:labels_batch,keep_drop: 0.5})
#     if i % 50 == 0:
#         acc = sess.run([accuracy], feed_dict={xs:mnist.test.images[:1000], ys:mnist.test.labels[:1000], keep_drop: 1})
#         loss = sess.run([cross_entropy], feed_dict={xs:mnist.test.images[:1000], ys:mnist.test.labels[:1000], keep_drop: 1})
#         print(acc,loss)
#         result = sess.run(merged_summary_op,feed_dict={xs:mnist.test.images[:1000],ys:mnist.test.labels[:1000],keep_drop: 1})
#         writer.add_summary(result, i)

While training is running, you can launch TensorBoard from the command line:

tensorboard --logdir=logs_cnn --host=127.0.0.1

Once the command succeeds, open 127.0.0.1:6006 in a browser to inspect the logged metrics and the graph of the network.
