(6) Building a CNN Model with TensorFlow

Using TensorFlow's MNIST dataset utilities, this post builds a CNN (2 convolutional layers, 2 pooling layers, and 1 fully connected layer) and implements model saving with resume-from-checkpoint training.
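Before the full listing, here is a minimal sketch of the TF 1.x save/restore pattern behind the resume feature (the variable w, the directory ./model, and the step counter are placeholders for illustration; tf.train.get_checkpoint_state is the standard way to locate the most recent checkpoint):

import os
import tensorflow as tf

w = tf.get_variable("w", initializer=tf.zeros([10]))  # stand-in variable so the Saver has something to save
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state("./model")  # reads the 'checkpoint' index file in that directory
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)  # restores e.g. ./model/model.ckpt-42
    # ... run training steps here ...
    os.makedirs("./model", exist_ok=True)
    saver.save(sess, "./model/model.ckpt", global_step=0)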

1. Code

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os
import math

n_times = 100    # total number of training epochs
model_save_path = r".\model"
model_name = "model.ckpt"
iteration = 0    # epoch index to resume from after an interrupted run

learning_rate = 0.001
batch_size = 100

input_node = 784
output_node = 10

image_size = 28
image_channels = 1

conv1_deep = 32
conv1_size = 5

conv2_deep = 64
conv2_size = 5

pool_size = 2
# fc_size = 512
def get_weights_variable(shape, name):
    # weights initialized from a truncated normal distribution
    return tf.get_variable(name=name, initializer=tf.truncated_normal(shape=shape, stddev=0.1, seed=1))

def get_bias_variable(shape, name):
    # biases initialized to zero
    return tf.get_variable(name=name, initializer=tf.zeros(shape=shape))

def inference(input_data):
    # reshape the flat input into the NHWC format expected by the conv layers
    input_data = tf.reshape(input_data, [batch_size, image_size, image_size, image_channels])
    # first convolutional layer, followed by its activation
    with tf.variable_scope("layer1-conv1"):
        conv1_kernel = [conv1_size, conv1_size, image_channels, conv1_deep]  # kernel shape: [height, width, in_channels, out_channels]
        conv1_weight = get_weights_variable(conv1_kernel, "conv1_kernel")
        conv1_bias = get_bias_variable([conv1_deep], name="conv1_bias")
        # in strides=[1,1,1,1], the first and last entries are the batch and channel dimensions and are normally left at 1
        conv1 = tf.nn.conv2d(input_data, conv1_weight, strides=[1, 1, 1, 1], padding="SAME")
        conv1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_bias))  # bias is added before the ReLU activation
    # first pooling layer (28x28 -> 14x14)
    with tf.variable_scope("layer2-pool1"):
        pool_1 = tf.nn.max_pool(conv1, ksize=[1, pool_size, pool_size, 1], strides=[1, 2, 2, 1], padding="SAME")
    # second convolutional layer, followed by its activation
    with tf.variable_scope("layer3-conv2"):
        conv2_kernel = [conv2_size, conv2_size, conv1_deep, conv2_deep]  # kernel shape: [height, width, in_channels, out_channels]
        conv2_weight = get_weights_variable(conv2_kernel, name="conv2_kernel")
        conv2_bias = get_bias_variable([conv2_deep], name="conv2_bias")
        conv2 = tf.nn.conv2d(pool_1, conv2_weight, strides=[1, 1, 1, 1], padding="SAME")  # stride 1, matching conv1
        conv2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_bias))
    # second pooling layer (14x14 -> 7x7)
    with tf.variable_scope("layer4-pool2"):
        pool_2 = tf.nn.max_pool(conv2, ksize=[1, pool_size, pool_size, 1], strides=[1, 2, 2, 1], padding="SAME")
    # flatten to a 2-D matrix of shape [batch_size, node] for the fully connected layer
    pool_shape = pool_2.get_shape().as_list()
    node = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool_2, [batch_size, node])
    # final fully connected layer; no activation here, because
    # softmax_cross_entropy_with_logits_v2 expects raw logits
    with tf.variable_scope("layer5"):
        fc_weight = get_weights_variable([node, output_node], name="fc_weight")
        fc_bias = get_bias_variable([output_node], name="fc_bias")
        fc_layer = tf.add(tf.matmul(reshaped, fc_weight), fc_bias)
    return fc_layer

def CNN_model():
    # load the data
    mnist = input_data.read_data_sets(r"C:\Users\Administrator\Desktop\AI_project\tensorflow\MNIST_data",one_hot=True)
    n_batch_train = math.ceil(mnist.train.num_examples / batch_size)
    n_batch_test = math.ceil(mnist.test.num_examples / batch_size)
    # placeholders for the input images and one-hot labels
    x = tf.placeholder(tf.float32, [batch_size, input_node], name="x_input")
    y = tf.placeholder(tf.float32, [batch_size, output_node], name="y_output")
    # forward pass through the network
    result = inference(x)
    # loss function and optimizer
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=result, name="loss"))
    train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
    # compare predictions against labels and compute the accuracy
    prediction = tf.equal(tf.argmax(y, axis=1), tf.argmax(result, axis=1))
    accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
    # saver op for checkpointing
    saver = tf.train.Saver()
    # variable initializer
    init = tf.global_variables_initializer()
    # run the graph in a session
    with tf.Session() as sess:
        sess.run(init)
        # restore the model if a checkpoint for the requested iteration exists;
        # tf.train.Saver writes <prefix>.index and <prefix>.data files, so test for the .index file
        path = os.path.join(model_save_path, model_name) + "-" + str(iteration)
        if os.path.exists(path + ".index"):
            print(path)
            saver.restore(sess, path)
        else:
            os.makedirs(model_save_path, exist_ok=True)
            writer = tf.summary.FileWriter("./graph", sess.graph)
            writer.close()
        # train and evaluate
        for i in range(iteration,n_times):
            # training pass over all training batches
            loss_total = 0
            for _ in range(n_batch_train):
                x_input,y_input = mnist.train.next_batch(batch_size)
                _,l = sess.run([train,loss],feed_dict={x:x_input,y:y_input})
                loss_total+=l
            # evaluate on the test set
            accuracy_total = 0
            for _ in range(n_batch_test):
                x_input, y_input = mnist.test.next_batch(batch_size)
                accuracy_total += sess.run(accuracy, feed_dict={x: x_input, y: y_input})

            print("Iteration:{},train_loss:{},accuracy:{}".format(i, loss_total / n_batch_train, accuracy_total / n_batch_test))
            # checkpoint every 2 epochs so an interrupted run can resume after a crash
            if i % 2 == 0:
                saver.save(sess, os.path.join(model_save_path, model_name), global_step=i)
    return
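
Note that nothing in the listing actually calls CNN_model(); assuming the script is meant to be run directly, a minimal entry point would be:

if __name__ == "__main__":
    CNN_model()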

2. Results

Iteration:0,train_loss:0.10074821015311913,accuracy:0.9765000128746033
Iteration:1,train_loss:0.06776840671642938,accuracy:0.9841000121831894
Iteration:2,train_loss:0.05306383292614059,accuracy:0.9825000113248825

3. Summary:

Pay particular attention to the shape conventions: a convolution kernel is defined as [kernel_size, kernel_size, channels, kernel_deep]; the input to a convolutional layer has the format [batch_size, image_size, image_size, image_channels]; the pooling window is defined as ksize = [1, pool_size, pool_size, 1]; and the pooled output must be reshaped (flattened) before the fully connected layer.
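
As a worked check of those shapes, here is a trace of one batch through the network (a sketch assuming the hyperparameters above, convolution strides of 1, and SAME padding):

# Shape of a batch of 100 MNIST images at each stage:
# [100, 784]         flat input
# [100, 28, 28, 1]   after reshape to [batch_size, image_size, image_size, image_channels]
# [100, 28, 28, 32]  after conv1 (5x5 kernel, 32 filters, stride 1, SAME padding)
# [100, 14, 14, 32]  after pool1 (2x2 window, stride 2)
# [100, 14, 14, 64]  after conv2 (5x5 kernel, 64 filters, stride 1, SAME padding)
# [100, 7, 7, 64]    after pool2 (2x2 window, stride 2)
# [100, 3136]        after flattening: node = 7 * 7 * 64 = 3136
# [100, 10]          logits from the fully connected layer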
