Learning TensorFlow: CNN networks

Building the network

Using TensorBoard

Saving and restoring with Saver

A newly built TensorFlow model, once saved, produces four files:

checkpoint       # indexes the most recent checkpoint; the model itself consists of the three files below

name.data-00000-of-00001    # stores the values of the model parameters

name.index    # maps each variable name to its location in the .data file

name.meta   # stores the graph structure of the model
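
To see what actually landed in the .data/.index pair, the stored variables can be enumerated. A minimal sketch (my addition, assuming the checkpoint path used in the example below):

import tensorflow as tf

# tf.train.list_variables reads the .index/.data pair and yields (name, shape) pairs
for name, shape in tf.train.list_variables('checkpoint/my_test_model-1000'):
    print(name, shape)   # e.g. bias  []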

A simple example (adapted from an external reference):

Saving:

import tensorflow as tf
 
# Prepare to feed input, i.e. feed_dict and placeholders
w1 = tf.placeholder("float", name="w1")
w2 = tf.placeholder("float", name="w2")
b1 = tf.Variable(2.0, name="bias")
feed_dict = {w1: 4, w2: 8}
 
# Define a test operation that we will restore
w3 = tf.add(w1, w2)
w4 = tf.multiply(w3, b1, name="result")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
 
# Create a saver object which will save all the variables
saver = tf.train.Saver()
 
# Run the operation by feeding input
print(sess.run(w4, feed_dict))
# Prints 24.0: (w1 + w2) * b1 = (4 + 8) * 2.0
 
# Now, save the graph
saver.save(sess, 'checkpoint/my_test_model', global_step=1000)
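
With global_step=1000, the files on disk are named my_test_model-1000.meta/.index/.data-00000-of-00001. By default a Saver keeps only the five most recent checkpoints; a hedged sketch of the standard tf.train.Saver arguments that control this:

# keep only the 3 newest checkpoints, plus one long-lived snapshot every 2 hours
saver = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=2)
saver.save(sess, 'checkpoint/my_test_model', global_step=1000)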

Restoring:

import tensorflow as tf
 
with tf.Session() as sess:
    # First let's load meta graph and restore weights
    saver = tf.train.import_meta_graph('checkpoint/my_test_model-1000.meta')
    saver.restore(sess, tf.train.latest_checkpoint('checkpoint/'))
 
    # Now, let's access and create placeholders variables and
    # create feed-dict to feed new data
 
    graph = tf.get_default_graph()
    w1 = graph.get_tensor_by_name("w1:0")
    w2 = graph.get_tensor_by_name("w2:0")
    feed_dict = {w1: 13.0, w2: 13}
 
    # Now, access the op that you want to run.
    op_to_restore = graph.get_tensor_by_name("result:0")
 
    print(sess.run(op_to_restore, feed_dict))
    # Prints 52.0: (13 + 13) * 2.0, computed from the new
    # values of w1 and w2 and the saved value of b1.
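
    # Not in the original example: still inside the same session, the saved
    # variable itself can be read back. "bias:0" is the output tensor of the
    # Variable created above with name="bias".
    b1 = graph.get_tensor_by_name("bias:0")
    print(sess.run(b1))  # 2.0, the value restored from the checkpoint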

Each call to the saver's save method writes three data files plus one checkpoint file. The weights and other parameters are stored, keyed by variable name, in .ckpt.data; the graph and its metadata are saved to .ckpt.meta, which tf.train.import_meta_graph can load back into the current default graph.

Notes:
1. Saver operations must happen after the session has been created.
2. The save path needs at least one directory level, as in 'tmp/model.ckpt'; saving to a bare file name fails.
3. When restoring, pass the same path used for saving ('tmp/model.ckpt'), even though it matches none of the three file names on disk.

4. Extracting the trained network parameters (weights, biases) in TensorFlow:
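
A minimal sketch for note 4 (my addition, assuming standard tf.train API usage; the path matches the script below): reading trained parameters straight from a checkpoint as numpy arrays, without rebuilding the graph.

import tensorflow as tf

reader = tf.train.NewCheckpointReader('ckpt_model/save_net.ckpt')
for name in reader.get_variable_to_shape_map():
    value = reader.get_tensor(name)   # a plain numpy array
    print(name, value.shape)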


import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data',one_hot=True)


def compute_accuracy(v_xs,v_ys):
    # use the prediction op defined below
    global prediction
    # forward pass: per-class probabilities for each input image
    y_pre = sess.run(prediction,feed_dict={xs:v_xs,ys:v_ys,keep_prob:1})
    # a sample is correct when the predicted and true classes match
    correct_prediction = tf.equal(tf.argmax(y_pre,1),tf.argmax(v_ys,1))
    # fraction of correct samples; tf.cast(x,dtype) converts x to dtype
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    result = sess.run(accuracy,feed_dict={xs:v_xs,ys:v_ys,keep_prob:1})
    return result
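
Note that compute_accuracy adds fresh tf.equal/tf.reduce_mean ops to the graph on every call, so the graph grows during evaluation. A sketch of the usual restructuring (my suggestion, not the original code): define the accuracy ops once, next to prediction, and only run them afterwards.

# built once after `prediction` is defined, instead of inside compute_accuracy:
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# evaluation then becomes a single call:
# sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})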


def weight_variable(shape):
    initial = tf.truncated_normal(shape,stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1,shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')


def max_pooling_2x2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
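
With padding='SAME' and stride 1 the convolutions preserve the 28x28 spatial size (output = ceil(input / stride)), while each 2x2 pooling with stride 2 halves it: 28 -> 14 -> 7. A quick shape sanity check, runnable right after the helpers above (my illustration):

x_check = tf.placeholder(tf.float32, [None, 28, 28, 1])
print(conv2d(x_check, weight_variable([5, 5, 1, 32])).get_shape())  # (?, 28, 28, 32)
print(max_pooling_2x2(x_check).get_shape())                         # (?, 14, 14, 1)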

with tf.name_scope('inputs'):
    keep_prob=tf.placeholder(tf.float32)
    xs=tf.placeholder(tf.float32,[None,784])
    ys=tf.placeholder(tf.float32,[None,10])
    # -1: the number of images is not fixed; 1: grayscale, so a single channel
    # reshape xs into [-1, 28, 28, 1]
    x_image=tf.reshape(xs, [-1,28,28,1])
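
The -1 in the reshape is inferred from the other dimensions; a plain numpy illustration of the same rule:

import numpy as np
flat = np.zeros((50, 784), dtype=np.float32)
print(flat.reshape(-1, 28, 28, 1).shape)   # (50, 28, 28, 1): the -1 became 50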


# conv1 layer
# patch/kernel = [5,5]; input size 1 (the image depth is 1); output size 32 (the number of kernels)
with tf.name_scope('layer1'):
    with tf.name_scope('weights'):
        W_conv1=weight_variable([5,5,1,32])
        tf.summary.histogram('layer1/weights',W_conv1)
    with tf.name_scope('biases'):
        b_conv1=bias_variable([32])
        tf.summary.histogram('layer1/biases', b_conv1)
    # hidden layer
    h_conv1=tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)   # output size = 28*28*32
    # pooling layer
    h_pool1=max_pooling_2x2(h_conv1)                       # output size = 14*14*32

# conv2 layer
with tf.name_scope('layer2'):
    with tf.name_scope('weights'):
        W_conv2=weight_variable([5,5,32,64])               # patch 5x5, in size 32, out size 64
        tf.summary.histogram('layer2/weights',W_conv2)
    with tf.name_scope('biases'):
        b_conv2=bias_variable([64])
        tf.summary.histogram('layer2/biases',b_conv2)
    h_conv2=tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)    # output size = 14*14*64
    h_pool2=max_pooling_2x2(h_conv2)                       # output size = 7*7*64

# FC1: first fully connected layer
with tf.name_scope('FUNC1'):
    with tf.name_scope('weights'):
        w_fc1 = weight_variable([7*7*64,1024])
        tf.summary.histogram('FUNC1/weights',w_fc1)
    with tf.name_scope('biases'):
        b_fc1 = bias_variable([1024])
        tf.summary.histogram('FUNC1/biases',b_fc1)
    # flatten the pooled feature maps, then a fully connected layer with dropout
    h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,w_fc1)+b_fc1)
    h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
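
tf.nn.dropout zeroes each activation with probability 1 - keep_prob and scales the survivors by 1/keep_prob, which is why evaluation in compute_accuracy feeds keep_prob: 1. A tiny standalone illustration (my example; which units survive is random):

x = tf.ones([1, 4])
dropped = tf.nn.dropout(x, keep_prob=0.5)
print(tf.Session().run(dropped))   # e.g. [[2. 0. 2. 2.]]: kept units scaled to 2.0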

# FC2: second fully connected layer (readout)
with tf.name_scope('FUNC2'):
    with tf.name_scope('weights'):
        w_fc2 = weight_variable([1024,10])
        tf.summary.histogram('FUNC2/weights',w_fc2)
    with tf.name_scope('biases'):
        b_fc2 = bias_variable([10])
        tf.summary.histogram('FUNC2/biases',b_fc2)

with tf.name_scope('prediction'):
    prediction = tf.nn.softmax(tf.matmul(h_fc1_drop,w_fc2)+b_fc2)


# def add_layer(inputs, in_size,out_size,activation_function=None):
#     with tf.name_scope('layer'):
#         with tf.name_scope('weights'):
#             weights = tf.Variable(tf.random_normal([in_size,out_size]))
#             tf.summary.histogram('layer/weights',weights)
#         with tf.name_scope('biases'):
#             biases = tf.Variable(tf.zeros([1,out_size])+0.1)
#             tf.summary.histogram('layer/biases',biases)
#         with tf.name_scope('Wx_plus_b'):
#             Wx_plus_b = tf.matmul(inputs,weights)+biases
#             tf.summary.histogram('layer/Wx_plus_b',Wx_plus_b)
#         if activation_function is None:
#             outputs = Wx_plus_b
#         else:
#             outputs = activation_function(Wx_plus_b)
#         return outputs


with tf.name_scope('loss'):
    loss = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),
                                       reduction_indices=[1]))
    tf.summary.scalar('loss',loss)
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.0001).minimize(loss)
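
One caveat with this loss: tf.log(prediction) returns -inf (and then NaN gradients) as soon as a softmax output reaches exactly 0. A common, more stable substitution (my change, not the original code) feeds the pre-softmax logits to a fused op; prediction can still be kept as tf.nn.softmax(logits) for compute_accuracy:

logits = tf.matmul(h_fc1_drop, w_fc2) + b_fc2   # pre-softmax scores
stable_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))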

# saver for writing model checkpoints
saver = tf.train.Saver()

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
merged = tf.summary.merge_all()  # merge all summaries into a single op
writer = tf.summary.FileWriter('logs/',sess.graph)
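
Once the FileWriter has written the graph and summaries, the dashboards can be viewed by running tensorboard --logdir logs/ from the same directory and opening the printed URL (http://localhost:6006 by default).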

# create the checkpoint directory if needed, then check whether a trained model already exists
if not os.path.exists('ckpt_model/'):
    print('creating ckpt_model/ directory')
    os.mkdir('ckpt_model/')
else:
    print('ckpt_model/ directory already exists')

if os.path.exists('ckpt_model/checkpoint'):
    saver.restore(sess,'ckpt_model/save_net.ckpt')
    print('evaluating the restored model')
    print(compute_accuracy(mnist.test.images, mnist.test.labels))
else:
    for i in range(1000):
        batch_xs,batch_ys = mnist.train.next_batch(100)
        sess.run(train_step,feed_dict={xs:batch_xs,ys:batch_ys,keep_prob:0.5})
        if i%50==0:
            print(sess.run(loss,feed_dict={xs:batch_xs,ys:batch_ys,keep_prob:0.5}))
            print(compute_accuracy(mnist.test.images, mnist.test.labels))
            result = sess.run(merged,feed_dict={xs:batch_xs,ys:batch_ys,keep_prob:0.5})
            writer.add_summary(result,i)
            save_path = saver.save(sess, 'ckpt_model/save_net.ckpt')
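
A variant worth knowing here: passing global_step to save keeps numbered snapshots (save_net.ckpt-0, save_net.ckpt-50, ...) instead of overwriting a single file, and tf.train.latest_checkpoint then picks the newest one on restore. A hedged sketch:

save_path = saver.save(sess, 'ckpt_model/save_net.ckpt', global_step=i)
# on restore: saver.restore(sess, tf.train.latest_checkpoint('ckpt_model/'))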

 
