TensorFlow: Saving and Importing .pb Files

0. Basics

# Freeze the graph together with the variable values

from tensorflow.python.framework import graph_util
var_list = tf.global_variables()
# output_node_names expects op names (no ':0' suffix), so strip the tensor index
output_names = [v.name.split(':')[0] for v in var_list]
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names=output_names)  # freeze graph and variable values
tf.train.write_graph(constant_graph, './output', 'expert-graph.pb', as_text=False)

# ----- Method 2 -------------------
from tensorflow.python.framework import graph_util
var_list = tf.global_variables()
output_names = [v.name.split(':')[0] for v in var_list]  # op names, not tensor names
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names=output_names)
with tf.gfile.FastGFile(logdir + 'expert-graph.pb', mode='wb') as f:
    f.write(constant_graph.SerializeToString())
# Save the graph structure only (no variable values)
graph_def = tf.get_default_graph().as_graph_def()
with tf.gfile.GFile('./output/export.pb', 'wb') as f:
    f.write(graph_def.SerializeToString())

# or
tf.train.write_graph(graph_def, './output', 'expert-graph.pb', as_text=False)
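
For debugging it can be handy to dump the same graph definition in the human-readable pbtxt text format; a minimal sketch (only the as_text flag and the file name change):

# as_text=True writes a protobuf text file that can be opened in any editor
graph_def = tf.get_default_graph().as_graph_def()
tf.train.write_graph(graph_def, './output', 'expert-graph.pbtxt', as_text=True)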

1. Saving a pb file

import tensorflow as tf
from tensorflow.python.framework import graph_util

logdir='./output/'

with tf.variable_scope('conv'):
    w=tf.get_variable('w',[2,2],tf.float32,initializer=tf.random_normal_initializer)
    b=tf.get_variable('b',[2],tf.float32,initializer=tf.random_normal_initializer)


sess=tf.InteractiveSession()

tf.global_variables_initializer().run()  # initialize all variables

# tf.train.write_graph(tf.get_default_graph(),logdir,'expert-graph.pb',False)

constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["conv/w", "conv/b"])  # list both ops; nodes not reachable from the outputs are stripped
with tf.gfile.FastGFile(logdir+'expert-graph.pb', mode='wb') as f:
    f.write(constant_graph.SerializeToString())


sess.close()


Reference: https://github.com/tensorflow/models/blob/master/research/slim/export_inference_graph.py

# -*- coding: UTF-8 -*-
import tensorflow as tf
from tensorflow.python.framework import graph_util

logdir='./'

placeholder = tf.placeholder(name='input', dtype=tf.float32,
                             shape=[None, 28 * 28 * 1])

# tf.train.write_graph(tf.get_default_graph(),logdir,'expert-graph.pb',False)
# constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["conv/w"])
constant_graph = tf.get_default_graph().as_graph_def()
with tf.gfile.FastGFile(logdir+'expert-graph.pb', mode='wb') as f:
    f.write(constant_graph.SerializeToString())

2. Importing a pb file

import tensorflow as tf
from tensorflow.python.framework import graph_util


logdir = './output/'
output_graph_path = logdir + 'expert-graph.pb'
with tf.Session() as sess:
    # the frozen graph contains only constants, so no variable
    # initialization is needed before or after the import
    output_graph_def = tf.GraphDef()
    with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(output_graph_def, name="")

    input_x = sess.graph.get_tensor_by_name("conv/w:0")
    print(input_x.eval())

    output = sess.graph.get_tensor_by_name("conv/b:0")
    print(output.eval())

# -*- coding: UTF-8 -*-
import tensorflow as tf
from tensorflow.python.framework import graph_util
import numpy as np

output_graph_path = r"./expert-graph.pb"
with tf.Session() as sess:
    output_graph_def = tf.GraphDef()
    with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(output_graph_def, name="")

    input_x = sess.graph.get_tensor_by_name("input:0")
    print(input_x)
    y = input_x

    out = sess.run(y, {input_x: np.random.random([1, 28 * 28])})
    print(out[0, :10])  # first 10 values of the single sample
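
As an alternative to get_tensor_by_name, tf.import_graph_def can hand back the imported tensors directly through its return_elements argument; a minimal sketch, assuming the expert-graph.pb with the 'input' placeholder saved above:

import tensorflow as tf
import numpy as np

graph_def = tf.GraphDef()
with open('./expert-graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

with tf.Session() as sess:
    # return_elements fetches the listed tensors as the import happens
    input_x, = tf.import_graph_def(graph_def, return_elements=['input:0'], name='')
    out = sess.run(input_x, {input_x: np.random.random([1, 28 * 28])})
    print(out.shape)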

3. MNIST example

Save checkpoints as .ckpt during repeated training, then freeze the best result into a .pb file for deployment and testing (the freeze step on its own is sketched below).
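
The freeze step on its own looks like this (a minimal sketch; it assumes the network from the training script below has already been built, a checkpoint exists under ./output/, and the output op is named 'output'; the training script performs the same freeze inline):

import tensorflow as tf
from tensorflow.python.framework import graph_util

# restore the best/latest checkpoint, then bake the variables into constants
saver = tf.train.Saver()
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state('./output/')
    saver.restore(sess, ckpt.model_checkpoint_path)
    frozen = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['output'])
    with tf.gfile.FastGFile('mnist.pb', mode='wb') as f:
        f.write(frozen.SerializeToString())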

  • train
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
w = 28
h = 28
c = 1

def build_network(height, width, channel):
    x = tf.placeholder(tf.float32, shape=[None, height,width, channel], name='input')
    y_ = tf.placeholder(tf.int64, shape=[None,], name='y_')

    def weight_variable(shape, name="weights"):
        initial = tf.truncated_normal(shape, dtype=tf.float32, stddev=0.1)*0.001
        return tf.Variable(initial, name=name)

    def bias_variable(shape, name="biases"):
        initial = tf.constant(0.1, dtype=tf.float32, shape=shape)
        return tf.Variable(initial, name=name)

    def conv2d(input, w):
        return tf.nn.conv2d(input, w, [1, 1, 1, 1], padding='SAME')

    def pool_max(input):
        return tf.nn.max_pool(input,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='pool1')

    def fc(input, w, b):
        return tf.matmul(input, w) + b

    # conv1
    with tf.name_scope('conv1_1') as scope:
        kernel = weight_variable([3, 3, 1, 32])
        biases = bias_variable([32])
        output_conv1_1 = tf.nn.relu(conv2d(x, kernel) + biases, name=scope)

    pool1 = pool_max(output_conv1_1) # [n,14,14,32]

    # conv2
    with tf.name_scope('conv1_2') as scope:
        kernel = weight_variable([3, 3, 32, 64])
        biases = bias_variable([64])
        output_conv1_2 = tf.nn.relu(conv2d(pool1, kernel) + biases, name=scope)

    pool2 = pool_max(output_conv1_2) # [n,7,7,64]

    #fc1
    with tf.name_scope('fc1') as scope:
        shape = int(np.prod(pool2.get_shape()[1:]))
        kernel = weight_variable([shape, 512])
        biases = bias_variable([512])
        pool2_flat = tf.reshape(pool2, [-1, shape])
        output_fc1 = tf.nn.relu(fc(pool2_flat, kernel, biases), name=scope)

    #fc2
    with tf.name_scope('fc2') as scope:
        kernel = weight_variable([512, 10])
        biases = bias_variable([10])
        output_fc2 = fc(output_fc1, kernel, biases)

    y = tf.nn.softmax(output_fc2, name="softmax")

    # the loss expects raw logits, not softmax probabilities
    cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output_fc2, labels=y_))
    optimize = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)  # 1e-1 is far too large for Adam

    prediction_labels = tf.argmax(y, axis=1, name="output")

    correct_prediction = tf.equal(prediction_labels, y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    correct_times_in_batch = tf.reduce_sum(tf.cast(correct_prediction, tf.int32))

    return dict(
        x=x,
        y_=y_,
        optimize=optimize,
        correct_prediction=correct_prediction,
        correct_times_in_batch=correct_times_in_batch,
        accuracy=accuracy,
        cost=cost,
    )


def train_network(graph, batch_size, num_epochs, pb_file_path):
    init = tf.global_variables_initializer()

    saver=tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        # check whether a checkpoint was saved previously
        ckpt = tf.train.get_checkpoint_state('./output/')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        for epoch_index in range(num_epochs):
            for i in range(1000):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                feed={
                    graph['x']: np.reshape(batch_xs, (-1, h, w, c)),
                    graph['y_']: batch_ys}
                sess.run([graph['optimize']], feed_dict=feed)
                if i % 100 == 0:
                    print('step', i, 'acc', sess.run(graph['accuracy'], feed), 'loss', sess.run(graph['cost'], feed))
                    # checkpoint every 100 steps instead of every single step
                    saver.save(sess, './output/model.ckpt', global_step=i)

            constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def,['output'])
            with tf.gfile.FastGFile(pb_file_path, mode='wb') as f:
                f.write(constant_graph.SerializeToString())


def main():
    batch_size = 128
    num_epochs = 2

    pb_file_path = "mnist.pb"

    g = build_network(height=h, width=w, channel=c)
    train_network(g, batch_size, num_epochs, pb_file_path)

main()
  • test
import tensorflow as tf
import numpy as np

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)

def recognize(mnist, pb_file_path):
    with tf.Graph().as_default():
        output_graph_def = tf.GraphDef()

        with open(pb_file_path, "rb") as f:
            output_graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(output_graph_def, name="")

        with tf.Session() as sess:
            # the frozen graph contains only constants; no variable init is needed

            input_x = sess.graph.get_tensor_by_name("input:0")
            out_softmax = sess.graph.get_tensor_by_name("softmax:0")
            # out_label = sess.graph.get_tensor_by_name("output:0")

            img_out_softmax = sess.run(out_softmax, feed_dict={input_x: np.reshape(mnist.test.images[0], [-1, 28, 28, 1])})

            print ("img_out_softmax:",img_out_softmax)
            prediction_labels = np.argmax(img_out_softmax, axis=1)
            print ("label:",prediction_labels)
            print('true label:',mnist.test.labels[0])

recognize(mnist, "./mnist.pb")

4. Modifying the output layer

  • Save the pb file
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
w = 28
h = 28
c = 1

def build_network(height, width, channel):
    x = tf.placeholder(tf.float32, shape=[None, height,width, channel], name='input')
    y_ = tf.placeholder(tf.int64, shape=[None,], name='y_')

    def weight_variable(shape, name="weights"):
        initial = tf.truncated_normal(shape, dtype=tf.float32, stddev=0.1)*0.001
        return tf.Variable(initial, name=name)

    def bias_variable(shape, name="biases"):
        initial = tf.constant(0.1, dtype=tf.float32, shape=shape)
        return tf.Variable(initial, name=name)

    def conv2d(input, w):
        return tf.nn.conv2d(input, w, [1, 1, 1, 1], padding='SAME')

    def pool_max(input):
        return tf.nn.max_pool(input,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='pool1')

    def fc(input, w, b):
        return tf.matmul(input, w) + b

    # conv1
    with tf.name_scope('conv1_1') as scope:
        kernel = weight_variable([3, 3, 1, 32])
        biases = bias_variable([32])
        output_conv1_1 = tf.nn.relu(conv2d(x, kernel) + biases, name=scope)

    pool1 = pool_max(output_conv1_1) # [n,14,14,32]

    # conv2
    with tf.name_scope('conv1_2') as scope:
        kernel = weight_variable([3, 3, 32, 64])
        biases = bias_variable([64])
        output_conv1_2 = tf.nn.relu(conv2d(pool1, kernel) + biases, name=scope)

    pool2 = pool_max(output_conv1_2) # [n,7,7,64]

    #fc1
    with tf.name_scope('fc1') as scope:
        shape = int(np.prod(pool2.get_shape()[1:]))
        kernel = weight_variable([shape, 512])
        biases = bias_variable([512])
        pool2_flat = tf.reshape(pool2, [-1, shape])
        output_fc1 = tf.nn.relu(fc(pool2_flat, kernel, biases), name=scope)

    #fc2
    with tf.name_scope('fc2') as scope:
        kernel = weight_variable([512, 10])
        biases = bias_variable([10])
        output_fc2 = fc(output_fc1, kernel, biases)

    y = tf.nn.softmax(output_fc2, name="softmax")

    # the loss expects raw logits, not softmax probabilities
    cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output_fc2, labels=y_))
    optimize = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)  # 1e-1 is far too large for Adam

    prediction_labels = tf.argmax(y, axis=1, name="output")

    correct_prediction = tf.equal(prediction_labels, y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    correct_times_in_batch = tf.reduce_sum(tf.cast(correct_prediction, tf.int32))

    return dict(
        x=x,
        y_=y_,
        optimize=optimize,
        correct_prediction=correct_prediction,
        correct_times_in_batch=correct_times_in_batch,
        accuracy=accuracy,
        cost=cost,
    )


def train_network(graph, batch_size, num_epochs, pb_file_path):
    init = tf.global_variables_initializer()

    saver=tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        # check whether a checkpoint was saved previously
        ckpt = tf.train.get_checkpoint_state('./output/')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        for epoch_index in range(num_epochs):
            for i in range(1000):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                feed={
                    graph['x']: np.reshape(batch_xs, (-1, h, w, c)),
                    graph['y_']: batch_ys}
                sess.run([graph['optimize']], feed_dict=feed)
                if i % 100 == 0:
                    print('step', i, 'acc', sess.run(graph['accuracy'], feed), 'loss', sess.run(graph['cost'], feed))
                    # checkpoint every 100 steps instead of every single step
                    saver.save(sess, './output/model.ckpt', global_step=i)

            constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['input', 'y_', 'fc1'])  # keep fc1 so new layers can be attached to it later
            with tf.gfile.FastGFile(pb_file_path, mode='wb') as f:
                f.write(constant_graph.SerializeToString())


def main():
    batch_size = 128
    num_epochs = 2

    pb_file_path = "mnist.pb"

    g = build_network(height=h, width=w, channel=c)
    train_network(g, batch_size, num_epochs, pb_file_path)

main()
  • Import the pb file and modify the output layer (the imported layers are frozen constants, so only the new layers are trained; see the note in section 5)
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)

def recognize(mnist, pb_file_path):
    with tf.Graph().as_default():
        output_graph_def = tf.GraphDef()

        with open(pb_file_path, "rb") as f:
            output_graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(output_graph_def, name="")

        def weight_variable(shape, name="weights"):
            initial = tf.truncated_normal(shape, dtype=tf.float32, stddev=0.1) * 0.001
            return tf.Variable(initial, name=name)

        def bias_variable(shape, name="biases"):
            initial = tf.constant(0.1, dtype=tf.float32, shape=shape)
            return tf.Variable(initial, name=name)

        def fc(input, w, b):
            return tf.matmul(input, w) + b

        with tf.Session() as sess:
            # init must wait until the new layers' variables exist (done below)

            input_x = sess.graph.get_tensor_by_name("input:0")
            input_y=sess.graph.get_tensor_by_name('y_:0')

            output_fc1=sess.graph.get_tensor_by_name('fc1:0')

            with tf.name_scope('fc2') as scope:
                kernel =weight_variable([512,1024])
                biases = bias_variable([1024])
                output_fc2 = tf.nn.relu(fc(output_fc1, kernel, biases),name=scope)

            with tf.name_scope('fc3') as scope:
                kernel = weight_variable([1024,10])
                biases = bias_variable([10])
                output_fc3 = fc(output_fc2, kernel, biases)

            y = tf.nn.softmax(output_fc3, name="softmax")

            # the loss expects raw logits, not softmax probabilities
            cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output_fc3, labels=input_y))
            optimize = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)  # 1e-1 is far too large for Adam

            prediction_labels = tf.argmax(y, axis=1, name="output")

            correct_prediction = tf.equal(prediction_labels, input_y)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

            tf.global_variables_initializer().run()

            saver = tf.train.Saver()
            # check whether a checkpoint was saved previously
            ckpt = tf.train.get_checkpoint_state('./output2/')
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

            for i in range(100):
                batch_xs, batch_ys = mnist.train.next_batch(32)
                feed = {
                    input_x: np.reshape(batch_xs, (-1, 28, 28, 1)),
                    input_y: batch_ys}
                sess.run(optimize, feed_dict=feed)
                if i % 20 == 0:  # % 100 would log only once in a 100-step loop
                    print('step', i, 'acc', sess.run(accuracy, feed), 'loss',
                          sess.run(cost, feed))
                    saver.save(sess, './output2/model.ckpt', global_step=i)

recognize(mnist, "./mnist.pb")

5. MNIST example 2

  • Deployment test

① Save the pb file

#!/usr/bin/python
# -*- coding: UTF-8 -*-

# from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import graph_util
# number 1 to 10 data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)  # random initial values
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, tf.float32, shape)
    return tf.Variable(initial)

def conv2d(x, W):
    #stride[1,x_movement,y_movement,1]
    #Must have stride[0]=stride[3]=1
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')

def max_pool_2x2(x):
    #Must have stride[0]=stride[3]=1
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 784],name='input') # 28x28
ys = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32,name='keep_prob')

x_image = tf.reshape(xs, [-1, 28, 28, 1])  # 28x28
# x_image shape: [n_samples, 28, 28, 1]; 1 channel for grayscale, 3 for RGB

## conv1 layer ##
# 5x5 patches, 1 input channel (grayscale), 32 output channels
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output size 28x28x32 (SAME padding keeps H and W)
h_pool1 = max_pool_2x2(h_conv1)  # output size 14x14x32 (stride 2 halves H and W)

## conv2 layer ##
W_conv2 = weight_variable([5, 5, 32, 64])  # 64 output channels
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # output size 14x14x64
h_pool2 = max_pool_2x2(h_conv2)  # output size 7x7x64

## func1 layer ##

W_fc1=weight_variable([7*7*64,1024])
b_fc1=bias_variable([1024])
# [n_samples, 7, 7, 64] --> [n_samples, 7*7*64]: flatten the 3-D feature maps to 1-D
h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])
h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)

h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)

## func2 layer ##
W_fc2=weight_variable([1024,10])
b_fc2=bias_variable([10])
prediction=tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2,name='softmax')

# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))       # loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

sess = tf.Session()
# important step
sess.run(tf.global_variables_initializer())

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        print(compute_accuracy(
            mnist.test.images, mnist.test.labels))

constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def,['input','keep_prob','softmax'])
with tf.gfile.FastGFile('mnist.pb', mode='wb') as f:
    f.write(constant_graph.SerializeToString())

sess.close()


② Import the pb file and run the deployment test

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

with tf.Graph().as_default():
    output_graph_def = tf.GraphDef()

    with open('mnist.pb', "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(output_graph_def, name="")

    with tf.Session() as sess:
        # the frozen graph contains only constants; no variable init is needed

        input_x = sess.graph.get_tensor_by_name("input:0")
        keep_prob=sess.graph.get_tensor_by_name("keep_prob:0")
        out_softmax = sess.graph.get_tensor_by_name("softmax:0")

        img_out_softmax = sess.run(out_softmax, feed_dict={input_x:mnist.test.images[:10],keep_prob:1.})
        prediction_labels = np.argmax(img_out_softmax, axis=1)
        print("label:", prediction_labels)
        print('true label:', np.argmax(mnist.test.labels[:10], axis=1))


  • Rewriting the output layer

① Save the pb file

#!/usr/bin/python
# -*- coding: UTF-8 -*-

# from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import graph_util
# number 1 to 10 data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)  # random initial values
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, tf.float32, shape)
    return tf.Variable(initial)

def conv2d(x, W):
    #stride[1,x_movement,y_movement,1]
    #Must have stride[0]=stride[3]=1
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')

def max_pool_2x2(x):
    #Must have stride[0]=stride[3]=1
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 784],name='input') # 28x28
ys = tf.placeholder(tf.float32, [None, 10],name='ys')
keep_prob = tf.placeholder(tf.float32,name='keep_prob')

x_image = tf.reshape(xs, [-1, 28, 28, 1])  # 28x28
# x_image shape: [n_samples, 28, 28, 1]; 1 channel for grayscale, 3 for RGB

## conv1 layer ##
# 5x5 patches, 1 input channel (grayscale), 32 output channels
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output size 28x28x32 (SAME padding keeps H and W)
h_pool1 = max_pool_2x2(h_conv1)  # output size 14x14x32 (stride 2 halves H and W)

## conv2 layer ##
W_conv2 = weight_variable([5, 5, 32, 64])  # 64 output channels
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # output size 14x14x64
h_pool2 = max_pool_2x2(h_conv2)  # output size 7x7x64

## func1 layer ##

W_fc1=weight_variable([7*7*64,1024])
b_fc1=bias_variable([1024])
# [n_samples, 7, 7, 64] --> [n_samples, 7*7*64]: flatten the 3-D feature maps to 1-D
h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])
h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1,name='fc1')

h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)

## func2 layer ##
W_fc2=weight_variable([1024,10])
b_fc2=bias_variable([10])
prediction=tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2,name='softmax')

# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))       # loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

sess = tf.Session()
# important step
sess.run(tf.global_variables_initializer())

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        print(compute_accuracy(
            mnist.test.images, mnist.test.labels))

constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['input', 'ys', 'keep_prob', 'fc1'])  # keep fc1 as the attachment point for new layers
with tf.gfile.FastGFile('mnist.pb', mode='wb') as f:
    f.write(constant_graph.SerializeToString())

sess.close()


② Import the pb file and rewrite the output layer

Note: rewriting the output layer like this implements transfer learning; the imported layers were frozen into constants, so they act as a fixed feature extractor and only the newly added layers are trained.

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

with tf.Graph().as_default():
    output_graph_def = tf.GraphDef()

    with open('mnist.pb', "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(output_graph_def, name="")


    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)  # random initial values
        return tf.Variable(initial)


    def bias_variable(shape):
        initial = tf.constant(0.1, tf.float32, shape)
        return tf.Variable(initial)


    with tf.Session() as sess:
        # init must wait until the new layers' variables exist (done below)

        input_x = sess.graph.get_tensor_by_name("input:0")
        ys=sess.graph.get_tensor_by_name("ys:0")
        keep_prob=sess.graph.get_tensor_by_name("keep_prob:0")
        fc1=sess.graph.get_tensor_by_name("fc1:0")

        h_fc1_drop = tf.nn.dropout(fc1, keep_prob)

        ## func2 layer ##
        W_fc2 = weight_variable([1024, 1024])
        b_fc2 = bias_variable([1024])
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

        h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)

        ## func3 layer ##
        W_fc3 = weight_variable([1024, 10])
        b_fc3 = bias_variable([10])
        prediction = tf.nn.softmax(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)

        # the error between prediction and real data
        cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                                      reduction_indices=[1]))  # loss
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

        correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(ys, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        init = tf.global_variables_initializer()
        sess.run(init)

        for step in range(200):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            sess.run(train_step, feed_dict={input_x: batch_xs, ys: batch_ys, keep_prob: 0.5})
            if step % 20== 0:
                print('step',step,'acc',accuracy.eval({input_x: mnist.test.images, ys: mnist.test.labels, keep_prob: 1.}))


6. Inspecting op names

import tensorflow as tf
import os

def create_graph():
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(os.path.join('mnist.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')


# print every op's name and its output tensors
def print_ops():
    create_graph()
    with tf.Session() as sess:
        ops = sess.graph.get_operations()
        for op in ops:
            print(op.name, op.outputs)

print_ops()
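
The node names can also be read straight from the GraphDef protobuf, without building a session at all; a minimal sketch:

graph_def = tf.GraphDef()
with open('mnist.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())
for node in graph_def.node:
    print(node.name, node.op)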