TensorFlow: generating a .pb file and loading a .pb file --- transfer learning

This post mainly explains how to train a model with TensorFlow and generate the corresponding .pb file, and finally how to reload that .pb file.

  • train
    First, the train part. The first step is, of course, reading the images: each image is read with io.imread, resized to VGG's input size (224, 224, 3) with transform.resize, and appended to data and label respectively.

    def read_img(path):
        cate   = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
        imgs   = []
        labels = []
        for idx, folder in enumerate(cate):
            for im in glob.glob(folder + '/*.jpg'):
                print('reading the image: %s' % (im))
                img = io.imread(im)
                img = transform.resize(img, (w, h, c))
                imgs.append(img)
                labels.append(idx)
        return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)
    data, label = read_img(path)
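
    read_img assumes one sub-directory of path per class, and the class index idx comes from the directory's position in os.listdir. A hypothetical two-class layout, with a sort added for a stable label order (os.listdir order is platform-dependent):

    # picture/cat/*.jpg -> label 0
    # picture/dog/*.jpg -> label 1
    cate = sorted(path + x for x in os.listdir(path) if os.path.isdir(path + x))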

    Here the image order is shuffled: first generate an index array with np.arange, shuffle it, and then use it to re-index data and label.

    num_example = data.shape[0]
    arr = np.arange(num_example)
    np.random.shuffle(arr)
    data = data[arr]
    label = label[arr]
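
    As an aside, np.random.permutation gives the same shuffled index array in one call (an equivalent sketch, not from the original post):

    perm = np.random.permutation(data.shape[0])  # shuffled indices 0..num_example-1
    data, label = data[perm], label[perm]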

    80% of the data is used for training and the remaining 20% for testing (even though there are only 30 images in total...).

    ratio = 0.8
    s = int(num_example * ratio)  # np.int was removed from NumPy; the built-in int works
    x_train = data[:s]
    y_train = label[:s]
    x_val   = data[s:]
    y_val   = label[s:]
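
    If scikit-learn is available, the shuffle and split above can also be done in one call (an alternative sketch, assuming scikit-learn is installed; the post does both steps by hand):

    from sklearn.model_selection import train_test_split
    x_train, x_val, y_train, y_val = train_test_split(data, label, test_size=0.2, shuffle=True)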

    Now build the VGG model. This step is not hard, but it is best to give every layer a name. The x and y below are the input placeholder and the label placeholder.

    def build_network(height, width, channel):
        x = tf.placeholder(tf.float32, shape=[None, height, width, channel], name='input')
        y = tf.placeholder(tf.int64, shape=[None, 2], name='labels_placeholder')
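        # ... conv1 through fc8 layers go here; see the full listing below ...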
    

    At the end of build_network the loss needs to be computed. finaloutput is the final (softmax) output, cost computes the loss, and optimize defines how the network is trained; also note the final return.

        finaloutput = tf.nn.softmax(output_fc8, name="softmax")

        # Note: the loss must take the raw logits (output_fc8), not finaloutput --
        # softmax_cross_entropy_with_logits applies softmax internally, so passing
        # the softmax output would apply softmax twice.
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output_fc8, labels=y))
        optimize = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)

        prediction_labels = tf.argmax(finaloutput, axis=1, name="output")
        read_labels = tf.argmax(y, axis=1)  # one-hot labels -> class indices, matching prediction_labels

        correct_prediction = tf.equal(prediction_labels, read_labels)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        correct_times_in_batch = tf.reduce_sum(tf.cast(correct_prediction, tf.int32))

        return dict(
            x=x,
            y=y,
            optimize=optimize,
            correct_prediction=correct_prediction,
            correct_times_in_batch=correct_times_in_batch,
            cost=cost,
        )
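
    A quick way to see why the loss must take the raw logits rather than finaloutput: softmax_cross_entropy_with_logits applies softmax internally, so feeding it an already-softmaxed tensor computes softmax(softmax(z)), which flattens the probabilities and weakens the gradient. A standalone numerical check (a sketch, independent of the model above):

    import numpy as np

    def softmax(v):
        e = np.exp(v - v.max())   # subtract the max for numerical stability
        return e / e.sum()

    z = np.array([2.0, -1.0])     # raw logits
    print(softmax(z))             # ~[0.95, 0.05]: a confident prediction
    print(softmax(softmax(z)))    # ~[0.71, 0.29]: artificially flattened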

    Next, the training process.

    def train_network(graph, batch_size, num_epochs, pb_file_path):
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            epoch_delta = 2
            for epoch_index in range(num_epochs):
                for i in range(12):
                    sess.run([graph['optimize']], feed_dict={
                        graph['x']: np.reshape(x_train[i], (1, 224, 224, 3)),
                        graph['y']: ([[1, 0]] if y_train[i] == 0 else [[0, 1]])
                    })
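
    The inline [[1, 0]] if y_train[i] == 0 else [[0, 1]] builds the one-hot label by hand; np.eye does the same thing for any number of classes (a small sketch, not from the original post):

    num_classes = 2
    one_hot = np.eye(num_classes, dtype=np.float32)[y_train[i]].reshape(1, num_classes)  # shape (1, 2)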

    That is really all there is to the training code: set batch_size and num_epochs and train. (Note that the inner loop actually feeds the training images one at a time, so batch_size only enters the accuracy bookkeeping.) The remaining code, shown in the full listing below, mainly prints the accuracy every few epochs.

    Save the trained model as a .pb file. Once the run finishes, a new .pb file appears in the corresponding folder.

    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["output"])
    with tf.gfile.FastGFile(pb_file_path, mode='wb') as f:
        f.write(constant_graph.SerializeToString())
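
    To sanity-check the export, the .pb file can be parsed right back and its node names listed; 'input', 'softmax' and 'output' should all appear (a minimal sketch using the same TF 1.x API):

    graph_def = tf.GraphDef()
    with tf.gfile.FastGFile(pb_file_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    print([node.name for node in graph_def.node])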
  • test

    Open the corresponding .pb file.

    with tf.Graph().as_default():
        output_graph_def = tf.GraphDef()

        with open(pb_file_path, "rb") as f:
            output_graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(output_graph_def, name="")

    Read the image file, resize it, and feed it to the model's input placeholder; img_out_softmax is then the corresponding output.

    img = io.imread(jpg_path)
    img = transform.resize(img, (224, 224, 3))
    img_out_softmax = sess.run(out_softmax, feed_dict={input_x:np.reshape(img, [-1, 224, 224, 3])})
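
    img_out_softmax has shape (1, 2), one probability per class. Taking the argmax and indexing into a class-name list turns it into a readable prediction (a sketch; the class names here are assumptions and must match the training folder order):

    class_names = ['cat', 'dog']  # assumed labels; must match the folder order used in training
    pred = int(np.argmax(img_out_softmax, axis=1)[0])
    print('predicted: %s (p=%.3f)' % (class_names[pred], img_out_softmax[0][pred]))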

    Finally, the complete train and test code:
    train

    from PIL import Image
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    import tensorflow as tf
    import os
    import glob
    from skimage import io, transform
    from tensorflow.python.framework import graph_util
    import collections
    
    path = '/home/zhoupeilin/vgg16/picture/'
    w = 224
    h = 224
    c = 3
    
    def read_img(path):
        cate   = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
        imgs   = []
        labels = []
        for idx, folder in enumerate(cate):
            for im in glob.glob(folder + '/*.jpg'):
                print('reading the image: %s' % (im))
                img = io.imread(im)
                img = transform.resize(img, (w, h, c))
                imgs.append(img)
                labels.append(idx)
        return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)
    data, label = read_img(path)
    
    num_example = data.shape[0]
    arr = np.arange(num_example)
    np.random.shuffle(arr)
    data = data[arr]
    label = label[arr]
    
    ratio = 0.8
    s = int(num_example * ratio)
    x_train = data[:s]
    y_train = label[:s]
    x_val   = data[s:]
    y_val   = label[s:]
    
    def build_network(height, width, channel):
        x = tf.placeholder(tf.float32, shape=[None, height, width, channel], name='input')
        y = tf.placeholder(tf.int64, shape=[None, 2], name='labels_placeholder')
    
        def weight_variable(shape, name="weights"):
            initial = tf.truncated_normal(shape, dtype=tf.float32, stddev=0.1)
            return tf.Variable(initial, name=name)
    
        def bias_variable(shape, name="biases"):
            initial = tf.constant(0.1, dtype=tf.float32, shape=shape)
            return tf.Variable(initial, name=name)
    
        def conv2d(input, w):
            return tf.nn.conv2d(input, w, [1, 1, 1, 1], padding='SAME')
    
        def pool_max(input):
            return tf.nn.max_pool(input,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='SAME',
                                   name='pool1')
    
        def fc(input, w, b):
            return tf.matmul(input, w) + b
    
        # conv1
        with tf.name_scope('conv1_1') as scope:
            kernel = weight_variable([3, 3, 3, 64])
            biases = bias_variable([64])
            output_conv1_1 = tf.nn.relu(conv2d(x, kernel) + biases, name=scope)
    
        with tf.name_scope('conv1_2') as scope:
            kernel = weight_variable([3, 3, 64, 64])
            biases = bias_variable([64])
            output_conv1_2 = tf.nn.relu(conv2d(output_conv1_1, kernel) + biases, name=scope)
    
        pool1 = pool_max(output_conv1_2)
    
        # conv2
        with tf.name_scope('conv2_1') as scope:
            kernel = weight_variable([3, 3, 64, 128])
            biases = bias_variable([128])
            output_conv2_1 = tf.nn.relu(conv2d(pool1, kernel) + biases, name=scope)
    
        with tf.name_scope('conv2_2') as scope:
            kernel = weight_variable([3, 3, 128, 128])
            biases = bias_variable([128])
            output_conv2_2 = tf.nn.relu(conv2d(output_conv2_1, kernel) + biases, name=scope)
    
        pool2 = pool_max(output_conv2_2)
    
        # conv3
        with tf.name_scope('conv3_1') as scope:
            kernel = weight_variable([3, 3, 128, 256])
            biases = bias_variable([256])
            output_conv3_1 = tf.nn.relu(conv2d(pool2, kernel) + biases, name=scope)
    
        with tf.name_scope('conv3_2') as scope:
            kernel = weight_variable([3, 3, 256, 256])
            biases = bias_variable([256])
            output_conv3_2 = tf.nn.relu(conv2d(output_conv3_1, kernel) + biases, name=scope)
    
        with tf.name_scope('conv3_3') as scope:
            kernel = weight_variable([3, 3, 256, 256])
            biases = bias_variable([256])
            output_conv3_3 = tf.nn.relu(conv2d(output_conv3_2, kernel) + biases, name=scope)
    
        pool3 = pool_max(output_conv3_3)
    
        # conv4
        with tf.name_scope('conv4_1') as scope:
            kernel = weight_variable([3, 3, 256, 512])
            biases = bias_variable([512])
            output_conv4_1 = tf.nn.relu(conv2d(pool3, kernel) + biases, name=scope)
    
        with tf.name_scope('conv4_2') as scope:
            kernel = weight_variable([3, 3, 512, 512])
            biases = bias_variable([512])
            output_conv4_2 = tf.nn.relu(conv2d(output_conv4_1, kernel) + biases, name=scope)
    
        with tf.name_scope('conv4_3') as scope:
            kernel = weight_variable([3, 3, 512, 512])
            biases = bias_variable([512])
            output_conv4_3 = tf.nn.relu(conv2d(output_conv4_2, kernel) + biases, name=scope)
    
        pool4 = pool_max(output_conv4_3)
    
        # conv5
        with tf.name_scope('conv5_1') as scope:
            kernel = weight_variable([3, 3, 512, 512])
            biases = bias_variable([512])
            output_conv5_1 = tf.nn.relu(conv2d(pool4, kernel) + biases, name=scope)
    
        with tf.name_scope('conv5_2') as scope:
            kernel = weight_variable([3, 3, 512, 512])
            biases = bias_variable([512])
            output_conv5_2 = tf.nn.relu(conv2d(output_conv5_1, kernel) + biases, name=scope)
    
        with tf.name_scope('conv5_3') as scope:
            kernel = weight_variable([3, 3, 512, 512])
            biases = bias_variable([512])
            output_conv5_3 = tf.nn.relu(conv2d(output_conv5_2, kernel) + biases, name=scope)
    
        pool5 = pool_max(output_conv5_3)
    
        #fc6
        with tf.name_scope('fc6') as scope:
            shape = int(np.prod(pool5.get_shape()[1:]))
            kernel = weight_variable([shape, 4096])
            biases = bias_variable([4096])
            pool5_flat = tf.reshape(pool5, [-1, shape])
            output_fc6 = tf.nn.relu(fc(pool5_flat, kernel, biases), name=scope)
    
        #fc7
        with tf.name_scope('fc7') as scope:
            kernel = weight_variable([4096, 4096])
            biases = bias_variable([4096])
            output_fc7 = tf.nn.relu(fc(output_fc6, kernel, biases), name=scope)
    
        #fc8
        with tf.name_scope('fc8') as scope:
            kernel = weight_variable([4096, 2])
            biases = bias_variable([2])
            # No ReLU on the final layer: output_fc8 holds the raw logits,
            # and a ReLU would zero out every negative logit before the loss.
            output_fc8 = tf.identity(fc(output_fc7, kernel, biases), name=scope)
    
        finaloutput = tf.nn.softmax(output_fc8, name="softmax")

        # The loss takes the raw logits; softmax_cross_entropy_with_logits
        # applies softmax internally.
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output_fc8, labels=y))
        optimize = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)

        prediction_labels = tf.argmax(finaloutput, axis=1, name="output")
        read_labels = tf.argmax(y, axis=1)  # one-hot labels -> class indices

        correct_prediction = tf.equal(prediction_labels, read_labels)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        correct_times_in_batch = tf.reduce_sum(tf.cast(correct_prediction, tf.int32))

        return dict(
            x=x,
            y=y,
            optimize=optimize,
            correct_prediction=correct_prediction,
            correct_times_in_batch=correct_times_in_batch,
            cost=cost,
        )
    
    
    def train_network(graph, batch_size, num_epochs, pb_file_path):
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            epoch_delta = 2
            for epoch_index in range(num_epochs):
                for i in range(12):
                    sess.run([graph['optimize']], feed_dict={
                        graph['x']: np.reshape(x_train[i], (1, 224, 224, 3)),
                        graph['y']: ([[1, 0]] if y_train[i] == 0 else [[0, 1]])
                    })
                if epoch_index % epoch_delta == 0:
                    total_batches_in_train_set = 0
                    total_correct_times_in_train_set = 0
                    total_cost_in_train_set = 0.
                    for i in range(12):
                        return_correct_times_in_batch = sess.run(graph['correct_times_in_batch'], feed_dict={
                            graph['x']: np.reshape(x_train[i], (1, 224, 224, 3)),
                            graph['y']: ([[1, 0]] if y_train[i] == 0 else [[0, 1]])
                        })
                        mean_cost_in_batch = sess.run(graph['cost'], feed_dict={
                            graph['x']: np.reshape(x_train[i], (1, 224, 224, 3)),
                            graph['y']: ([[1, 0]] if y_train[i] == 0 else [[0, 1]])
                        })
                        total_batches_in_train_set += 1
                        total_correct_times_in_train_set += return_correct_times_in_batch
                        total_cost_in_train_set += (mean_cost_in_batch * batch_size)
    
    
                    total_batches_in_test_set = 0
                    total_correct_times_in_test_set = 0
                    total_cost_in_test_set = 0.
                    for i in range(3):
                        return_correct_times_in_batch = sess.run(graph['correct_times_in_batch'], feed_dict={
                            graph['x']: np.reshape(x_val[i], (1, 224, 224, 3)),
                            graph['y']: ([[1, 0]] if y_val[i] == 0 else [[0, 1]])
                        })
                        mean_cost_in_batch = sess.run(graph['cost'], feed_dict={
                            graph['x']: np.reshape(x_val[i], (1, 224, 224, 3)),
                            graph['y']: ([[1, 0]] if y_val[i] == 0 else [[0, 1]])
                        })
                        total_batches_in_test_set += 1
                        total_correct_times_in_test_set += return_correct_times_in_batch
                        total_cost_in_test_set += (mean_cost_in_batch * batch_size)
    
                    acy_on_test  = total_correct_times_in_test_set / float(total_batches_in_test_set * batch_size)
                    acy_on_train = total_correct_times_in_train_set / float(total_batches_in_train_set * batch_size)
                    print('Epoch - {:2d}, acy_on_test:{:6.2f}%({}/{}),loss_on_test:{:6.2f}, acy_on_train:{:6.2f}%({}/{}),loss_on_train:{:6.2f}'.format(epoch_index, acy_on_test*100.0,total_correct_times_in_test_set,
                                                                                                                                                       total_batches_in_test_set * batch_size,
                                                                                                                                                       total_cost_in_test_set,
                                                                                                                                                       acy_on_train * 100.0,
                                                                                                                                                       total_correct_times_in_train_set,
                                                                                                                                                       total_batches_in_train_set * batch_size,
                                                                                                                                                       total_cost_in_train_set))
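                # re-freeze the current weights into constants and overwrite the .pb every epoch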
                constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["output"])
                with tf.gfile.FastGFile(pb_file_path, mode='wb') as f:
                    f.write(constant_graph.SerializeToString())
    
    
    def main():
        batch_size = 12
        num_epochs = 50
    
        pb_file_path = "vggs.pb"
    
        g = build_network(height=224, width=224, channel=3)
        train_network(g, batch_size, num_epochs, pb_file_path)
    
    main()

    test

    import tensorflow as tf
    import numpy as np
    import PIL.Image as Image
    from skimage import io, transform
    
    def recognize(jpg_path, pb_file_path):
        with tf.Graph().as_default():
            output_graph_def = tf.GraphDef()
    
            with open(pb_file_path, "rb") as f:
                output_graph_def.ParseFromString(f.read())
                _ = tf.import_graph_def(output_graph_def, name="")
    
            with tf.Session() as sess:
                init = tf.global_variables_initializer()
                sess.run(init)
    
                input_x = sess.graph.get_tensor_by_name("input:0")
                print(input_x)
                out_softmax = sess.graph.get_tensor_by_name("softmax:0")
                print(out_softmax)
                out_label = sess.graph.get_tensor_by_name("output:0")
                print(out_label)
    
                img = io.imread(jpg_path)
                img = transform.resize(img, (224, 224, 3))
                img_out_softmax = sess.run(out_softmax, feed_dict={input_x:np.reshape(img, [-1, 224, 224, 3])})
    
                print "img_out_softmax:",img_out_softmax
                prediction_labels = np.argmax(img_out_softmax, axis=1)
                print "label:",prediction_labels
    
    recognize("vgg16/picture/dog/dog3.jpg", "vgg16/vggs.pb")
    Reposted from: https://blog.csdn.net/u014432647/article/details/75276718