# AI Challenger scene classification: train a softmax classifier from TFRecord input

# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 16:05:02 2017

@author: wayne

references:

Input data:
https://indico.io/blog/tensorflow-data-inputs-part1-placeholders-protobufs-queues/
https://indico.io/blog/tensorflow-data-input-part2-extensions/

Overall architecture:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/how_tos/reading_data/fully_connected_reader.py
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/udacity/2_fullyconnected.ipynb

Saving and restoring the model:
https://github.com/SymphonyPy/Valified_Code_Classify/tree/master/Classified
"""

from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import time

def read_and_decode(tfrecords_file, batch_size, num_epochs):
    """Build the TF1 input pipeline for one TFRecord file.

    Args:
        tfrecords_file: path to a .tfrecord file whose serialized examples
            contain 'label' (int64), 'h'/'w'/'c' (int64 image dims) and
            'image' (raw uint8 bytes).
        batch_size: number of examples per output batch.
        num_epochs: passes over the file before the filename queue raises
            tf.errors.OutOfRangeError (caught by the training loop).

    Returns:
        A pair ``(image_batch, label_batch)``:
        ``image_batch`` of shape ``[batch_size, image_size*image_size*3]``
        (standardized, resized, flattened) and ``label_batch`` of shape
        ``[batch_size]`` (int32).

    NOTE(review): ``image_size`` is a module-level global defined at the
    bottom of the file; consider passing it as a parameter.
    """
    # BUG FIX: the original enqueued the module-level global `tfrecord_file`,
    # silently ignoring the `tfrecords_file` parameter.
    filename_queue = tf.train.string_input_producer(
        [tfrecords_file], num_epochs=num_epochs)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    img_features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'h': tf.FixedLenFeature([], tf.int64),
            'w': tf.FixedLenFeature([], tf.int64),
            'c': tf.FixedLenFeature([], tf.int64),
            'image': tf.FixedLenFeature([], tf.string),
        })

    h = tf.cast(img_features['h'], tf.int32)
    w = tf.cast(img_features['w'], tf.int32)
    c = tf.cast(img_features['c'], tf.int32)

    # Raw bytes -> uint8 tensor, restored to its original (h, w, c) shape.
    image = tf.decode_raw(img_features['image'], tf.uint8)
    image = tf.reshape(image, [h, w, c])

    label = tf.cast(img_features['label'], tf.int32)
    label = tf.reshape(label, [1])

    # Data augmentation (random crop / flip / brightness / contrast) could be
    # inserted here, before standardization.
    image = tf.image.per_image_standardization(image)

    # Resize and flatten so the linear model can consume a fixed-size vector.
    image = tf.image.resize_images(image, (image_size, image_size))
    image = tf.reshape(image, [image_size * image_size * 3])

    image_batch, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=64,  # NOTE: multiple threads may change the image order
        capacity=2000)
    return image_batch, tf.reshape(label_batch, [batch_size])


def read_tfrecord2(tfrecord_file, batch_size):
    """Train a one-layer softmax classifier on batches from `tfrecord_file`.

    Builds the input pipeline via `read_and_decode`, a linear model
    (weights + biases), an SGD optimizer and a top-3 accuracy metric, then
    runs a queue-driven training loop until the input queue is exhausted,
    saving a checkpoint at the end.

    NOTE(review): reads the module-level globals `num_epochs`, `image_size`
    and `num_labels` defined at the bottom of the file — consider passing
    them as parameters.
    """
    train_batch, train_label_batch = read_and_decode(tfrecord_file, batch_size, num_epochs)

    # Variables: single linear layer mapping flattened pixels -> class logits.
    weights = tf.Variable(
        tf.truncated_normal([image_size * image_size * 3, num_labels]))
    biases = tf.Variable(tf.zeros([num_labels]))

    # Training computation.
    logits = tf.matmul(train_batch, weights) + biases

#    one_hot = tf.one_hot(train_label_batch, num_labels)  # float32
    # sparse_* takes integer class ids directly, so no one-hot is needed.
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=train_label_batch, logits=logits))

    # Optimizer: plain SGD with a fixed learning rate of 0.5.
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    # Predictions for the training data; accuracy counts a hit when the true
    # label is among the top-3 logits.
    train_prediction = tf.nn.softmax(logits)
    accuracy = tf.reduce_mean(tf.cast(tf.nn.in_top_k(predictions = logits, targets=train_label_batch, k=3),tf.float32))

#      valid_prediction = tf.nn.softmax(
#          tf.matmul(tf_valid_dataset, weights) + biases)
#      test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)

    saver = tf.train.Saver() # create the Saver used to checkpoint at the end

    with tf.Session() as sess:
        # local_variables_initializer is required for the epoch counter of
        # string_input_producer: https://github.com/tensorflow/tensorflow/issues/1045
        sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))
        print("Initialized")

        # Start the queue-runner threads that feed the input pipeline.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            step = 0
            start_time = time.time()
            while not coord.should_stop():
#            train_batch2, train_label_batch2 = sess.run([train_batch, train_label_batch])
#            print(train_batch2.shape)
#            print(train_label_batch2.shape)
#            print(train_label_batch2)

                _, l, predictions, logits2, acc= sess.run([optimizer, loss, train_prediction, logits, accuracy])

                # NOTE(review): cumulative time since training start, not
                # per-step time — start_time is never reset.
                duration = time.time() - start_time

                if (step % 10 == 0):
                    print("Minibatch loss at step %d: %.6f (%.3f sec)" % (step, l, duration))
                    #print(logits2.shape)
#                   print(predictions)
#                    print(train_label_batch.eval()) # wrong: eval() would advance the queue and change these labels
                    print("Minibatch accuracy: %.6f" % acc)
                step += 1
        except tf.errors.OutOfRangeError:
            # Raised when the input queue has delivered num_epochs passes —
            # this is the normal end of training, so checkpoint here.
            print('Done training for %d epochs, %d steps.' % (num_epochs, step))
            # After training finishes, persist the model with saver.save.
            saver.save(sess, "save_path/file_name") # file_name is created automatically if it does not exist
        finally:        
            coord.request_stop()

        coord.join(threads)


# Hyper-parameters and paths. These must stay at module level because the
# functions above read them as globals (read_tfrecord2 uses num_epochs,
# image_size and num_labels).
tfrecord_file = '../ai_challenger_scene_train_20170904/train.tfrecord'
num_labels = 80    # number of scene classes in the AI Challenger dataset
batch_size = 256
num_epochs = 2
image_size = 120   # images are resized to image_size x image_size x 3

# FIX: guard the training run so importing this module no longer starts
# training as a side effect; running the file as a script is unchanged.
if __name__ == '__main__':
    read_tfrecord2(tfrecord_file, batch_size)
# (removed: CSDN web-page boilerplate — comment/"red packet"/payment UI text —
# accidentally pasted after the script; it was not Python and broke the file)