TensorFlow: parallel input of training data (demo)

1. Generate the data and write it to a TFRecord file:

import numpy as np
from sklearn.datasets import make_classification  # older scikit-learn: sklearn.datasets.samples_generator
import tensorflow as tf
import matplotlib.pyplot as plt
import os

def generate_data(data_num, data_dim):
    # Create a toy two-class dataset with data_dim features per sample and plot it.
    X1, Y1 = make_classification(n_samples=data_num, n_features=data_dim, n_redundant=0,
                                 n_clusters_per_class=1, n_classes=2)
    plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
    plt.show()
    print(Y1)
    return X1, Y1

def make_example(features, label):
    # Wrap one sample (a feature vector plus an integer class label) in a tf.train.Example proto.
    example = tf.train.Example(features=tf.train.Features(
        feature={
            'data': tf.train.Feature(float_list=tf.train.FloatList(value=features)),
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
        }
    ))
    return example

def generate_tfrecords(data_num, data_dim, filename):
    X, Y = generate_data(data_num, data_dim)
    if os.path.exists(filename):
        os.remove(filename)
    writer = tf.python_io.TFRecordWriter(filename)
    # Serialize each sample as one record in the TFRecord file.
    for x, y in zip(X, Y):
        example = make_example(x, y)
        writer.write(example.SerializeToString())
    writer.close()

if __name__ == '__main__':
    generate_tfrecords(1000, 2, 'reg.tfrecords')
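
A quick way to sanity-check the file after writing it (a minimal sketch, not part of the original demo) is to iterate over the serialized records with TF 1.x's tf.python_io.tf_record_iterator and decode the first one:

import tensorflow as tf

def check_tfrecords(filename):
    # Count the serialized records and decode the first one to confirm the schema.
    count = 0
    first = None
    for serialized in tf.python_io.tf_record_iterator(filename):
        if first is None:
            first = tf.train.Example.FromString(serialized)
        count += 1
    print('records: %d' % count)
    print(first)

check_tfrecords('reg.tfrecords')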

2. Build the model (model.py)

import tensorflow as tf

class Logistic(object):
    def __init__(self, config, data, label):
        self.data = data
        self.label = label
        data_dim = config.data_dim
        label_dim = config.label_dim
        lr = config.learning_rate

        softmax_w = tf.get_variable('softmax_w', shape=[data_dim, label_dim])
        softmax_b = tf.get_variable('softmax_b', shape=[label_dim])

        with tf.name_scope('logits'):
            self.logits = tf.matmul(self.data, softmax_w) + softmax_b
        with tf.name_scope('loss'):
            # sparse_softmax_cross_entropy_with_logits expects raw logits,
            # so no extra epsilon term is needed here.
            self.loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.label))
        self.prediction = tf.argmax(self.logits, axis=1)
        self.correct = tf.cast(tf.equal(self.prediction, self.label), tf.float32)
        self.accuracy = tf.reduce_mean(self.correct)
        self.train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(self.loss)
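
To smoke-test the graph without the TFRecord pipeline, the model can also be fed through placeholders (a minimal sketch with random data; the Config class here just mirrors the one defined in the training script below):

import numpy as np
import tensorflow as tf
from model import Logistic

class Config(object):
    data_dim = 2
    label_dim = 2
    learning_rate = 0.01

data_ph = tf.placeholder(tf.float32, shape=[None, 2])
label_ph = tf.placeholder(tf.int64, shape=[None])
with tf.variable_scope('model'):
    model = Logistic(config=Config(), data=data_ph, label=label_ph)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # One training step on a random batch, just to confirm the graph runs.
    x = np.random.randn(8, 2).astype(np.float32)
    y = np.random.randint(0, 2, size=8)
    _, loss_val = sess.run([model.train_op, model.loss],
                           feed_dict={data_ph: x, label_ph: y})
    print('loss on one random batch: %f' % loss_val)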

3. Build the input pipeline and run training

import tensorflow as tf
from model import Logistic


def read_my_file_format(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # Parse one Example using the same schema that was written in step 1.
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'data': tf.FixedLenFeature([2], tf.float32),
                                           'label': tf.FixedLenFeature([], tf.int64)
                                       })
    return features['data'], features['label']


def input_pipeline(filename, batch_size, num_epochs=100):
    # string_input_producer with num_epochs creates a local counter variable,
    # so local_variables_initializer() must be run before training starts.
    filename_queue = tf.train.string_input_producer([filename], num_epochs=num_epochs, shuffle=True)
    data, label = read_my_file_format(filename_queue)
    # Five threads read and parse records in parallel and feed a shuffling queue.
    datas, labels = tf.train.shuffle_batch([data, label], batch_size=batch_size, num_threads=5,
                                           capacity=1000 + 3 * batch_size, min_after_dequeue=1000)
    return datas, labels


class Config(object):
    data_dim = 2
    label_dim = 2
    learning_rate = 0.01
    init_scale = 0.01


def run_training():
    with tf.Graph().as_default(), tf.Session() as sess:
        datas, labels = input_pipeline('reg.tfrecords', 32)
        config = Config()
        initializer = tf.random_uniform_initializer(-1 * config.init_scale, 1 * config.init_scale)

        with tf.variable_scope('model', initializer=initializer):
            model = Logistic(config=config, data=datas, label=labels)
        fetches = [model.train_op, model.accuracy, model.loss]

        # Initialize both global variables (model weights) and local variables
        # (the epoch counter created by string_input_producer).
        init_op = tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        )
        sess.run(init_op)

        # Start the queue-runner threads that fill the shuffle_batch queue.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            while not coord.should_stop():
                _, acc_val, loss_val = sess.run(fetches)
                print('the loss is %f and the accuracy is %f' % (loss_val, acc_val))
        except tf.errors.OutOfRangeError:
            # Raised once the input queue has delivered num_epochs passes over the data.
            print('Done training: epoch limit reached.')
        finally:
            coord.request_stop()
        coord.join(threads)


def main():
    run_training()


if __name__ == '__main__':
    main()
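
For reference, the queue-based API used above (string_input_producer, shuffle_batch, queue runners) was later superseded by tf.data. A rough equivalent of input_pipeline built on tf.data.TFRecordDataset might look like the sketch below (an alternative, not part of the original demo); its output tensors can be passed to Logistic in the same way, and no Coordinator or queue-runner threads are needed:

def dataset_input_pipeline(filename, batch_size, num_epochs=100):
    def parse_fn(serialized):
        features = tf.parse_single_example(serialized,
                                           features={
                                               'data': tf.FixedLenFeature([2], tf.float32),
                                               'label': tf.FixedLenFeature([], tf.int64)
                                           })
        return features['data'], features['label']

    dataset = (tf.data.TFRecordDataset(filename)
               .map(parse_fn, num_parallel_calls=4)  # parse records on several threads
               .shuffle(buffer_size=1000)
               .repeat(num_epochs)
               .batch(batch_size))
    # get_next() raises tf.errors.OutOfRangeError once num_epochs passes are exhausted,
    # just like the queue-based pipeline.
    return dataset.make_one_shot_iterator().get_next()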