Using TensorBoard with TensorFlow on Windows: an improved setup with a custom data-reading function

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tensorflow as tf
import sys
import numpy as np
import collections
from sklearn import preprocessing
import time
from tensorflow.python.ops import array_ops

Datasets = collections.namedtuple('Datasets', ['train', 'test'])

def input_fn(data_set):
    # convert the csv file to a matrix
    data_matrix = np.loadtxt(open(data_set, "rb"), delimiter=",", skiprows=0)
    np.random.shuffle(data_matrix)
    data = data_matrix[:, :62]
    label_1 = data_matrix[:, 62:63]
    length = len(data)

    perm = np.arange(length)
    np.random.shuffle(perm)
    print(perm)
    data = data[perm]
    label_1 = label_1[perm]

    # one-hot encode the label column
    enc = preprocessing.OneHotEncoder()
    enc.fit(label_1)
    label_1 = enc.transform(label_1).toarray()

    # reshape the one-hot labels to (length, 8): one column per class
    label = np.reshape(label_1, (length, 8))

    # convert to float32 arrays
    data = np.asarray(data, dtype=np.float32)
    label = np.asarray(label, dtype=np.float32)

    # train / eval split
    train_data = data[:70000]
    eval_data = data[70000:]
    train_labels = label[:70000]
    eval_labels = label[70000:]

    return train_data, train_labels, eval_data, eval_labels
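
# Note: input_fn assumes table_9.csv stores 62 numeric feature columns followed by
# one label column whose values one-hot encode into 8 classes, and that the file has
# more than 70000 rows so the fixed split point leaves examples for evaluation.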



# DataSet: batch-reading helper class used during training
class DataSet(object):

    def __init__(self,
                 data,
                 labels,
                 one_hot=False,
                 dtype=np.float32,
                 reshape=True):
            
           
        self._num_examples = data.shape[0]

        if reshape:
            # the data is already 2-D, so this reshape is a no-op kept for symmetry
            data = data.reshape(data.shape[0], data.shape[1])
        if dtype == np.float32:
            data = data.astype(np.float32)
        self._data = data
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
        
    @property
    def data(self):
        return self._data
            
    @property
    def labels(self):
        return self._labels
            
    @property
    def num_examples(self):
        return self._num_examples
            
    @property
    def epochs_completed(self):
        return self._epochs_completed
        
        
    def next_batch(self, batch_size, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        start = self._index_in_epoch

        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = np.arange(self._num_examples)
            np.random.shuffle(perm0)
            self._data = self.data[perm0]
            self._labels = self.labels[perm0]
                    
        # go to the next epoch
        if start + batch_size > self._num_examples:
            # finished epoch
            self._epochs_completed += 1
            # get the rest of the examples in this epoch
            rest_num_examples = self._num_examples - start
            data_rest_part = self.data[start:self._num_examples]
            labels_rest_part = self._labels[start:self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = np.arange(self._num_examples)
                np.random.shuffle(perm)
                self._data = self.data[perm]
                self._labels = self.labels[perm]
                        
            # Start the next epoch with the remaining examples
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            data_new_part = self._data[start:end]
            labels_new_part = self._labels[start:end]
            return (np.concatenate((data_rest_part, data_new_part), axis=0),
                    np.concatenate((labels_rest_part, labels_new_part), axis=0))
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._data[start:end], self._labels[start:end]
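
    # Minimal usage sketch (hypothetical names, not used elsewhere in this script):
    #   ds = DataSet(train_data, train_labels)
    #   xs, ys = ds.next_batch(128)   # xs: (128, 62), ys: (128, 8)
    # When a batch crosses the end of an epoch, the leftover examples are concatenated
    # with the start of the reshuffled next epoch so every batch has batch_size rows.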

          
                    
def read_data_sets(fake_data=False,
                   one_hot=False,
                   dtype=np.float32,
                   reshape=True):

    train_data, train_labels, test_data, test_labels = input_fn('table_9.csv')

    train = DataSet(train_data, train_labels, dtype=dtype, reshape=reshape)
    test = DataSet(test_data, test_labels, dtype=dtype, reshape=reshape)

    return Datasets(train=train, test=test)

#Focal loss

def focal_loss(prediction_tensor, target_tensor, weights=None, alpha=0.25, gamma=2):
    r"""Compute focal loss for predictions.
        Multi-label focal loss formula:
            FL = -alpha * (z-p)^gamma * log(p) - (1-alpha) * p^gamma * log(1-p)
        where alpha = 0.25, gamma = 2, p = sigmoid(x), z = target_tensor.
    Args:
     prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
     target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
     weights: A float tensor of shape [batch_size, num_anchors]
     alpha: A scalar tensor for focal loss alpha hyper-parameter
     gamma: A scalar tensor for focal loss gamma hyper-parameter
    Returns:
        loss: A (scalar) tensor representing the value of the loss function
    """
    sigmoid_p = tf.nn.sigmoid(prediction_tensor)
    zeros = array_ops.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)
    
    # For positive targets (z = 1) only the front term contributes; the back term is 0,
    # so the positive coefficient is z - p.
    pos_p_sub = array_ops.where(target_tensor > zeros, target_tensor - sigmoid_p, zeros)
    
    # For negative targets (z = 0) only the back term contributes; where z = 1 the
    # negative coefficient is set to 0.
    neg_p_sub = array_ops.where(target_tensor > zeros, zeros, sigmoid_p)
    per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \
                          - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))
    return tf.reduce_sum(per_entry_cross_ent)
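
# Quick sanity check for focal_loss (a hedged sketch assuming TF 1.x graph mode;
# the tensors below are illustrative and not used elsewhere in this script):
#   logits  = tf.constant([[2.0, -2.0]])
#   targets = tf.constant([[1.0,  0.0]])
#   with tf.Session() as sess:
#       print(sess.run(focal_loss(logits, targets)))   # prints a small positive scalar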


class CNN:
    def __init__(self, alpha, batch_size, num_classes, num_features):
        """Initialize the CNN model.
        :param alpha: the learning rate to be used by the model
        :param batch_size: the number of examples per training batch
        :param num_classes: the number of classes in the dataset
        :param num_features: the number of features in the dataset
        """

        self.alpha = alpha
        self.batch_size = batch_size
        self.name = 'CNN'
        self.num_classes = num_classes
        self.num_features = num_features
        
        def __graph__():
        
            # [batch_size, num_features]
            x_input = tf.placeholder(dtype=tf.float32, shape=[None, num_features], name='x_input')

            # [batch_size, num_classes]
            y_input = tf.placeholder(dtype=tf.float32, shape=[None, num_classes], name='actual_label')

            # reshape the flat feature vector to NHWC: [batch, 62, 1, 1]
            input_layer = tf.reshape(x_input, [-1, 62, 1, 1])
            
          
            # convolution layer 1
            conv = tf.layers.conv2d(input_layer, filters=6, kernel_size=[3, 1], padding='valid')
            norm = tf.layers.batch_normalization(conv)
            activation = tf.nn.relu(norm)
            pool = tf.layers.max_pooling2d(activation, pool_size=[2, 1], strides=2)
            pool = tf.layers.dropout(inputs=pool, rate=0.2)

            # convolution layer 2: two stacked conv + batch-norm + relu blocks
            conv = tf.layers.conv2d(pool, filters=8, kernel_size=[3, 1], padding='same')
            norm = tf.layers.batch_normalization(conv)
            activation = tf.nn.relu(norm)

            conv = tf.layers.conv2d(activation, filters=8, kernel_size=[3, 1], padding='same')
            norm = tf.layers.batch_normalization(conv)
            activation = tf.nn.relu(norm)
            pool = tf.layers.max_pooling2d(activation, pool_size=[2, 1], strides=2)
            pool = tf.layers.dropout(inputs=pool, rate=0.5)
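
            # Shape check for the 62-feature input: 62x1 -> conv [3,1] 'valid' -> 60x1
            # -> max-pool [2,1] stride 2 -> 30x1 -> conv [3,1] 'same' -> 30x1 -> conv 'same'
            # -> 30x1 -> max-pool [2,1] stride 2 -> 15x1 with 8 filters, i.e. 15 * 1 * 8 = 120
            # values per example, matching the reshape to [-1, 1*15*8] below.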
            
            
            # Dropout, to avoid over-fitting; keep_prob is fed from the training loop
            # (0.3 while training, 1.0 at evaluation time)
            keep_prob = tf.placeholder(tf.float32)
            dropout = tf.nn.dropout(pool, keep_prob=keep_prob)

            # flatten the abstract features
            flat_1 = tf.reshape(dropout, [-1, 1 * 15 * 8])
            
            
            # fully connected layers
            dense = tf.layers.dense(flat_1, units=120, activation=tf.nn.relu)
            dense_ = tf.layers.dense(dense, units=152, activation=tf.nn.relu)

            # classification logits (8 classes)
            digit1 = tf.layers.dense(dense_, units=8)
    
           
        
            # loss function: focal loss over the 8-class logits
            digit1_loss = tf.reduce_mean(focal_loss(digit1, y_input, gamma=2))

            loss = digit1_loss
            tf.summary.scalar('loss', loss)
            optimizer = tf.train.AdamOptimizer(learning_rate=alpha).minimize(loss)
            
            # accuracy
            digit1 = tf.identity(tf.nn.softmax(digit1))

            output = tf.argmax(digit1, 1)
            label = tf.argmax(y_input, 1)

            correct_pred = tf.equal(output, label)
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            
            tf.summary.scalar('accuracy', accuracy)
            
            merged = tf.summary.merge_all()
            
            self.x_input = x_input
            self.y_input = y_input
            self.keep_prob = keep_prob
            self.digit1 = digit1
            self.loss = loss
            self.optimizer = optimizer
            self.accuracy = accuracy
            self.merged = merged
            
        sys.stdout.write('\n<log> Building graph...')
        __graph__()
        sys.stdout.write('</log>\n')
        

    def train(self, checkpoint_path, epochs, log_path, train_data, test_data):
        """Trains the initialized model.
        :param checkpoint_path: The path where to save the trained model.
        :param epochs: The number of training steps (batches) to run.
        :param log_path: The path where to save the TensorBoard logs.
        :param train_data: The training dataset.
        :param test_data: The testing dataset.
        :return: None
        """

        # create the log and checkpoint directories if they do not exist
        if not os.path.exists(log_path):
            os.makedirs(log_path)

        if not os.path.exists(checkpoint_path):
            os.makedirs(checkpoint_path)
            
        
        saver= tf.train.Saver(max_to_keep=4)
        
        init = tf.global_variables_initializer()
        
        timestamp = str(time.asctime())
            
        train_writer = tf.summary.FileWriter(logdir=log_path +'-training', graph=tf.get_default_graph())
        
        with tf.Session() as sess:
            sess.run(init)
            
            checkpoint = tf.train.get_checkpoint_state(checkpoint_path)
            
            if checkpoint and checkpoint.model_checkpoint_path:
                saver = tf.train.import_meta_graph(checkpoint.model_checkpoint_path + '.meta')
                saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))
                
            for index in range(epochs): 
                #train by batch
                batch_features, batch_labels = train_data.next_batch(self.batch_size)
                
                # input dictionary; keep_prob = 0.3 keeps 30% of activations during training
                feed_dict = {self.x_input: batch_features, self.y_input: batch_labels, self.keep_prob: 0.3}
                
                # run the train op
                summary, _, loss = sess.run([self.merged, self.optimizer, self.loss], feed_dict=feed_dict)
                
                if index % 100 ==0:
                    feed_dict = {self.x_input: batch_features, self.y_input: batch_labels, self.keep_prob: 1.0}
                    # get the accuracy of training
                    train_accuracy = sess.run(self.accuracy, feed_dict=feed_dict)
                    
                    # display the training accuracy
                    print('step: {}, training accuracy : {}, training loss : {}'.format(index, train_accuracy, loss))
                    
                    train_writer.add_summary(summary=summary, global_step=index)
                    saver.save(sess, save_path=os.path.join(checkpoint_path, self.name), global_step=index)
                    
            test_features = test_data.data
            test_labels = test_data.labels

            # evaluate on the full test set with dropout disabled (keep_prob = 1.0)
            feed_dict = {self.x_input: test_features, self.y_input: test_labels, self.keep_prob: 1.0}
            test_accuracy = sess.run(self.accuracy, feed_dict=feed_dict)
            print('Test Accuracy: {}'.format(test_accuracy))
         


if __name__ == '__main__':
    
    data = read_data_sets()
    train_data = data.train
    test_data = data.test
    num_classes = 8
    num_features = 62
    model = CNN(alpha=0.002, batch_size=128, num_classes=num_classes, num_features=num_features)
    model.train(checkpoint_path='C:/tmp/convnet_model6', epochs=50000, log_path='C:/tmp/tensorflow/logs',
                train_data=train_data, test_data=test_data)

Here,

model.train(checkpoint_path='C:/tmp/convnet_model6', epochs=50000, log_path='C:/tmp/tensorflow/logs',
            train_data=train_data, test_data=test_data)

can also be written with Windows-style backslash paths (raw strings) as:

model.train(checkpoint_path=r'C:\tmp\convnet_model6', epochs=50000, log_path=r'C:\tmp\tensorflow\logs',
            train_data=train_data, test_data=test_data)
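
Note that the FileWriter in train() appends '-training' to log_path, so with the call above the event files end up in C:/tmp/tensorflow/logs-training. To view the loss and accuracy scalars written by tf.summary.scalar, point TensorBoard at that directory (or at its parent C:/tmp/tensorflow) from a Windows command prompt, assuming TensorBoard is installed and on the PATH:

tensorboard --logdir=C:/tmp/tensorflow/logs-training

Then open http://localhost:6006 in a browser.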



