Reading CSV files with TensorFlow and training a neural network

1. Reading the CSV file

import tensorflow as tf

# Build a FIFO filename queue (backed by a QueueRunner) from the file list
def read_batch(filenames, batchsize):
    filename_queue = tf.train.string_input_producer(filenames, shuffle=False)
    # Define the Reader: one CSV line per read
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)
    # Define the Decoder: 65 float columns, where column 0 is the label and
    # columns 1-64 are the features (this must match the CSV layout)
    record_defaults = [[0.0] for _ in range(65)]
    data = tf.decode_csv(value, record_defaults=record_defaults)

    # decode_csv returns a list of scalar tensors; stack the feature columns
    # into one [64] tensor so that batching yields shape [batchsize, 64]
    features = tf.stack(data[1:])
    # Optional min-max normalization:
    # features = (features - tf.reduce_min(features)) / (tf.reduce_max(features) - tf.reduce_min(features))
    label = tf.cast(data[0], tf.int32)  # one_hot takes integer class indices
    label = tf.one_hot(label, 2)
    features_batch, label_batch = tf.train.batch(
        [features, label], batch_size=batchsize, capacity=3 * batchsize)
    return features_batch, label_batch
# Quick check that the file is read correctly
if __name__ == '__main__':
    filenames = ['test_set.csv']
    example_batch, label_batch = read_batch(filenames, 2)
    # Run the graph
    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
        coord = tf.train.Coordinator()  # the coordinator manages the queue threads
        threads = tf.train.start_queue_runners(coord=coord)
        for i in range(5):
            e_val, l_val = sess.run([example_batch, label_batch])
            print(e_val.shape, l_val)
        coord.request_stop()
        coord.join(threads)
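
As an aside, TensorFlow 1.4+ also ships the tf.data API, which replaces the queue/QueueRunner machinery used above. Below is a minimal sketch of an equivalent input pipeline; it assumes the same 65-column layout and is an alternative, not the code used in the rest of this post.

import tensorflow as tf

def make_batch(filenames, batchsize):
    record_defaults = [[0.0]] * 65        # same layout: label + 64 features
    def parse_line(line):
        cols = tf.decode_csv(line, record_defaults=record_defaults)
        features = tf.stack(cols[1:])     # shape [64]
        label = tf.one_hot(tf.cast(cols[0], tf.int32), 2)
        return features, label
    dataset = (tf.data.TextLineDataset(filenames)
               .map(parse_line)
               .repeat()                  # loop over the files indefinitely
               .batch(batchsize))
    return dataset.make_one_shot_iterator().get_next()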

2. Model training
1) Logistic regression model
Logistic (softmax) regression needs only a single output layer, one neuron per class, with a softmax applied to the output.
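For reference, with logits z = xW + b the softmax output is softmax(z)_i = exp(z_i) / Σ_j exp(z_j), and with one-hot labels y the cross-entropy loss minimized below is -Σ_i y_i · log(softmax(z)_i).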

import tensorflow as tf
from read_data import read_batch

# Logistic regression model: one linear layer followed by softmax
def logistic(x, num_input, num_output):
    w = tf.Variable(tf.truncated_normal([num_input, num_output], stddev=0.1))
    b = tf.Variable(tf.constant(0.0, shape=[num_output]))

    soft_value = tf.nn.softmax(tf.matmul(x, w) + b)
    return soft_value
# Model parameters (num_input must match the number of feature columns
# that read_batch produces for these files)
num_input = 404
num_output = 2
batchsize = 400
train_file = ['train_set1.csv']
test_file = ['test_set1.csv']
train_feature, train_label = read_batch(train_file, batchsize)
test_feature, test_label = read_batch(test_file, batchsize)

x = tf.placeholder(tf.float32, [None, num_input])
y = tf.placeholder(tf.float32, [None, num_output])

soft_value = logistic(x, num_input, num_output)

# Cross-entropy loss; clip the softmax output away from 0 so tf.log
# cannot produce NaN
loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(tf.clip_by_value(soft_value, 1e-10, 1.0)), axis=1))

# Update the parameters with gradient descent
optimizer = tf.train.GradientDescentOptimizer(0.01)

# Training op
train = optimizer.minimize(loss)


# Accuracy: fraction of predictions that match the labels
pred = tf.equal(tf.argmax(soft_value, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(pred, tf.float32))

# Number of training iterations
train_epochs = 2000

# Print progress every display_step iterations
display_step = 100

with tf.Session() as sess:  # start a session
    # Initialize variables, then start the queue runners under a coordinator
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(train_epochs):
        train_x, train_y = sess.run([train_feature, train_label])
        sess.run(train, feed_dict={x: train_x, y: train_y})
        if i % display_step == 0:
            train_cost, train_acc = sess.run([loss, accuracy], feed_dict={x: train_x, y: train_y})
            print("step:%d,cost:%f,train_acc:%f" % (i, train_cost, train_acc))
    # Test set: average accuracy over 200 batches
    test_avg_acc = 0.0
    for j in range(200):
        test_x, test_y = sess.run([test_feature, test_label])
        test_cost, test_acc = sess.run([loss, accuracy], feed_dict={x: test_x, y: test_y})
        print("step:%d,cost:%f,test_acc:%f" % (j, test_cost, test_acc))
        test_avg_acc += test_acc / 200
    print('test_acc:%f' % test_avg_acc)
    coord.request_stop()
    coord.join(threads)
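
Note that computing the cross-entropy by hand as -Σ y·log(softmax) is numerically fragile: if the softmax saturates to exactly 0, the log yields NaN, which is why the code above clips its input. The fully connected model in the next section sidesteps the issue by using tf.nn.softmax_cross_entropy_with_logits, which works directly on the raw logits.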

2) Fully connected neural network
This is effectively logistic regression with many nodes, which lets it handle linearly non-separable data.
We build a fully connected network with two hidden layers, using ReLU as the activation function.

import tensorflow as tf
from read_data import read_batch

# Fully connected network with two hidden layers and ReLU activations
def neural_net(x, num_input, num_classes):
    n_hidden_1 = 200
    n_hidden_2 = 50
    weights = {
        'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
        'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
        'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'b2': tf.Variable(tf.random_normal([n_hidden_2])),
        'out': tf.Variable(tf.random_normal([num_classes]))
    }

    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)

    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)

    # Output layer: raw logits (softmax is applied outside the model)
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Model parameters
batchsize = 200
num_input = 404
num_classes = 2
train_file = ['train_set1.csv']
test_file = ['test_set1.csv']
train_x, train_y = read_batch(train_file, batchsize)
test_x, test_y = read_batch(test_file, batchsize)

x = tf.placeholder(tf.float32, [None, num_input])
y = tf.placeholder(tf.float32, [None, num_classes])

predict = neural_net(x, num_input, num_classes)

soft_predict = tf.nn.softmax(predict)

# Numerically stable cross-entropy computed directly from the logits
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predict, labels=y))
learning_rate = 0.01
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Accuracy
correct_prediction = tf.equal(tf.argmax(soft_predict, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_epochs = 2000
display_step = 100

with tf.Session() as sess:  # start a session
    # Initialize variables, then start the queue runners under a coordinator
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(train_epochs):
        x_, y_ = sess.run([train_x, train_y])
        _, loss, acc = sess.run([train_step, cost, accuracy], feed_dict={x: x_, y: y_})
        if i % display_step == 0:
            print("step:%d,cost:%f,train_acc:%f" % (i, loss, acc))

    # Test set: average accuracy over 200 batches
    test_avg_acc = 0.0
    for j in range(200):
        t_x, t_y = sess.run([test_x, test_y])
        test_cost, test_acc = sess.run([cost, accuracy], feed_dict={x: t_x, y: t_y})
        print(test_cost, test_acc)
        test_avg_acc += test_acc / 200
    print('test_acc:%f' % test_avg_acc)
    coord.request_stop()
    coord.join(threads)

If the cost does not converge, one likely cause is a learning rate that is too large, so try reducing it. It can also be a data problem. In my case it was the data: switching to a different set of features gave much better results.
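
If reducing a fixed learning rate is not enough, a decaying schedule often helps. Here is a minimal sketch using tf.train.exponential_decay; the initial rate and decay values are illustrative, not tuned.

# Decay the learning rate by 10% every 500 steps (illustrative values)
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(0.01, global_step,
                                           decay_steps=500, decay_rate=0.9,
                                           staircase=True)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(
    cost, global_step=global_step)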

Note: because the data is read through queues, starting the queue runners is not enough; you also need a coordinator to manage the threads.
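
A common robust version of that lifecycle wraps the training loop so the threads are always joined, even if the queue runs dry or an error is raised. This is a sketch; train stands in for any of the training ops defined above.

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
    while not coord.should_stop():
        sess.run(train)                  # any training op from above
except tf.errors.OutOfRangeError:
    print('input queue exhausted, stopping training')
finally:
    coord.request_stop()                 # ask all threads to stop
    coord.join(threads)                  # wait for them to finish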
