做这个简单LSTM网络的原因:之前尝试做一个多分类的网络,但尝试了半个多月,结果一直十分不理想。因为是初学者,难免怀疑自己是否真的正确使用Tensorflow搭建了LSTM网络,因此决定使用之前同样的LSTM网络来对数学函数进行预测分类,如果能正确分类,即可确定逻辑正确。
数据:自己使用numpy生成的一组sin函数和tan函数值,x的范围随机,使用np.linspace()等分割,分割段数最大为100(不定长),然后数据文件名'-'后面的数字就是标签,sin是0,tan是1
由于很简单就不贴代码了,但是为了写得更清楚,我直接把数据上传到网盘了,百度云盘下载地址:数据。数据集比较小,大家可以自行生成更大的数据集。很抱歉数据没有了,我也懒得找了,直接将生成数据的代码更改一下,并将代码里面的一些小问题改一下,同时将预测一个数据改成预测多个数据。下面就直接贴出代码:
#使用LSTM网络看是否可以成功分类sin函数和tan函数
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import random
import math
num_steps = 100
batch_size = 2
state_size = 10
num_features = 1
num_classes = 2
learning_rate = 1e-4
# 生成数据的函数
def get_data_set(data_num):
label = np.array([random.randint(0,1) for _ in range(data_num)])
data = np.zeros((data_num,num_steps),dtype=np.float32)
# 数据随机产生并填充,原来是再末尾填充0,直接生成数据的话我就随机填充,更符合实际情况
for data_index in range(data_num):
if label[data_index]==1:
data_list=[i for i in map(math.tan,np.linspace(random.random()*math.pi,random.randint(15,100),random.randint(50,100)))]
else:
data_list=[i for i in map(math.sin,np.linspace(random.random()*math.pi,random.randint(15,100),random.randint(50,100)))]
for i in range(num_steps-len(data_list)):
data_list.insert(random.randint(0,len(data_list)),0)
data[data_index] = np.array(data_list)
return data,label
#数据在读取时已经做好了填充,且已是打乱的数据
def getDataAndLabel(raw_data,raw_label,num_steps,batch_size):
data = np.zeros([batch_size,num_steps,num_features],np.float32)
labels = np.zeros([batch_size,1],np.int32)
# 根据batch_size确定单epoch训练步数
for step_index in range(len(raw_data)//batch_size):
data = raw_data[step_index*batch_size:(step_index+1)*batch_size].reshape(batch_size,-1,num_features)
labels = raw_label[step_index*batch_size:(step_index+1)*batch_size].reshape(batch_size,1)
yield data,labels
def genEpochs(num_epochs,raw_data,raw_label,num_steps,batch_size):
for i in range(num_epochs):
yield getDataAndLabel(raw_data,raw_label,num_steps,batch_size)
def buildNewLSTMGraph(
num_steps = num_steps,
batch_size = batch_size,
state_size = state_size,
learning_rate = learning_rate):
#初始化视图
tf.reset_default_graph()
#给定输入和标签管道
x = tf.placeholder(shape=[batch_size,num_steps,num_features],dtype=tf.float32)
y = tf.placeholder(shape=[batch_size,1],dtype=tf.int32)
#使用静态list作为输入和标签,squeeze返回的是一个张量
x_temp = tf.squeeze(tf.split(value=x,num_or_size_splits=num_steps,axis=1),axis=2)
rnn_inputs = list()
rnn_labels = list()
for i in range(x_temp.shape[0]):
rnn_inputs.append(x_temp[i])
for i in range(y.shape[0]):
rnn_labels.append(tf.one_hot(y[i],depth=num_classes))
#定义网络节点
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=state_size,state_is_tuple=True)
init_state = cell.zero_state(batch_size,tf.float32)
rnn_outpus, final_state = tf.contrib.rnn.static_rnn(cell=cell,initial_state=init_state,inputs=rnn_inputs)
#使用LSTM的最后状态对函数进行预测
with tf.variable_scope('fullyConnLayer'):
#第一层全连接层,输出状态为64种
W1 = tf.get_variable('W1',shape=[state_size,64])
b1 = tf.get_variable('b1',shape=[64],initializer=tf.constant_initializer(dtype=tf.float32,value=0))
tempState1 = [tf.matmul(final_state[1],W1) + b1]
tempState1 = tf.nn.relu(tempState1)
#第二层全连接层,输出状态为32种
W2 = tf.get_variable('W2',shape=[64,32])
b2 = tf.get_variable('b2',shape=[32],initializer=tf.constant_initializer(dtype=tf.float32,value=0))
tempState2 = [tf.matmul(tempState1[0],W2) + b2]
tempState2 = tf.nn.relu(tempState2)
#第三层全连接层,进行分类
W3 = tf.get_variable('W3',shape=[32,num_classes])
b3 = tf.get_variable('b3',shape=[num_classes],initializer=tf.constant_initializer(dtype=tf.float32,value=0))
logits = [tf.matmul(tempState2[0],W3) + b3]
#softmax的话则计算交叉熵
losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels=rnn_labels,logits=logits)
total_loss = tf.reduce_mean(losses)
#优化器的选择
train_step = tf.train.AdamOptimizer(learning_rate).minimize(losses)
return dict(
x = x,
y = y,
init_state = init_state,
final_state = final_state,
total_loss = total_loss,
train_step = train_step,
pred = tf.nn.softmax(logits),
labels = rnn_labels,
)
#这一部分是用来做最初的训练,主要是将模型数据保存下来以便后面继续训练
#我比较懒,没有保存交叉熵最小的几个模型,大家如有需要自己加个判断即可
def train_rnn(g,num_epochs,batch_size=batch_size,num_steps=num_steps):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_losses = []
for idx,epoch in enumerate(genEpochs(batch_size=batch_size,num_epochs=num_epochs,num_steps=num_steps,
raw_data=dataArray,raw_label=labelArray)):
train_loss = 0
step = 0
training_state = None
for X,Y in epoch:
step +=1
feed_dict = {g['x']:X,g['y']:Y}
if(training_state != None):
feed_dict[g['init_state']] = training_state
train_loss_, training_state,_ = sess.run([g['total_loss'],
g['final_state'],
g['train_step']],
feed_dict = feed_dict)
train_loss += train_loss_
print('epoch: {0}的平均损失值:{1}'.format(idx, train_loss/step))
train_losses.append(train_loss/step)
#保存模型
saver=tf.train.Saver(max_to_keep=1)
path = saver.save(sess,'Saved_model/funcClassify')
print("保存至",path)
return train_losses
# 生成训练数据
dataArray,labelArray=get_data_set(200)
g = buildNewLSTMGraph()
train_rnn(g,3)
#执行下面代码可以对保存的神经网络进行再次训练
g = buildNewLSTMGraph()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
num_epochs = 500
with sess:
saver = tf.train.Saver()
saver.restore(sess,'Saved_model/funcClassify')
train_losses = []
for idx,epoch in enumerate(genEpochs(batch_size=batch_size,num_epochs=num_epochs,
num_steps=num_steps,raw_data=dataArray,raw_label=labelArray)):
train_loss = 0
step = 0
prediction = list()
labels = list()
training_state = None
for X,Y in epoch:
step +=1
feed_dict = {g['x']:X,g['y']:Y}
if(training_state != None):
feed_dict[g['init_state']] = training_state
train_loss_, training_state,_ = sess.run([g['total_loss'],
g['final_state'],
g['train_step']],
feed_dict = feed_dict)
train_loss += train_loss_
print('epoch: {0}的平均损失值:{1}'.format(idx, train_loss/step))
#保存模型
saver=tf.train.Saver(max_to_keep=1)
path = saver.save(sess,'Saved_model/funcClassify')
print('模型已保存到',path)
由于数据集较小不太好训练,不过正好练习了一下手动调学习率:训练到300多epochs时交叉熵出现第一次骤减;以1e-4的学习率训练,交叉熵最终在0.000135左右抖动;将学习率降到1e-5后,再经过约1000个epochs降至0.000118左右。后面再对学习率进行调整就请大家自己尝试了(其实是因为我又把学习率改小了,然后一直没训练出来,交叉熵反而一直增大,如下图)。
仅看交叉熵没什么意思,对网络进行测试一下,看是否能成功区分两个函数的输入,只需要对上述代码进行简单调整,并再次随机生成数据(如果自行生成数据,则直接留一定比例数据做测试数据即可),修改部分如下:
#生成测试数据
dataArray,labelArray=get_data_set(50)
#下面则段代码只和上面有一点点改动,我已经标出
g = buildNewLSTMGraph()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# num_epochs = 1即可,因为模型没有训练,预测结果不会变,这里是给大家看一下确实不会变
num_epochs = 5
with sess:
saver = tf.train.Saver()
saver.restore(sess,'Saved_model/funcClassify')
train_losses = []
for idx,epoch in enumerate(genEpochs(batch_size=batch_size,num_epochs=num_epochs,
num_steps=num_steps,raw_data=dataArray,raw_label=labelArray)):
train_loss = 0
step = 0
prediction = list()
labels = list()
training_state = None
for X,Y in epoch:
step +=1
feed_dict = {g['x']:X,g['y']:Y}
if(training_state != None):
feed_dict[g['init_state']] = training_state
#将g['train_step']替换为g['pred'],这样网络将不会进行优化,只用于预测
train_loss_, training_state,pred = sess.run([g['total_loss'],
g['final_state'],
g['pred']],
feed_dict = feed_dict)
train_loss += train_loss_
#将预测结果输出,使用argmax获得类标签,每次只输出最后两个预测结果
print(pred)
print('epoch: {0}的平均损失值:{1}'.format(idx, train_loss/step))
这是我自己的预测结果:
可以看出是能够准确对其进行分类的,这无疑让我心安了一点点。