What is an LSTM RNN (Recurrent Neural Network) in Deep Learning?
What is an LSTM?
"长短期记忆(Long short-term memory, LSTM)是一种特殊的RNN,主要是为了解决长序列训练过程中的梯度消失和梯度爆炸问题。简单来说,就是相比普通的RNN,LSTM能够在更长的序列中有更好的表现。" [1]
The advantage of using an LSTM to extract temporal features vs. traditional feature extraction: HOG, LBP
The training process of the LSTM model
Results of running the LSTM model
Implementation code
# Code borrowed from: 骆旺达
# Modified by tony2278, 2020-04-09
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
# data.csv is assumed to hold a univariate time series; usecols=[1] keeps only its second column
data_csv = pd.read_csv('./data.csv', usecols=[1])
# plt.plot(data_csv)
# plt.show()
# Data preprocessing
data_csv = data_csv.dropna()         # drop missing values
dataset = data_csv.values            # get the raw values from the CSV
dataset = dataset.astype('float32')
max_value = np.max(dataset)          # maximum value
min_value = np.min(dataset)          # minimum value
scalar = max_value - min_value       # value range
dataset = list(map(lambda x: (x - min_value) / scalar, dataset))  # min-max normalization to [0, 1]
def create_dataset(dataset, look_back=2):
    """Build supervised pairs: each input is `look_back` consecutive values, the target is the next value."""
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)
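# Illustrative example (not from the original post): with look_back=2, the series
# [1, 2, 3, 4, 5] yields X = [[1, 2], [2, 3], [3, 4]] and Y = [3, 4, 5].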
# Build the input/output pairs
data_X, data_Y = create_dataset(dataset)
# Split into training and test sets; 70% is used for training
train_size = int(len(data_X) * 0.7)
test_size = len(data_X) - train_size
train_X = data_X[:train_size]
train_Y = data_Y[:train_size]
test_X = data_X[train_size:]
test_Y = data_Y[train_size:]
train_X = train_X.reshape(-1, 1, 2)
train_Y = train_Y.reshape(-1, 1, 1)
test_X = test_X.reshape(-1, 1, 2)
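# Note on shapes (added for clarity): nn.LSTM expects (seq_len, batch, input_size).
# Here the whole training set is fed as one long sequence (seq_len = number of samples),
# with batch size 1 and the two look-back values serving as the input features.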
train_x = torch.from_numpy(train_X)
train_y = torch.from_numpy(train_Y)
test_x = torch.from_numpy(test_X)
from torch import nn
from torch.autograd import Variable
class lstm(nn.Module):
    def __init__(self, input_size=2, hidden_size=4, output_size=1, num_layer=2):
        super(lstm, self).__init__()
        self.layer1 = nn.LSTM(input_size, hidden_size, num_layer)  # stacked LSTM layers
        self.layer2 = nn.Linear(hidden_size, output_size)          # map hidden state to the prediction

    def forward(self, x):
        x, _ = self.layer1(x)     # x: (seq_len, batch, hidden_size)
        s, b, h = x.size()
        x = x.view(s * b, h)      # flatten so the Linear layer sees (seq_len * batch, hidden_size)
        x = self.layer2(x)
        x = x.view(s, b, -1)      # restore (seq_len, batch, output_size)
        return x
model = lstm(2, 4, 1, 2)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
# Start training
for e in range(1000):
    var_x = Variable(train_x)
    var_y = Variable(train_y)
    # Forward pass
    out = model(var_x)
    loss = criterion(out, var_y)
    # Backward pass and parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (e + 1) % 100 == 0:  # print the loss every 100 epochs
        print('Epoch: {}, Loss: {:.5f}'.format(e + 1, loss.item()))
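# Optional (an addition, not part of the original post; the file name is arbitrary):
# save the trained weights so they can be reloaded later with
# model.load_state_dict(torch.load('lstm_weights.pth')).
torch.save(model.state_dict(), 'lstm_weights.pth')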
model = model.eval()  # switch to evaluation mode
data_X = data_X.reshape(-1, 1, 2)
data_X = torch.from_numpy(data_X)
var_data = Variable(data_X)
pred_test = model(var_data)  # predictions over the entire series (training + test portions)
# Flatten the output to a 1-D numpy array
pred_test = pred_test.view(-1).data.numpy()
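# Optional (an addition, not part of the original post): map the predictions back to the
# original scale by inverting the min-max normalization applied above.
pred_original_scale = pred_test * scalar + min_value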
# Collapse the list of single-element arrays back into one flat array for plotting
dataset = np.hstack(dataset)
print(dataset[:20])
# Plot the real series against the predictions
plt.plot(pred_test, 'r', label='prediction')
plt.plot(dataset, 'b', label='real')
plt.legend(loc='best')
plt.show()
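As a rough quantitative check (an addition to the borrowed code, relying on the fact that pred_test and data_Y are aligned element by element as constructed above), the mean squared error on the held-out 30% can be computed directly:

# Evaluate only the test portion (indices from train_size onward), on the normalized scale
test_mse = np.mean((pred_test[train_size:] - data_Y.flatten()[train_size:]) ** 2)
print('Test MSE (normalized scale): {:.5f}'.format(test_mse))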
Reference
[1] https://zhuanlan.zhihu.com/p/32085405