LSTM Prediction

Lstm example1

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import tensorflow as tf
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
%matplotlib inline


# Load the data
dataframe = pd.read_csv('C:/Users/Dell/Desktop/international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
dataset = dataframe.values
# Convert integers to float
dataset = dataset.astype('float32')
fig = plt.figure(figsize=(10, 5))
plt.plot(dataset)
plt.show()

# X is the number of passengers at a given time (t) and Y is the number of passengers at the next time (t + 1).

# Convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back-1):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)
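# A quick sanity check of create_dataset (illustrative values, not from the CSV):
# with look_back=1, each X row is the value at time t and Y is the value at t + 1.
demo = np.array([[112.], [118.], [132.], [129.]], dtype='float32')
demo_X, demo_Y = create_dataset(demo, look_back=1)
print(demo_X)  # [[112.] [118.]]
print(demo_Y)  # [118. 132.]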
# print(dataset)
# t1=create_dataset(dataset, look_back=1)
# print(t1)
# fix random seed for reproducibility
np.random.seed(7)
# Normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# print(dataset,'\n')

# Split into a training set (first 67%) and a test set (last 33%)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]

# Prepare data with X = t and Y = t + 1; at this point the shape is [samples, features]
# use this function to prepare the train and test datasets for modeling
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# (len(dataX), 3, 1) would unroll the LSTM over 3 time steps with input vectors of shape (1,)
# (len(dataX), 1, 3) would run a single time step with an input vector of shape (3,)
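# A small shape check of the two layouts above (illustrative): the same 3 values
# per sample can be fed as 3 steps of 1 feature or as 1 step of 3 features.
_demo = np.zeros((5, 3))
print(np.reshape(_demo, (5, 3, 1)).shape)  # (5, 3, 1): 3 time steps, 1 feature
print(np.reshape(_demo, (5, 1, 3)).shape)  # (5, 1, 3): 1 time step, 3 features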

#----------- Build the LSTM model -------------
# 1 input feature, 4 hidden units, and one predicted output value; the gates use sigmoid activations (the cell activation defaults to tanh). Train for 100 epochs with batch size 1.
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
# The LSTM layer has 4 hidden units h
model.add(Dense(1))
# The output is 1-dimensional (a single neuron)
model.compile(loss='mean_squared_error', optimizer='adam')
# Adam adapts each parameter's learning rate from first- and second-moment estimates of the gradients; each update step is bounded, so the parameters change smoothly.
# Adam is the most commonly used optimizer
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=0)
# batch_size: integer, number of samples per gradient update
# epochs: integer, number of passes over the training data
# verbose: integer, logging mode
#                0: no logging to stdout
#                1: progress bar
#                2: one line per epoch

#----------- Prediction -------------
# make predictions
trainPredict = model.predict(trainX,batch_size = 20)
testPredict = model.predict(testX,batch_size = 20)
# For predict(), batch_size is just the number of samples per inference batch (default 32); no gradient updates happen here
# Before computing errors, convert the predictions back to the original units
# by inverting the normalization with inverse_transform()
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

# Compute the root mean squared error (RMSE)
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
def mape(y_true, y_pred):
    return np.mean(np.abs((y_pred - y_true) / y_true)) * 100

def smape(y_true, y_pred):
    return 2.0 * np.mean(np.abs(y_pred - y_true) / (np.abs(y_pred) + np.abs(y_true))) * 100
# MAPE (mean absolute percentage error) divides by the true values, so it is undefined whenever a true value is 0
# SMAPE (symmetric MAPE) divides by |y_pred| + |y_true|, so it is undefined only when a true value and its prediction are both 0
print(mape(trainY[0], trainPredict[:,0]))
# This prints the MAPE between predictions and true values: about 8.36%
print(smape(testY[0], testPredict[:,0]))
# This prints the SMAPE between predictions and true values: about 9.67%
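# One possible guard against the zero-denominator cases noted above (an
# illustrative variant, not part of the original script): clip the
# denominator away from zero with a small epsilon.
def mape_safe(y_true, y_pred, eps=1e-8):
    return np.mean(np.abs((y_pred - y_true) / np.maximum(np.abs(y_true), eps))) * 100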

#---------- Plot the results: blue = original data, green = train-set predictions, red = test-set predictions ----------
# shift train predictions for plotting
fig = plt.figure(figsize=(10, 5))
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict

# shift test predictions for plotting
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict

# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()

LSTM Back-Propagation_test

import tensorflow as tf
import numpy as np

np.random.seed(0)
# Keras initializes parameters randomly, so results differ between runs; seeding numpy's RNG with seed(0) makes the random draws repeatable for verification
def get_crossentropy(y_pred, y_true):
    return -tf.reduce_sum(y_true * tf.math.log(y_pred))
# tf.reduce_sum() sums the elements of a tensor; in TensorFlow everything is a tensor, and axis=0/1 selects which dimension is reduced
# tf.math.log() only gives the natural logarithm ln(); other bases need the change-of-base formula
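# A quick check of the two notes above (illustrative): axis selects the
# reduced dimension, and other log bases come from the change-of-base formula.
_m = tf.constant([[1., 2.], [3., 4.]])
print(tf.reduce_sum(_m, axis=0))           # column sums: [4. 6.]
print(tf.reduce_sum(_m, axis=1))           # row sums: [3. 7.]
print(tf.math.log(8.) / tf.math.log(2.))   # log2(8) = 3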
y_true = np.array([[[0.3, 0.5, 0.2],
                   [0.2, 0.3, 0.5],
                   [0.5, 0.2, 0.3]]]).astype(np.float32)

inputs = np.random.random([1, 3, 4]).astype(np.float32)
# np.random.random([1, 3, 4]) draws a random array of shape (1, 3, 4): 1 sentence, 3 words, 4 features per word
# astype() casts the data type
# So the input is one sequence of 3 time steps with 4 features each; the labels (y_true) also span 3 steps, each a length-3 probability vector
# Initialize the h and c states: the first entry is h, the second is c
init_state = [tf.constant(np.random.random((inputs.shape[0], 2)).astype(np.float32)),
              tf.constant(np.random.random((inputs.shape[0], 2)).astype(np.float32))]
# shape[0] is the number of rows, shape[1] the number of columns

# Define the LSTM layer
lstm_cell = tf.keras.layers.LSTMCell(2)
lstm = tf.keras.layers.RNN(lstm_cell, return_sequences=True, return_state=True)
# The RNN's recurrent weight matrix W has shape (units, units) per gate
# return_sequences: return only the last output of the sequence (False, the default) or the full sequence (True)
# return_state: whether to also return the final states in addition to the output (default False)
# For the detailed LSTM mechanics see the RNN implementation notes
lstm_seq, last_h, last_c = lstm(inputs=inputs, initial_state=init_state)
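# Shape check (illustrative): with units=2, batch=1 and 3 time steps, the
# three return values are (1, 3, 2), (1, 2) and (1, 2), and the last row of
# lstm_seq equals last_h.
print(lstm_seq.shape, last_h.shape, last_c.shape)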

# fnn layer
dense = tf.keras.layers.Dense(3)
# Dense() is a fully connected layer; Dense(3) outputs 3 values (three neurons)

# Finally compute y
output_seq = tf.math.softmax(dense(lstm_seq))
# softmax amplifies the largest values: the strong get stronger, the weak weaker;
# it also normalizes all elements so the output is a probability distribution

# print(inputs.shape,'\n')
#print(inputs,'\n')
# print(y_true.shape,'\n')
# print(y_true,'\n')
# print(lstm_seq,'\n')
# print(output_seq)
# print(lstm.weights,'\n')

# ---------- Verify the LSTM forward pass -------------
# Extract the LSTM variables
kernel = lstm.weights[0]
# kernel concatenates the four input weight matrices, in Keras gate order W_i, W_f, W_a, W_o
recurrent_kernel = lstm.weights[1]
# recurrent_kernel concatenates the four recurrent weight matrices, in the same gate order
bias = lstm.weights[2]
# bias concatenates b_i, b_f, b_a, b_o
with tf.GradientTape(persistent=True) as t:
    h0, c0 = init_state
    # -------- step 1 -----------
    z1 = tf.matmul(inputs[:, 0, :], kernel)
    # inputs[:, 0, :] is the first time step; tf.matmul() performs matrix multiplication
    z1 += tf.matmul(init_state[0], recurrent_kernel)
    z1 += bias
# z1.shape = (1, 8); reading left to right, each pair of columns (h and c have dim=2) holds the pre-activations of the i, f, a, o gates
    i1 = tf.math.sigmoid(z1[:, 0:2])
    f1 = tf.math.sigmoid(z1[:, 2:4])
    c1 = f1 * init_state[1] + i1 * tf.math.tanh(z1[:, 4:6])
    t.watch(c1)
    o1 = tf.math.sigmoid(z1[:, 6:8])

    h1 = o1 * tf.math.tanh(c1)
    t.watch(h1)
    out1 = dense(h1)
    t.watch(out1)
    a1 = tf.math.softmax(out1)
    t.watch(a1)
    # -------- step 2 -------------
    z2 = tf.matmul(inputs[:, 1, :], kernel)
    z2 += tf.matmul(h1, recurrent_kernel)
    z2 += bias

    i2 = tf.math.sigmoid(z2[:, 0:2])
    f2 = tf.math.sigmoid(z2[:, 2:4])
    c2 = f2 * c1 + i2 * tf.math.tanh(z2[:, 4:6])
    t.watch(c2)
    o2 = tf.math.sigmoid(z2[:, 6:8])

    h2 = o2 * tf.math.tanh(c2)
    t.watch(h2)
    out2 = dense(h2)
    t.watch(out2)
    a2 = tf.math.softmax(out2)
    t.watch(a2)
    # ---------step3---------------
    z3 = tf.matmul(inputs[:, 2, :], kernel)
    z3 += tf.matmul(h2, recurrent_kernel)
    z3 += bias

    i3 = tf.math.sigmoid(z3[:, 0:2])
    f3 = tf.math.sigmoid(z3[:, 2:4])
    c3 = f3 * c2 + i3 * tf.math.tanh(z3[:, 4:6])
    t.watch(c3)
    o3 = tf.math.sigmoid(z3[:, 6:8])

    h3 = o3 * tf.math.tanh(c3)
    t.watch(h3)
    out3 = dense(h3)
    t.watch(out3)
    a3 = tf.math.softmax(out3)
    t.watch(a3)
    # ---------loss----------------
    my_seqout = tf.stack([a1, a2, a3], axis=1)
    my_loss = get_crossentropy(y_pred=my_seqout, y_true=y_true)
    # -------L(t)---------
    loss_1 = get_crossentropy(y_pred=my_seqout[:, 0, :], y_true=y_true[:, 0, :])
    loss_2 = get_crossentropy(y_pred=my_seqout[:, 1, :], y_true=y_true[:, 1, :])
    loss_3 = get_crossentropy(y_pred=my_seqout[:, 2, :], y_true=y_true[:, 2, :])
# print(np.asarray(a1).shape,'\n')
# print(tf.stack([a1,a2,a3],axis=0),'\n')
# print(tf.stack([a1,a2,a3],axis=1),'\n')
# Understanding the tf.stack() concatenation:
#     Example: if each of N tensors has shape (d1, d2, d3, d4), then with
#     stacks = [stack_data1, stack_data2, ..., stack_dataN] and st = tf.stack(stacks, axis=?):
#         the stacked result has 5 dimensions, so axis takes values in [-5, 5)
#         axis=0 -> st.shape = (N, d1, d2, d3, d4)
#         axis=1 -> st.shape = (d1, N, d2, d3, d4)
#         axis=2 -> st.shape = (d1, d2, N, d3, d4)
#         axis=3 -> st.shape = (d1, d2, d3, N, d4)
#         axis=4 -> st.shape = (d1, d2, d3, d4, N)
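# A small check of the axis rule above (shapes only, illustrative):
_s = [tf.zeros((1, 3)) for _ in range(3)]   # N=3 tensors of shape (1, 3)
print(tf.stack(_s, axis=0).shape)           # (3, 1, 3)
print(tf.stack(_s, axis=1).shape)           # (1, 3, 3)
print(tf.stack(_s, axis=2).shape)           # (1, 3, 3)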
#------ Compare output_seq and my_seqout to check that the forward pass is correct
# To keep the focus on the LSTM, the dense layer itself is not re-verified against the DNN forward-pass formulas here; that is covered in DNN-paras-test
print(output_seq,'\n')
print(my_seqout,'\n')


#--------- Verify the LSTM backward pass ---------

# Extract the per-gate recurrent weights used in the forward-pass equations (gate order i, f, a, o)
w_i = recurrent_kernel[:, 0:2]
w_f = recurrent_kernel[:, 2:4]
w_a = recurrent_kernel[:, 4:6]
w_o = recurrent_kernel[:, 6:8]
# -------- Verify delta_hT ---------
delta_hT = t.gradient(my_loss, h3)
my_delta_hT = tf.matmul((a3 - y_true[:, 2, :]), tf.transpose(dense.kernel))

# -------- Verify delta_cT ---------
delta_cT = t.gradient(my_loss, c3)
my_delta_cT = delta_hT * o3 * (1 - tf.math.tanh(c3)**2)
# ----------- Derive delta_h1 from delta_c2 and delta_h2 ----------
a_2 = tf.math.tanh(z2[:, 4:6])
delta_h2 = t.gradient(my_loss, h2)
delta_c2 = t.gradient(my_loss, c2)
dc2_dh1 = tf.matmul(w_f, np.diag(tf.squeeze(c1 * f2 * (1 - f2)))) + \
             tf.matmul(w_i, np.diag(tf.squeeze(a_2 * i2 * (1 - i2)))) + \
             tf.matmul(w_a, np.diag(tf.squeeze(i2 * (1 - a_2**2))))
# tf.squeeze() drops the size-1 dimensions so that np.diag() can build a diagonal matrix from the gate vector
delta_h1 = t.gradient(my_loss, h1)
my_delta_h1 = tf.matmul((a1 - y_true[:, 0, :]), tf.transpose(dense.kernel)) + \
             tf.matmul(delta_c2, tf.transpose(dc2_dh1)) + \
             tf.matmul(delta_h2, tf.transpose(tf.matmul(w_o, np.diag(tf.squeeze(tf.math.tanh(c2) * o2 * (1 - o2))))))
print(delta_h1,'\n')
print(my_delta_h1,'\n')
# -------------- Then derive delta_c1 from delta_h1 and delta_c2 ---------
delta_c1 = t.gradient(my_loss, c1)
my_delta_c1 = o1 * (1 - tf.math.tanh(c1)**2) * delta_h1 + f2 * delta_c2
print(delta_c1,'\n')
print(my_delta_c1,'\n')
# ------------- Verify dl_dW_f ---------------
dl_dW_f = t.gradient(my_loss, recurrent_kernel)[:, 2:4]
my_dl_dW_f = tf.matmul(tf.transpose(h2), (delta_cT * c2 * f3 * (1 - f3))) + \
             tf.matmul(tf.transpose(h1), (delta_c2 * c1 * f2 * (1 - f2))) + \
             tf.matmul(tf.transpose(h0), (delta_c1 * c0 * f1 * (1 - f1)))
print(dl_dW_f,'\n')
print(my_dl_dW_f,'\n')

Lstm paras-calculation

import tensorflow as tf
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential
# Note: tensorflow.python.keras is TensorFlow's internal implementation path; the public import path is tensorflow.keras
time_step=13
featrue=5
hidenfeatrue=10

model=Sequential()
model.add( LSTM(hidenfeatrue,input_shape=(time_step,featrue)))
model.summary()
# Sequential is the simplified version of the functional model: the simplest linear, start-to-finish structure with no branching, i.e. a plain stack of layers.
# A Sequential model can be created by passing a list of layers to the Sequential constructor (see the sketch below),
# or layers are added with model.add(); the first layer must specify input_shape=(time steps, features)
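# A sketch of the points above: the same model can be built by passing the
# layer list straight to the constructor, and the parameter count printed by
# model.summary() can be reproduced by hand. Per gate the LSTM holds a kernel
# (featrue x hidenfeatrue), a recurrent kernel (hidenfeatrue x hidenfeatrue)
# and a bias (hidenfeatrue), and there are 4 gates:
model2 = Sequential([LSTM(hidenfeatrue, input_shape=(time_step, featrue))])
expected = 4 * (hidenfeatrue * (featrue + hidenfeatrue) + hidenfeatrue)
print(expected, model.count_params(), model2.count_params())  # all 640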

######### Model inspection (including weight queries)
# model.summary() prints a model overview
# model.get_layer() fetches a layer object by name or index
# model.get_weights() returns the model's weight tensors as a list of numpy arrays
# model.set_weights() loads weights from numpy arrays; the arrays must have the same shapes as model.get_weights()
######### Saving and loading models
# model.save_weights(filepath)
# saves the model weights to the given path as an HDF5 file (.h5 suffix)
# model.load_weights(filepath, by_name=False)
# loads weights from an HDF5 file into the current model; by default the architecture is assumed unchanged.
# To load weights into a different model (with some shared layers), set by_name=True: only layers whose names match receive weights
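# A minimal round-trip sketch of the save/load calls listed above (the file
# name 'tmp_lstm_weights.h5' is hypothetical):
# model.save_weights('tmp_lstm_weights.h5')
# w_before = model.get_weights()
# model.load_weights('tmp_lstm_weights.h5')
# assert all((a == b).all() for a, b in zip(w_before, model.get_weights()))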

#%%

import tensorflow as tf
from tensorflow.python.keras import *
from tensorflow.python.keras.layers import *

x=Input(shape=(5,))
y=Dense(1,activation='softmax')(x)

model=Model(inputs=x,outputs=y)
model.summary()


#%%

import tensorflow as tf
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers.core import Dense, Dropout, Activation

model = Sequential() # sequential model

# input layer
model.add(Dense(7, input_shape=(4,)))  # Dense as the first layer must specify input_shape
model.add(Activation('sigmoid')) # activation function

# hidden layer
model.add(Dense(13))  # Dense as a middle layer
model.add(Activation('sigmoid')) # activation function

# output layer
model.add(Dense(5))
model.add(Activation('softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=["accuracy"])

model.summary()

Lstm test_prediction1

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import  pandas as pd
import math
import  tensorflow.python.keras.callbacks
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model
from tensorflow.python.keras.callbacks import Callback

#------ Reshape the output y -------
'''
After training, predicting with a test_X of shape (1,20,2) gives a y_hat of shape (1,10),
so y_hat still has to be reshaped into a (5,2) matrix
'''
def reshape_y_hat(y_hat,dim):
    re_y = []
    i = 0
    while i < len(y_hat):
        tmp = []
        for j in range(dim):
            tmp.append(y_hat[i+j])
        i = i + dim
        re_y.append(tmp)
    re_y = np.array(re_y,dtype='float64')
    return  re_y
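# Sanity check (illustrative): for a flat vector whose length is a multiple
# of dim, the loop above is equivalent to a plain numpy reshape.
_v = np.arange(10.0)
assert (reshape_y_hat(_v, 2) == _v.reshape(-1, 2)).all()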

# Multi-dimensional inverse normalization
def FNormalizeMult(data,normalize):

    data = np.array(data,dtype='float64')
    # columns
    for i in range(0,data.shape[1]):
        listlow =  normalize[i,0]
        listhigh = normalize[i,1]
        delta = listhigh - listlow
        # rows
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  data[j,i]*delta + listlow

    return data

# Normalize with the training data's min/max
def NormalizeMultUseData(data,normalize):

    for i in range(0, data.shape[1]):

        listlow = normalize[i, 0]
        listhigh = normalize[i, 1]
        delta = listhigh - listlow

        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data
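# Vectorized equivalents of the two loop-based helpers above (a sketch using
# numpy broadcasting; it assumes every column has a non-zero range):
def NormalizeVec(data, normalize):
    low, high = normalize[:, 0], normalize[:, 1]
    return (data - low) / (high - low)

def FNormalizeVec(data, normalize):
    low, high = normalize[:, 0], normalize[:, 1]
    return data * (high - low) + low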

# Test only on the last 20 rows, since only the most recent predictions matter
data = np.zeros(40)
data.dtype = 'float64'
data = data.reshape(20,2)
sinx=np.arange(0,8*np.pi,2*np.pi/5,dtype='float64')
siny=np.sin(sinx)
cosx=np.arange(0,8*np.pi,2*np.pi/5,dtype='float64')
cosy=np.cos(sinx)
data[:,0] = siny
data[:,1] = cosy


# Normalize with the saved training min/max
normalize = np.load("./MultiSteup1.npy")
data = NormalizeMultUseData(data, normalize)
model = load_model("./MultiSteup1.h5")
test_X = data.reshape(1,data.shape[0],data.shape[1])
y_hat = model.predict(test_X)
# Reassemble
y_hat = y_hat.reshape(y_hat.shape[1])
y_hat = reshape_y_hat(y_hat,2)

# Invert the normalization
y_hat = FNormalizeMult(y_hat, normalize)

print(y_hat.shape)
plt.plot(y_hat[:,0])
plt.show()
plt.plot(y_hat[:,1])
plt.show()

Lstm test_prediction2

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import  pandas as pd
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model


#------ Reshape the output y -------
'''
After training, predicting with a test_X of shape (1,20,2) gives a y_hat of shape (1,10),
so y_hat still has to be reshaped into a (5,2) matrix
'''
def reshape_y_hat(y_hat,dim):
    re_y = []
    i = 0
    while i < len(y_hat):
        tmp = []
        for j in range(dim):
            tmp.append(y_hat[i+j])
        i = i + dim
        re_y.append(tmp)
    re_y = np.array(re_y,dtype='float64')
    return  re_y

# Multi-dimensional inverse normalization
def FNormalizeMult(data,normalize):

    data = np.array(data,dtype='float64')
    # columns
    for i in range(0,data.shape[1]):
        listlow =  normalize[i,0]
        listhigh = normalize[i,1]
        delta = listhigh - listlow
        # rows
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  data[j,i]*delta + listlow

    return data

# Normalize with the training data's min/max
def NormalizeMultUseData(data,normalize):

    for i in range(0, data.shape[1]):

        listlow = normalize[i, 0]
        listhigh = normalize[i, 1]
        delta = listhigh - listlow

        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data

# Test only on the most recent rows, since only the latest predictions matter

dataframe = pd.read_csv('C:/Users/Dell/international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
dataset = dataframe.values
# Convert integers to float
flight = dataset.astype('float64')
flight_y = np.asarray(flight)[91:121,:].flatten()
flight_ytest = np.asarray(flight)[91:129,:].flatten()

data = np.zeros(60)
data.dtype = 'float64'
data = data.reshape(30,2)
sinx=np.arange(0,12*np.pi,2*np.pi/5,dtype='float64')
# siny=np.sin(sinx)
# cosx=np.arange(0,8*np.pi,2*np.pi/5,dtype='float64')
cosy=np.cos(sinx)
data[:,0] = flight_y
data[:,1] = cosy


# Normalize with the saved training min/max
normalize = np.load("./MultiSteup2.npy")
data = NormalizeMultUseData(data, normalize)
model = load_model("./MultiSteup2.h5")
test_X = data.reshape(1,data.shape[0],data.shape[1])
y_hat = model.predict(test_X)
# Reassemble
y_hat = y_hat.reshape(y_hat.shape[1])
y_hat = reshape_y_hat(y_hat,2)

# Invert the normalization
y_hat = FNormalizeMult(y_hat, normalize)

# print(y_hat.shape)
plt.plot(flight_y,color='r')
plt.plot(flight_ytest,color='g')
# plt.plot(y_hat[:,0])
# plt.show()
# plt.plot(y_hat[:,1])
# plt.show()
ylist = flight_y.tolist()[:]
for i in range(len(y_hat[:,0])):
    t = y_hat[i,0]
    ylist.append(t)
plt.plot(ylist,color='b')
plt.show()

Lstm test_prediction3

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import  pandas as pd
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model


#------ Reshape the output y -------
'''
After training, predicting with a test_X of shape (1,20,2) gives a y_hat of shape (1,10),
so y_hat still has to be reshaped into a (5,2) matrix
'''
def reshape_y_hat(y_hat,dim):
    re_y = []
    i = 0
    while i < len(y_hat):
        tmp = []
        for j in range(dim):
            tmp.append(y_hat[i+j])
        i = i + dim
        re_y.append(tmp)
    re_y = np.array(re_y,dtype='float64')
    return  re_y

# Multi-dimensional inverse normalization
def FNormalizeMult(data,normalize):

    data = np.array(data,dtype='float64')
    # columns
    for i in range(0,data.shape[1]):
        listlow =  normalize[i,0]
        listhigh = normalize[i,1]
        delta = listhigh - listlow
        # rows
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  data[j,i]*delta + listlow

    return data

# Normalize with the training data's min/max
def NormalizeMultUseData(data,normalize):

    for i in range(0, data.shape[1]):

        listlow = normalize[i, 0]
        listhigh = normalize[i, 1]
        delta = listhigh - listlow

        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data


dataframe = pd.read_csv('C:/Users/Dell/Gauss_Data2.csv', usecols=[0,1,2,3], engine='python', skipfooter=0)
# usecols=[0,1,2,3] reads columns 0/1/2/3; skiprows/skipfooter=3 would skip the first/last three rows
dataset = dataframe.values
# Convert integers to float
gauss_y = dataset.astype('float64')
gauss_y1 = gauss_y[70:90,1].flatten()
gauss_y1test = gauss_y[70:95,1].flatten()

gauss_y2 = gauss_y[70:90,1].flatten()
gauss_y2test = gauss_y[70:95,1].flatten()

gauss_y3 = gauss_y[70:90,1].flatten()
gauss_y3test = gauss_y[70:95,1].flatten()

gauss_y4 = gauss_y[70:90,1].flatten()
gauss_y4test = gauss_y[70:95,1].flatten()

data = np.zeros(80)
data.dtype = 'float64'
data = data.reshape(20,4)
data[:,0] = gauss_y1
data[:,1] = gauss_y2
data[:,2] = gauss_y3
data[:,3] = gauss_y4

# Normalize with the saved training min/max
normalize = np.load("./MultiSteup3.npy")
data = NormalizeMultUseData(data, normalize)
model = load_model("./MultiSteup3.h5")
test_X = data.reshape(1,data.shape[0],data.shape[1])
y_hat = model.predict(test_X)
# Reassemble
y_hat = y_hat.reshape(y_hat.shape[1])
y_hat = reshape_y_hat(y_hat,4)

# Invert the normalization
y_hat = FNormalizeMult(y_hat, normalize)

# print(y_hat.shape)
fig = plt.figure(figsize=(10, 5))
plt.plot(gauss_y1,color='r')
plt.plot(gauss_y1test,color='g')
gauss_y1_list = gauss_y1.tolist()[:]
for i in range(len(y_hat[:,0])):
    t = y_hat[i,0]
    gauss_y1_list.append(t)
plt.plot(gauss_y1_list,color='b')
plt.show()

fig = plt.figure(figsize=(10, 5))
plt.plot(gauss_y2,color='r')
plt.plot(gauss_y2test,color='g')
gauss_y2_list = gauss_y2.tolist()[:]
for i in range(len(y_hat[:,1])):
    t = y_hat[i,1]
    gauss_y2_list.append(t)
plt.plot(gauss_y2_list,color='b')
plt.show()

fig = plt.figure(figsize=(10, 5))
plt.plot(gauss_y3,color='r')
plt.plot(gauss_y3test,color='g')
gauss_y3_list = gauss_y3.tolist()[:]
for i in range(len(y_hat[:,2])):
    t = y_hat[i,2]
    gauss_y3_list.append(t)
plt.plot(gauss_y3_list,color='b')
plt.show()

fig = plt.figure(figsize=(10, 5))
plt.plot(gauss_y4,color='r')
plt.plot(gauss_y4test,color='g')
gauss_y4_list = gauss_y4.tolist()[:]
for i in range(len(y_hat[:,3])):
    t = y_hat[i,3]
    gauss_y4_list.append(t)
plt.plot(gauss_y4_list,color='b')
plt.show()

# Compute the mean absolute percentage error (MAPE)
def mape(y_true, y_pred):
    return np.mean(np.abs((y_pred - y_true) / y_true)) * 100
print(mape(gauss_y[90:95,1], y_hat[:,0]),'\n')
print(mape(gauss_y[90:95,1], y_hat[:,1]),'\n')
print(mape(gauss_y[90:95,1], y_hat[:,2]),'\n')
print(mape(gauss_y[90:95,1], y_hat[:,3]),'\n')

Lstm test_prediction4

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import  pandas as pd
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model


# config = tf.compat.v1.ConfigProto()
# config.gpu_options.allow_growth = True
# session = tf.compat.v1.Session(config=config)
#------ Reshape the output y -------
'''
After training, predicting with a test_X of shape (1,20,2) gives a y_hat of shape (1,10),
so y_hat still has to be reshaped into a (5,2) matrix
'''
def reshape_y_hat(y_hat,dim):
    re_y = []
    i = 0
    while i < len(y_hat):
        tmp = []
        for j in range(dim):
            tmp.append(y_hat[i+j])
        i = i + dim
        re_y.append(tmp)
    re_y = np.array(re_y,dtype='float64')
    return  re_y

# Multi-dimensional inverse normalization
def FNormalizeMult(data,normalize):

    data = np.array(data,dtype='float64')
    # columns
    for i in range(0,data.shape[1]):
        listlow =  normalize[i,0]
        listhigh = normalize[i,1]
        delta = listhigh - listlow
        # rows
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  data[j,i]*delta + listlow

    return data

# Normalize with the training data's min/max
def NormalizeMultUseData(data,normalize):

    for i in range(0, data.shape[1]):

        listlow = normalize[i, 0]
        listhigh = normalize[i, 1]
        delta = listhigh - listlow

        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data


dataframe = pd.read_csv('C:/Users/Dell/Gauss_Data5.csv', usecols=[0,1,2,3], engine='python', skipfooter=0)
# usecols=[0,1,2,3] reads columns 0/1/2/3; skiprows/skipfooter=3 would skip the first/last three rows
dataset = dataframe.values
# Convert integers to float
gauss_y = dataset.astype('float64')
gauss_y1 = gauss_y[80:90,1].flatten()
gauss_y1test = gauss_y[80:95,1].flatten()

gauss_y2 = gauss_y[80:90,3].flatten()
gauss_y2test = gauss_y[80:95,3].flatten()

gauss_y3 = gauss_y[80:90,1].flatten()
gauss_y3test = gauss_y[80:95,1].flatten()

gauss_y4 = gauss_y[80:90,3].flatten()
gauss_y4test = gauss_y[80:95,3].flatten()

data = np.zeros(40)
data.dtype = 'float64'
data = data.reshape(10,4)
data[:,0] = gauss_y1
data[:,1] = gauss_y2
data[:,2] = gauss_y3
data[:,3] = gauss_y4

# Normalize with the saved training min/max

normalize = np.load("./MultiSteup4.npy")
data = NormalizeMultUseData(data, normalize)
model = load_model("./MultiSteup4.h5")
test_X = data.reshape(1,data.shape[0],data.shape[1])
y_hat = model.predict(test_X)
# Reassemble
y_hat = y_hat.reshape(y_hat.shape[1])
y_hat = reshape_y_hat(y_hat,4)

# Invert the normalization
y_hat = FNormalizeMult(y_hat, normalize)

# print(y_hat.shape)
fig,ax = plt.subplots(figsize=(10, 5))
font = {'family':'Times New Roman',
                'style':'normal',
                'weight':'normal',
                'size':14,
               }
ax.plot(gauss_y1,color='b')
ax.plot(gauss_y1test,color='g')
gauss_y1_list = gauss_y1.tolist()[:]
for i in range(len(y_hat[:,0])):
    t = y_hat[i,0]
    gauss_y1_list.append(t)
ax.plot(gauss_y1_list,color='b')
# Set the tick label font
plt.tick_params(labelsize=16)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
# Set the tick spacing
plt.xticks(np.arange(0, 15, step=2))
plt.yticks(np.arange(0.2, 1.01, step=0.2))
plt.legend(['prediction','true'],loc=1,prop=font,framealpha=0)
plt.show()


fig,ax = plt.subplots(figsize=(10, 5))
plt.plot(gauss_y2,color='b')
plt.plot(gauss_y2test,color='g')
gauss_y2_list = gauss_y2.tolist()[:]
for i in range(len(y_hat[:,1])):
    t = y_hat[i,1]
    gauss_y2_list.append(t)
plt.plot(gauss_y2_list,color='b')
# Set the tick label font
plt.tick_params(labelsize=16)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
# Set the tick spacing
plt.xticks(np.arange(0, 15, step=2))
plt.yticks(np.arange(0.2, 1.31, step=0.2))
plt.legend(['prediction','true'],loc=1,prop=font,framealpha=0)
plt.show()

fig,ax = plt.subplots(figsize=(10, 5))
font = {'family':'Times New Roman',
                'style':'normal',
                'weight':'normal',
                'size':14,
               }
ax.plot(gauss_y3,color='b')
ax.plot(gauss_y3test,color='g')
gauss_y3_list = gauss_y3.tolist()[:]
for i in range(len(y_hat[:,2])):
    t = y_hat[i,2]
    gauss_y3_list.append(t)
plt.plot(gauss_y3_list,color='b')
# Set the tick label font
plt.tick_params(labelsize=16)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
# Set the tick spacing
plt.xticks(np.arange(0, 15, step=2))
plt.yticks(np.arange(0.2, 1.01, step=0.2))
plt.legend(['prediction','true'],loc=1,prop=font,framealpha=0)
plt.show()


fig,ax = plt.subplots(figsize=(10, 5))
font = {'family':'Times New Roman',
                'style':'normal',
                'weight':'normal',
                'size':14,
               }
plt.plot(gauss_y4,color='b')
plt.plot(gauss_y4test,color='g')
gauss_y4_list = gauss_y4.tolist()[:]
for i in range(len(y_hat[:,3])):
    t = y_hat[i,3]
    gauss_y4_list.append(t)
plt.plot(gauss_y4_list,color='b')
# Set the tick label font
plt.tick_params(labelsize=16)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
# Set the tick spacing
plt.xticks(np.arange(0, 15, step=2))
plt.yticks(np.arange(0.2, 1.31, step=0.2))
plt.legend(['prediction','true'],loc=1,prop=font,framealpha=0)
plt.show()


# Compute the mean absolute percentage error (MAPE)
def mape(y_true, y_pred):
    return np.mean(np.abs((y_pred - y_true) / y_true)) * 100
print(mape(gauss_y[90:95,1], y_hat[:,0]),'\n')
print(mape(gauss_y[90:95,3], y_hat[:,1]),'\n')
print(mape(gauss_y[90:95,1], y_hat[:,2]),'\n')
print(mape(gauss_y[90:95,3], y_hat[:,3]),'\n')

Lstm test_prediction5

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import  pandas as pd
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model


# config = tf.compat.v1.ConfigProto()
# config.gpu_options.allow_growth = True
# session = tf.compat.v1.Session(config=config)
#------ Reshape the output y -------
'''
After training, predicting with a test_X of shape (1,20,2) gives a y_hat of shape (1,10),
so y_hat still has to be reshaped into a (5,2) matrix
'''
def reshape_y_hat(y_hat,dim):
    re_y = []
    i = 0
    while i < len(y_hat):
        tmp = []
        for j in range(dim):
            tmp.append(y_hat[i+j])
        i = i + dim
        re_y.append(tmp)
    re_y = np.array(re_y,dtype='float64')
    return  re_y

# Multi-dimensional inverse normalization
def FNormalizeMult(data,normalize):

    data = np.array(data,dtype='float64')
    # columns
    for i in range(0,data.shape[1]):
        listlow =  normalize[i,0]
        listhigh = normalize[i,1]
        delta = listhigh - listlow
        # rows
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  data[j,i]*delta + listlow

    return data

# Normalize with the training data's min/max
def NormalizeMultUseData(data,normalize):

    for i in range(0, data.shape[1]):

        listlow = normalize[i, 0]
        listhigh = normalize[i, 1]
        delta = listhigh - listlow

        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data


dataframe = pd.read_csv('C:/Users/Dell/Gauss_Data3.csv', usecols=[0,1,2,3], engine='python', skipfooter=0)
# usecols=[0,1,2,3] reads columns 0/1/2/3; skiprows/skipfooter=3 would skip the first/last three rows
dataset = dataframe.values
# Convert integers to float
gauss_y = dataset.astype('float64')
gauss_y1 = gauss_y[80:90,3].flatten()
gauss_y1test = gauss_y[80:95,3].flatten()

gauss_y2 = gauss_y[80:90,3].flatten()
gauss_y2test = gauss_y[80:95,3].flatten()

gauss_y3 = gauss_y[80:90,3].flatten()
gauss_y3test = gauss_y[80:95,3].flatten()

gauss_y4 = gauss_y[80:90,3].flatten()
gauss_y4test = gauss_y[80:95,3].flatten()

data = np.zeros(40)
data.dtype = 'float64'
data = data.reshape(10,4)
data[:,0] = gauss_y1
data[:,1] = gauss_y2
data[:,2] = gauss_y3
data[:,3] = gauss_y4

# Normalize with the saved training min/max

normalize = np.load("./MultiSteup6.npy")
data = NormalizeMultUseData(data, normalize)
model = load_model("./MultiSteup6.h5")
test_X = data.reshape(1,data.shape[0],data.shape[1])
y_hat = model.predict(test_X)
# Reassemble
y_hat = y_hat.reshape(y_hat.shape[1])
y_hat = reshape_y_hat(y_hat,4)

# Invert the normalization
y_hat = FNormalizeMult(y_hat, normalize)

# print(y_hat.shape)
fig,ax = plt.subplots(figsize=(10, 5))
font = {'family':'Times New Roman',
                'style':'normal',
                'weight':'normal',
                'size':14,
               }
ax.plot(gauss_y1,color='b')
ax.plot(gauss_y1test,color='g')
gauss_y1_list = gauss_y1.tolist()[:]
for i in range(len(y_hat[:,0])):
    t = y_hat[i,0]
    gauss_y1_list.append(t)
ax.plot(gauss_y1_list,color='b')
# Set the tick label font
plt.tick_params(labelsize=16)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
# Set the tick spacing
plt.xticks(np.arange(0, 15, step=2))
plt.yticks(np.arange(0, 2.01, step=0.25))
plt.legend(['prediction','true'],loc=1,prop=font,framealpha=0)
plt.show()

fig = plt.figure(figsize=(10, 5))
plt.plot(gauss_y2,color='r')
plt.plot(gauss_y2test,color='g')
gauss_y2_list = gauss_y2.tolist()[:]
for i in range(len(y_hat[:,1])):
    t = y_hat[i,1]
    gauss_y2_list.append(t)
plt.plot(gauss_y2_list,color='b')
plt.show()

fig,ax = plt.subplots(figsize=(10, 5))
font = {'family':'Times New Roman',
                'style':'normal',
                'weight':'normal',
                'size':14,
               }
plt.plot(gauss_y3,color='b')
plt.plot(gauss_y3test,color='g')
gauss_y3_list = gauss_y3.tolist()[:]
for i in range(len(y_hat[:,2])):
    t = y_hat[i,2]
    gauss_y3_list.append(t)
plt.plot(gauss_y3_list,color='b')
# Set the tick label font
plt.tick_params(labelsize=16)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
# Set the tick spacing
plt.xticks(np.arange(0, 15, step=2))
plt.yticks(np.arange(-0.75, 1.01, step=0.25))
plt.legend(['prediction','true'],loc=1,prop=font,framealpha=0)
plt.show()

fig = plt.figure(figsize=(10, 5))
plt.plot(gauss_y4,color='r')
plt.plot(gauss_y4test,color='g')
gauss_y4_list = gauss_y4.tolist()[:]
for i in range(len(y_hat[:,3])):
    t = y_hat[i,3]
    gauss_y4_list.append(t)
plt.plot(gauss_y4_list,color='b')
plt.show()

# Compute the mean absolute percentage error (MAPE)
def mape(y_true, y_pred):
    return np.mean(np.abs((y_pred - y_true) / y_true)) * 100
print(mape(gauss_y[90:95,3], y_hat[:,0]),'\n')
print(mape(gauss_y[90:95,3], y_hat[:,1]),'\n')
print(mape(gauss_y[90:95,3], y_hat[:,2]),'\n')
print(mape(gauss_y[90:95,3], y_hat[:,3]),'\n')

Lstm train-model1

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import  pandas as pd
import math
import  tensorflow.python.keras.callbacks
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model
from tensorflow.python.keras.callbacks import Callback


#--------- Define the conversion function -----------
def create_dataset(data,n_predictions,n_next):
    '''
    Slice the data into input windows and flattened target windows
    '''
    dim = data.shape[1]
    train_X, train_Y = [], []
    for i in range(data.shape[0]-n_predictions-n_next-1):
        a = data[i:(i+n_predictions),:]
        train_X.append(a)
        tempb = data[(i+n_predictions):(i+n_predictions+n_next),:]
        b = []
        for j in range(len(tempb)):
            for k in range(dim):
                b.append(tempb[j,k])
        train_Y.append(b)
    train_X = np.array(train_X,dtype='float64')
    train_Y = np.array(train_Y,dtype='float64')

    return train_X, train_Y
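# Note: the nested j/k loop above is just a row-major flatten of the target
# window, i.e. b == tempb.flatten().tolist(), so each train_Y row has length
# n_next * dim.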
# Define the normalization function
def NormalizeMult(data):
    '''
    Min-max normalization, for both single- and multi-dimensional data;
    returns the normalized data and the per-column min/max
    '''
    normalize = np.arange(2*data.shape[1],dtype='float64')
    normalize = normalize.reshape(data.shape[1],2)

    for i in range(0,data.shape[1]):

        list = data[:,i]
        listlow,listhigh =  np.percentile(list, [0, 100])

        normalize[i,0] = listlow
        normalize[i,1] = listhigh

        delta = listhigh - listlow
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data,normalize
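# For reference, this is the same per-column min-max scaling that
# sklearn.preprocessing.MinMaxScaler performs (a sketch of the equivalence;
# not used by the original script):
#   scaler = MinMaxScaler().fit(data)   # scaler.data_min_ == normalize[:, 0]
#   scaled = scaler.transform(data)     # matches NormalizeMult's output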

#---------- Model training function ------------
def trainModel(train_X, train_Y):
    '''
    train_X, train_Y: the data needed to train the LSTM model
    '''
    model = Sequential()
    model.add(LSTM(
        40,
        input_shape=(train_X.shape[1], train_X.shape[2]),
        return_sequences=True))
    model.add(Dropout(0.3))

    model.add(LSTM(
        40,
        return_sequences=False))
    model.add(Dropout(0.3))

    model.add(Dense(
        train_Y.shape[1]))
    model.add(Activation("relu"))

    model.compile(loss='mse', optimizer='adam')
    model.fit(train_X, train_Y, epochs=100, batch_size=32, verbose=1)

    return model
#---------- Build synthetic data for a test ----------
# Two-dimensional data, sin() and cos(), 100 points long; use the previous 20 steps to predict the next 8
data = np.zeros(200)
data.dtype = 'float64'
data = data.reshape(100,2)
sinx=np.arange(0,40*np.pi,2*np.pi/5,dtype='float64')
siny=np.sin(sinx)
cosx=np.arange(0,40*np.pi,2*np.pi/5,dtype='float64')
cosy=np.cos(sinx)

data[:,0] = siny
data[:,1] = cosy

#print(data)
fig = plt.figure(figsize=(10, 5))
plt.plot(data[:,0])
plt.show()
fig = plt.figure(figsize=(10, 5))
plt.plot(data[:,1])
plt.show()
# Apply the normalization
data,normalize = NormalizeMult(data)

train_X,train_Y = create_dataset(data,20,8)
model = trainModel(train_X,train_Y)

np.save("./MultiSteup1.npy",normalize)
model.save("./MultiSteup1.h5")

Lstm train-model2

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import  pandas as pd
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model


#--------- Define the conversion function -----------
def create_dataset(data,n_predictions,n_next):
    '''
    Slice the data into input windows and flattened target windows
    '''
    dim = data.shape[1]
    train_X, train_Y = [], []
    for i in range(data.shape[0]-n_predictions-n_next-1):
        a = data[i:(i+n_predictions),:]
        train_X.append(a)
        tempb = data[(i+n_predictions):(i+n_predictions+n_next),:]
        b = []
        for j in range(len(tempb)):
            for k in range(dim):
                b.append(tempb[j,k])
        train_Y.append(b)
    train_X = np.array(train_X,dtype='float64')
    train_Y = np.array(train_Y,dtype='float64')

    return train_X, train_Y
# Define the normalization function
def NormalizeMult(data):
    '''
    Min-max normalization, for both single- and multi-dimensional data;
    returns the normalized data and the per-column min/max
    '''
    normalize = np.arange(2*data.shape[1],dtype='float64')
    normalize = normalize.reshape(data.shape[1],2)

    for i in range(0,data.shape[1]):

        list = data[:,i]
        listlow,listhigh =  np.percentile(list, [0, 100])

        normalize[i,0] = listlow
        normalize[i,1] = listhigh

        delta = listhigh - listlow
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data,normalize

#---------- Model training function ------------
def trainModel(train_X, train_Y):
    '''
    train_X, train_Y: the data needed to train the LSTM model
    '''
    model = Sequential()
    model.add(LSTM(
        400,
        input_shape=(train_X.shape[1], train_X.shape[2]),
        return_sequences=True))
    model.add(Dropout(0.3))

    model.add(LSTM(
        400,
        return_sequences=False))
    model.add(Dropout(0.3))

    model.add(Dense(
        train_Y.shape[1]))
    model.add(Activation("relu"))

    model.compile(loss='mse', optimizer='adam')
    model.fit(train_X, train_Y, epochs=150, batch_size=4, verbose=1)

    return model
#---------- Build the data for a test ----------
# Two-dimensional data (airline passengers and cos()), 100 points long; use the previous 30 steps to predict the next 8
dataframe = pd.read_csv('C:/Users/Dell/international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
dataset = dataframe.values
# Convert integers to float
flight = dataset.astype('float64')
flight_y = np.asarray(flight)[21:121,:].flatten()

data = np.zeros(200)
data.dtype = 'float64'
data = data.reshape(100,2)
sinx=np.arange(0,40*np.pi,2*np.pi/5,dtype='float64')
siny=np.sin(sinx)
cosx=np.arange(0,40*np.pi,2*np.pi/5,dtype='float64')
cosy=np.cos(sinx)

data[:,0] = flight_y
data[:,1] = cosy

#print(data)
fig = plt.figure(figsize=(10, 5))
plt.plot(data[:,0])
plt.show()
fig = plt.figure(figsize=(10, 5))
plt.plot(data[:,1])
plt.show()
# Apply the normalization
data,normalize = NormalizeMult(data)

train_X,train_Y = create_dataset(data,30,8)
model = trainModel(train_X,train_Y)

np.save("./MultiSteup2.npy",normalize)
model.save("./MultiSteup2.h5")

Lstm train-model3

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model

tf.random.set_seed(7)
np.random.seed(0)

#--------- Define the conversion function -----------
def create_dataset(data,n_predictions,n_next):
    '''
    Slice the data into input windows and flattened target windows
    '''
    dim = data.shape[1]
    train_X, train_Y = [], []
    for i in range(data.shape[0]-n_predictions-n_next-1):
        a = data[i:(i+n_predictions),:]
        train_X.append(a)
        tempb = data[(i+n_predictions):(i+n_predictions+n_next),:]
        b = []
        for j in range(len(tempb)):
            for k in range(dim):
                b.append(tempb[j,k])
        train_Y.append(b)
    train_X = np.array(train_X,dtype='float64')
    train_Y = np.array(train_Y,dtype='float64')

    return train_X, train_Y

# Define the normalization function
def NormalizeMult(data):
    '''
    Min-max normalization, for both single- and multi-dimensional data;
    returns the normalized data and the per-column min/max
    '''
    normalize = np.arange(2*data.shape[1],dtype='float64')
    normalize = normalize.reshape(data.shape[1],2)

    for i in range(0,data.shape[1]):

        list = data[:,i]
        listlow,listhigh =  np.percentile(list, [0, 100])

        normalize[i,0] = listlow
        normalize[i,1] = listhigh

        delta = listhigh - listlow
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data,normalize

#---------- Model training function ------------
def trainModel(train_X, train_Y):
    '''
    train_X, train_Y: the data needed to train the LSTM model
    '''
    model = Sequential()
    model.add(LSTM(
        400,
        input_shape=(train_X.shape[1], train_X.shape[2]),
        return_sequences=True))
    model.add(Dropout(0.3))

    model.add(LSTM(
        400,
        input_shape=(train_X.shape[1], train_X.shape[2]),
        return_sequences=True))
    model.add(Dropout(0.3))
    
    model.add(LSTM(
        400,
        input_shape=(train_X.shape[1], train_X.shape[2]),
        return_sequences=False))
    model.add(Dropout(0.3))
    
#     model.add(LSTM(
#         600,
#         return_sequences=False))
#     model.add(Dropout(0.3))

    model.add(Dense(
        train_Y.shape[1]))
    model.add(Activation("relu"))

    model.compile(loss='mse', optimizer='adam')
    model.fit(train_X, train_Y, epochs=100, batch_size=4, verbose=1)

    return model
#---------- Build the data for a test ----------
# Four-dimensional Gaussian data, 90 points long; use the previous 20 steps to predict the next 5
dataframe = pd.read_csv('C:/Users/Dell/Gauss_Data2.csv', usecols=[0,1,2,3], engine='python', skipfooter=0)
# usecols=[0,1,2,3] reads columns 0/1/2/3; skiprows/skipfooter=3 would skip the first/last three rows
dataset = dataframe.values
# Convert integers to float
gauss_y = dataset.astype('float64')
gauss_y1 = gauss_y[0:90,0].flatten()
gauss_y2 = gauss_y[0:90,1].flatten()
gauss_y3 = gauss_y[0:90,2].flatten()
gauss_y4 = gauss_y[0:90,3].flatten()
# gauss_y4 = gauss_y[21:71,3].flatten()
data = np.zeros(360)
data.dtype = 'float64'
data = data.reshape(90,4)
# x=np.arange(0,40*np.pi,2*np.pi/5,dtype='float64')
# siny=np.sin(x)
# cosy=np.cos(x)
# cosy2=np.cos(x)+1

data[:,0] = gauss_y1
data[:,1] = gauss_y2
data[:,2] = gauss_y3
data[:,3] = gauss_y4

#print(data)
fig = plt.figure(figsize=(10, 5))
plt.plot(data[:,0])
plt.plot(data[:,1])
plt.plot(data[:,2])
plt.plot(data[:,3])
plt.show()
fig = plt.figure(figsize=(10, 5))
plt.subplot(2,2,1)
plt.plot(data[:,0])
plt.subplot(2,2,2)
plt.plot(data[:,1])
plt.subplot(2,2,3)
plt.plot(data[:,2])
plt.subplot(2,2,4)
plt.plot(data[:,3])
plt.show()
# Apply the normalization
# data,normalize = NormalizeMult(data)

# train_X,train_Y = create_dataset(data,20,5)
# model = trainModel(train_X,train_Y)

# np.save("./MultiSteup3.npy",normalize)
# model.save("./MultiSteup3.h5")

Lstm train-model4

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model

tf.random.set_seed(7)
np.random.seed(0)

#--------- Define the conversion function -----------
def create_dataset(data,n_predictions,n_next):
    '''
    Slice the data into input windows and flattened target windows
    '''
    dim = data.shape[1]
    train_X, train_Y = [], []
    for i in range(data.shape[0]-n_predictions-n_next-1):
        a = data[i:(i+n_predictions),:]
        train_X.append(a)
        tempb = data[(i+n_predictions):(i+n_predictions+n_next),:]
        b = []
        for j in range(len(tempb)):
            for k in range(dim):
                b.append(tempb[j,k])
        train_Y.append(b)
    train_X = np.array(train_X,dtype='float64')
    train_Y = np.array(train_Y,dtype='float64')

    return train_X, train_Y

# Define the normalization function
def NormalizeMult(data):
    '''
    Min-max normalization, for both single- and multi-dimensional data;
    returns the normalized data and the per-column min/max
    '''
    normalize = np.arange(2*data.shape[1],dtype='float64')
    normalize = normalize.reshape(data.shape[1],2)

    for i in range(0,data.shape[1]):

        list = data[:,i]
        listlow,listhigh =  np.percentile(list, [0, 100])

        normalize[i,0] = listlow
        normalize[i,1] = listhigh

        delta = listhigh - listlow
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data,normalize

#---------- Model training function ------------
def trainModel(train_X, train_Y):
    '''
    train_X, train_Y: the data needed to train the LSTM model
    '''
    model = Sequential()
    model.add(LSTM(
        310,
        input_shape=(train_X.shape[1], train_X.shape[2]),
        return_sequences=True))
    model.add(Dropout(0.3))

    model.add(LSTM(
        340,
        input_shape=(train_X.shape[1], train_X.shape[2]),
        return_sequences=False))
    model.add(Dropout(0.3))
    
#     model.add(LSTM(
#         300,
#         input_shape=(train_X.shape[1], train_X.shape[2]),
#         return_sequences=False))
#     model.add(Dropout(0.3))
    
#     model.add(LSTM(
#         600,
#         return_sequences=False))
#     model.add(Dropout(0.3))

    model.add(Dense(
        train_Y.shape[1]))
    model.add(Activation("relu"))

    model.compile(loss='mse', optimizer='adam')
    model.fit(train_X, train_Y, epochs=92, batch_size=4, verbose=1)

    return model
#---------- Build the data for a test ----------
# Four-dimensional Gaussian data, 90 points long; use the previous 10 steps to predict the next 5
dataframe = pd.read_csv('C:/Users/Dell/Gauss_Data5.csv', usecols=[0,1,2,3], engine='python', skipfooter=0)
# usecols=[0,1,2,3] reads columns 0/1/2/3; skiprows/skipfooter=3 would skip the first/last three rows
dataset = dataframe.values
# Convert integers to float
gauss_y = dataset.astype('float64')
gauss_y1 = gauss_y[0:90,0].flatten()
gauss_y2 = gauss_y[0:90,1].flatten()
gauss_y3 = gauss_y[0:90,2].flatten()
gauss_y4 = gauss_y[0:90,3].flatten()

# gauss_y4 = gauss_y[21:71,3].flatten()
data = np.zeros(360)
data.dtype = 'float64'
data = data.reshape(90,4)
# x=np.arange(0,40*np.pi,2*np.pi/5,dtype='float64')
# siny=np.sin(x)
# cosy=np.cos(x)
# cosy2=np.cos(x)+1

data[:,0] = gauss_y2
data[:,1] = gauss_y4
data[:,2] = gauss_y2
data[:,3] = gauss_y4

#print(data)
fig = plt.figure(figsize=(10, 5))
plt.plot(data[:,0])
plt.plot(data[:,1])
plt.plot(data[:,2])
plt.plot(data[:,3])
plt.show()

# Apply the normalization
data,normalize = NormalizeMult(data)

train_X,train_Y = create_dataset(data,10,5)
model = trainModel(train_X,train_Y)

np.save("./MultiSteup4.npy",normalize)
model.save("./MultiSteup4.h5")

Lstm train-model5

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# This line selects which GPU to run on; otherwise execution defaults to the CPU
from tensorflow.python.keras.layers.core import Dense, Activation, Dropout
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.models import Sequential, load_model

tf.random.set_seed(7)
np.random.seed(0)

#--------- Define the conversion function -----------
def create_dataset(data,n_predictions,n_next):
    '''
    Slice the data into input windows and flattened target windows
    '''
    dim = data.shape[1]
    train_X, train_Y = [], []
    for i in range(data.shape[0]-n_predictions-n_next-1):
        a = data[i:(i+n_predictions),:]
        train_X.append(a)
        tempb = data[(i+n_predictions):(i+n_predictions+n_next),:]
        b = []
        for j in range(len(tempb)):
            for k in range(dim):
                b.append(tempb[j,k])
        train_Y.append(b)
    train_X = np.array(train_X,dtype='float64')
    train_Y = np.array(train_Y,dtype='float64')

    return train_X, train_Y

# Define the normalization function
def NormalizeMult(data):
    '''
    Min-max normalization, for both single- and multi-dimensional data;
    returns the normalized data and the per-column min/max
    '''
    normalize = np.arange(2*data.shape[1],dtype='float64')
    normalize = normalize.reshape(data.shape[1],2)

    for i in range(0,data.shape[1]):

        list = data[:,i]
        listlow,listhigh =  np.percentile(list, [0, 100])

        normalize[i,0] = listlow
        normalize[i,1] = listhigh

        delta = listhigh - listlow
        if delta != 0:
            for j in range(0,data.shape[0]):
                data[j,i]  =  (data[j,i] - listlow)/delta

    return  data,normalize

#---------- Model training function ------------
def trainModel(train_X, train_Y):
    '''
    train_X, train_Y: the data needed to train the LSTM model
    '''
    model = Sequential()
    model.add(LSTM(
        310,
        input_shape=(train_X.shape[1], train_X.shape[2]),
        return_sequences=True))
    model.add(Dropout(0.3))

    model.add(LSTM(
        320,
        input_shape=(train_X.shape[1], train_X.shape[2]),
        return_sequences=False))
    model.add(Dropout(0.3))
    
#     model.add(LSTM(
#         300,
#         input_shape=(train_X.shape[1], train_X.shape[2]),
#         return_sequences=False))
#     model.add(Dropout(0.3))
    
#     model.add(LSTM(
#         600,
#         return_sequences=False))
#     model.add(Dropout(0.3))

    model.add(Dense(
        train_Y.shape[1]))
    model.add(Activation("relu"))

    model.compile(loss='mse', optimizer='adam')
    model.fit(train_X, train_Y, epochs=100, batch_size=4, verbose=1)

    return model
#---------- Build the data for a test ----------
# Four-dimensional Gaussian data, 90 points long; use the previous 10 steps to predict the next 5
dataframe = pd.read_csv('C:/Users/Dell/Gauss_Data3.csv', usecols=[0,1,2,3], engine='python', skipfooter=0)
# usecols=[0,1,2,3] reads columns 0/1/2/3; skiprows/skipfooter=3 would skip the first/last three rows
dataset = dataframe.values
# Convert integers to float
gauss_y = dataset.astype('float64')
gauss_y1 = gauss_y[0:90,0].flatten()
gauss_y2 = gauss_y[0:90,1].flatten()
gauss_y3 = gauss_y[0:90,2].flatten()
gauss_y4 = gauss_y[0:90,3].flatten()

# gauss_y4 = gauss_y[21:71,3].flatten()
data = np.zeros(360)
data.dtype = 'float64'
data = data.reshape(90,4)
# x=np.arange(0,40*np.pi,2*np.pi/5,dtype='float64')
# siny=np.sin(x)
# cosy=np.cos(x)
# cosy2=np.cos(x)+1

data[:,0] = gauss_y4
data[:,1] = gauss_y4
data[:,2] = gauss_y4
data[:,3] = gauss_y4

#print(data)

fig = plt.figure(figsize=(10, 5))
plt.plot(data[:,0])
plt.plot(data[:,1])
plt.plot(data[:,2])
plt.plot(data[:,3])
plt.show()
fig = plt.figure(figsize=(10, 5))
plt.subplot(2,2,1)
plt.plot(data[:,0])
plt.subplot(2,2,2)
plt.plot(data[:,1])
plt.subplot(2,2,3)
plt.plot(data[:,2])
plt.subplot(2,2,4)
plt.plot(data[:,3])
plt.show()
# Apply the normalization
data,normalize = NormalizeMult(data)

train_X,train_Y = create_dataset(data,10,5)
model = trainModel(train_X,train_Y)

np.save("./MultiSteup6.npy",normalize)
model.save("./MultiSteup6.h5")
