LSTM Algorithm

This code example shows how to use TensorFlow and Keras to model time-series data, in this case stock prices. It first downloads GOOGL stock data, then preprocesses it by dropping rows with missing values and standardizing the features. A GRU (gated recurrent unit) network built from several GRU and Dropout layers is then trained to predict the future closing price. The model is trained, validated, and tested, and the loss and MSLE (mean squared logarithmic error) curves over the training epochs are visualized.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
"""
# 调用GPU加速
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
"""
    # pip install pandas_datareader
import pandas_datareader.data as web
import datetime 

start = datetime.datetime(2000, 1, 1)  
end = datetime.datetime(2021, 9, 1) 


df = web.DataReader('GOOGL', 'stooq', start, end)
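# Daily OHLCV data (Open, High, Low, Close, Volume) for GOOGL, indexed by trading date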

print(df)

df.dropna(inplace=True)  # drop rows with missing values


df.sort_index(inplace=True)  # make sure the index is in ascending date order
print(df)


pre_days = 10  # prediction horizon: forecast the closing price 10 trading days ahead

df['label'] = df['Close'].shift(-pre_days)  # label = future close; the last pre_days rows have no label (NaN)
print(df)

from sklearn.preprocessing import StandardScaler  

scaler = StandardScaler()  

# Standardize the feature columns (everything except the label) to zero mean and unit variance
sca_x = scaler.fit_transform(df.iloc[:, :-1])

print(sca_x)

import numpy as np
from collections import deque  
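
# Build supervised samples: each input is a sliding window of the previous men_his_days
# trading days of scaled features; the target is the close price pre_days days later.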

men_his_days = 20  # number of past trading days in each input window

deq = deque(maxlen=men_his_days)  # keeps only the most recent men_his_days rows


x = []  # feature windows, each of shape (men_his_days, n_features)

for i in sca_x:
    deq.append(list(i))
    # once the deque is full, snapshot it as one input window
    if len(deq) == men_his_days:
        x.append(list(deq))

# the last pre_days windows have no label (their future close is unknown), so drop them
x = x[:-pre_days]

print(len(x))  # 4260

# Align labels with the windows: the first complete window ends at row men_his_days - 1
y = df['label'].values[men_his_days - 1: -pre_days]
print(len(y))  


x, y = np.array(x), np.array(y)
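
# Chronological split: first 80% for training, next 10% for validation, final 10% for testing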

total_num = len(x)  
train_num = int(total_num * 0.8)  
val_num = int(total_num * 0.9)  


x_train, y_train = x[:train_num], y[:train_num]  
x_val, y_val = x[train_num:val_num], y[train_num:val_num]  
x_test, y_test = x[val_num:], y[val_num:]  

batch_size = 128 

train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.shuffle(10000).batch(batch_size)  # shuffle samples before batching

val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_ds = val_ds.batch(batch_size)

test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = test_ds.batch(batch_size)
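
# Peek at one training batch to confirm the shape fed to the network: (batch, timesteps, features)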


sample = next(iter(train_ds))  
print('x_train.shape:', sample[0].shape)  # (128, 20, 5)
print('y_train.shape:', sample[1].shape)  # (128,)

input_shape = sample[0].shape[-2:]  # (timesteps, features) = (20, 5)


inputs = keras.Input(shape=input_shape)  # [None,20,5]
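
# Stack of GRU layers: the first two return full sequences so the next GRU can consume them;
# the last GRU returns only its final state. Dropout and L2 regularization are used to limit overfitting.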


x = layers.GRU(8, activation='relu', return_sequences=True, kernel_regularizer=keras.regularizers.l2(0.01))(inputs)
x = layers.Dropout(0.2)(x)  


x = layers.GRU(16, activation='relu', return_sequences=True, kernel_regularizer=keras.regularizers.l2(0.01))(x)
x = layers.Dropout(0.2)(x)


x = layers.GRU(32, activation='relu')(x)
x = layers.Dropout(0.2)(x)


x = layers.Dense(16, activation='relu', kernel_initializer='random_normal',
                 kernel_regularizer=keras.regularizers.l2(0.01))(x)
x = layers.Dropout(0.2)(x)


outputs = layers.Dense(1)(x)


model = keras.Model(inputs, outputs)


model.summary()


model.compile(optimizer=keras.optimizers.Adam(0.001),                  # Adam, learning rate 1e-3
              loss=keras.losses.MeanAbsoluteError(),                   # train on mean absolute error
              metrics=[keras.metrics.MeanSquaredLogarithmicError()])   # also track MSLE

epochs = 10  


history = model.fit(train_ds, epochs=epochs, validation_data=val_ds)
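
# Extract per-epoch training/validation loss (MAE) and MSLE for plotting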


history_dict = history.history  
train_loss = history_dict['loss']  
val_loss = history_dict['val_loss']  
train_msle = history_dict['mean_squared_logarithmic_error']  
val_msle = history_dict['val_mean_squared_logarithmic_error'] 


plt.figure()
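# Training vs. validation loss (MAE) across epochs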
plt.plot(range(epochs), train_loss, label='train_loss') 
plt.plot(range(epochs), val_loss, label='val_loss')  
plt.legend()
plt.xlabel('epochs')
plt.ylabel('loss')
plt.show()


plt.figure()
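# Training vs. validation MSLE across epochs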
plt.plot(range(epochs), train_msle, label='train_msle')  
plt.plot(range(epochs), val_msle, label='val_msle')  
plt.legend()  
plt.xlabel('epochs')
plt.ylabel('msle')
plt.show()


model.evaluate(test_ds)  # MAE loss and MSLE on the held-out test set


y_pred = model.predict(x_test)


df_time = df.index[-len(y_test):]
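
# Plot predicted vs. actual closing prices over the test-set dates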


fig = plt.figure(figsize=(10, 5))  
axes = fig.add_subplot(111)  

axes.plot(df_time, y_test, 'b-', label='actual')

axes.plot(df_time, y_pred, 'r--', label='predict')

axes.set_xticks(df_time[::50])
axes.set_xticklabels(df_time[::50], rotation=45)

plt.legend()  
plt.grid()  
plt.show()
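
As a rough usage sketch (not part of the original script), the trained model can also be pointed at the most recent window to produce a forecast. This assumes the objects defined above (np, sca_x, men_his_days, pre_days, df, and model) are still in scope:

# Forecast the close price pre_days trading days after the last date in the dataset
latest_window = np.expand_dims(sca_x[-men_his_days:], axis=0)  # shape: (1, men_his_days, n_features)
next_close = float(model.predict(latest_window)[0, 0])
print(f'Predicted close {pre_days} trading days after {df.index[-1].date()}: {next_close:.2f}')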