Implementing a simple RNN in TensorFlow

Using a simple RNN to predict Google's stock price

Experimental data and code

import numpy as np
import tensorflow.keras as keras
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler


def RNN(x_train, y_train):
    regressor = keras.Sequential()
    # add the first SimpleRNN layer, followed by Dropout to reduce overfitting
    regressor.add(keras.layers.SimpleRNN(units=50, return_sequences=True,
                                         input_shape=(x_train.shape[1], 1)))
    # return_sequences=True means another recurrent layer is stacked on top,
    # so this layer returns a (batch_size, time_steps, units) tensor;
    # otherwise it returns a (batch_size, units) tensor
    regressor.add(keras.layers.Dropout(0.2))

    # add the second one
    regressor.add(keras.layers.SimpleRNN(units=50, activation='tanh', return_sequences=True))
    regressor.add(keras.layers.Dropout(0.2))

    regressor.add(keras.layers.SimpleRNN(units=50))
    regressor.add(keras.layers.Dropout(0.2))
    # add the output layer
    regressor.add(keras.layers.Dense(units=1))
    # compile; no accuracy metric, since accuracy only makes sense for
    # classification, not for this regression task
    regressor.compile(optimizer='adam', loss='mean_squared_error')
    regressor.fit(x=x_train, y=y_train, epochs=30, batch_size=32)
    # print(regressor.summary())
    return regressor


def visualization(real, pred):
    plt.figure(figsize=(8, 4), dpi=80, facecolor='w', edgecolor='k')
    plt.plot(real, color="orange", label="Real value")
    plt.plot(pred, color="c", label="RNN predicted result")
    plt.legend()
    plt.xlabel("Days")
    plt.ylabel("Values")
    plt.grid(True)
    plt.show()


if __name__ == "__main__":
    data = pd.read_csv(r"dataset\geogle_stock_price\archive\Google_Stock_Price_Train.csv")
    data = data.loc[:, ["Open"]].values

    train = data[:len(data) - 50]
    test = data[len(train):]
    # numpy reshape returns a new array, so the result must be assigned back
    # (train is already shaped (n, 1) here, but the explicit reshape keeps it safe)
    train = train.reshape(train.shape[0], 1)
    scaler = MinMaxScaler(feature_range=(0, 1))
    train_scaled = scaler.fit_transform(train)

    # plt.plot(train_scaled)
    # plt.show()
    X_train = []
    Y_train = []

    time_step = 50
    for i in range(time_step, train_scaled.shape[0]):
        X_train.append(train_scaled[i - time_step:i, 0])
        Y_train.append(train_scaled[i, 0])

    X_train, Y_train = np.array(X_train), np.array(Y_train)
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)

    inputs = data[len(data) - len(test) - time_step:]
    inputs = scaler.transform(inputs)
    X_test = []
    for i in range(time_step, inputs.shape[0]):
        X_test.append(inputs[i - time_step:i, 0])
    X_test = np.array(X_test)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)

    model = RNN(X_train, Y_train)
    pred = model.predict(X_test)
    pred = scaler.inverse_transform(pred)

    visualization(real=test, pred=pred)
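
The shape behavior of return_sequences described in the comments above is easy to check directly. A minimal sketch (the all-zero batch below is a made-up stand-in, used only to show the output shapes):

import numpy as np
import tensorflow.keras as keras

dummy = np.zeros((32, 50, 1), dtype="float32")  # (batch_size, time_steps, features)
seq = keras.layers.SimpleRNN(units=50, return_sequences=True)
last = keras.layers.SimpleRNN(units=50)
print(seq(dummy).shape)   # (32, 50, 50): one 50-dim output per time step
print(last(dummy).shape)  # (32, 50): only the final time step's output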
    

Prediction results

  • A simple RNN runs into a number of problems: vanishing gradients make it impractical to stack many layers, and its unidirectional propagation cannot account for the influence of later time steps on earlier ones (see the sketch below).
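
A minimal sketch of the bidirectional idea, assuming the same (time_steps, 1) input shape as the script above (this is an illustration, not part of the original code):

import tensorflow.keras as keras

model = keras.Sequential()
# one LSTM reads the window forward and a second reads it backward, so every
# position can use context from both directions; the LSTM's gates also
# mitigate the vanishing-gradient problem mentioned above
model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=50),
                                     input_shape=(50, 1)))
model.add(keras.layers.Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')

Note that a bidirectional layer needs the whole window up front, so it suits tasks where the full sequence is available; for true step-by-step forecasting only past values can be used.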

Prediction with an LSTM model

# -*- coding: utf-8 -*-
import numpy as np
import tensorflow.keras as keras
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler


def RNN(x_train, y_train):
    regressor = keras.Sequential()
    # add the first SimpleRNN layer, followed by Dropout to reduce overfitting
    regressor.add(keras.layers.SimpleRNN(units=50, return_sequences=True,
                                         input_shape=(x_train.shape[1], 1)))
    # return_sequences=True means another recurrent layer is stacked on top,
    # so this layer returns a (batch_size, time_steps, units) tensor;
    # otherwise it returns a (batch_size, units) tensor
    regressor.add(keras.layers.Dropout(0.2))

    # add the second one
    regressor.add(keras.layers.SimpleRNN(units=50, activation='tanh', return_sequences=True))
    regressor.add(keras.layers.Dropout(0.2))

    regressor.add(keras.layers.SimpleRNN(units=50))
    regressor.add(keras.layers.Dropout(0.2))
    # add the output layer
    regressor.add(keras.layers.Dense(units=1))
    # compile; an important point: unlike a CNN or other classification
    # network, we do not pass a metric such as accuracy here, because
    # accuracy is meaningless for a regression task
    regressor.compile(optimizer='adam', loss='mean_squared_error')
    regressor.fit(x=x_train, y=y_train, epochs=30, batch_size=32)
    # print(regressor.summary())
    return regressor


def LSTM(x_train, y_train):
    model = keras.Sequential()
    # a single LSTM layer with 10 units; input_shape=(None, 1) accepts
    # sequences of any length
    model.add(keras.layers.LSTM(units=10, activation='tanh', input_shape=(None, 1)))
    model.add(keras.layers.Dense(1))
    # again, no accuracy metric for a regression task
    model.compile(loss='mean_squared_error', optimizer='adam')
    # batch_size=1 updates the weights after every sample: slow, but fine here
    model.fit(x_train, y_train, batch_size=1, epochs=15)
    return model


def visualization(real, pred):
    plt.figure(figsize=(8, 4), dpi=80, facecolor='w', edgecolor='k')
    plt.plot(real, color="orange", label="Real value")
    # plt.plot(pred, color="c", label="RNN predicted result")
    plt.plot(pred, color='r', label='LSTM predicted result')
    plt.legend()
    plt.xlabel("Days")
    plt.ylabel("Values")
    plt.grid(True)
    plt.show()


if __name__ == "__main__":
    data = pd.read_csv(r"dataset\geogle_stock_price\archive\Google_Stock_Price_Train.csv")
    data = data.loc[:, ["Open"]].values

    train = data[:len(data) - 50]
    test = data[len(train):]
    # numpy reshape returns a new array, so the result must be assigned back
    # (train is already shaped (n, 1) here, but the explicit reshape keeps it safe)
    train = train.reshape(train.shape[0], 1)
    scaler = MinMaxScaler(feature_range=(0, 1))
    train_scaled = scaler.fit_transform(train)

    # plt.plot(train_scaled)
    # plt.show()
    X_train = []
    Y_train = []

    time_step = 50
    for i in range(time_step, train_scaled.shape[0]):
        X_train.append(train_scaled[i - time_step:i, 0])
        Y_train.append(train_scaled[i, 0])

    X_train, Y_train = np.array(X_train), np.array(Y_train)
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)

    inputs = data[len(data) - len(test) - time_step:]
    inputs = scaler.transform(inputs)
    X_test = []
    for i in range(time_step, inputs.shape[0]):
        X_test.append(inputs[i - time_step:i, 0])
    X_test = np.array(X_test)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)

    # model = RNN(X_train, Y_train)
    model = LSTM(X_train, Y_train)
    pred = model.predict(X_test)
    pred = scaler.inverse_transform(pred)

    visualization(real=test, pred=pred)

[Figure: real stock price (orange) vs. LSTM prediction (red)]

As the figure shows, the LSTM model's predictions are much better than the simple RNN's: it essentially reproduces the real stock price, with only a slight lag in time.
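
To back up the visual impression with a number, a simple error metric can be computed; a minimal sketch, assuming test and pred from the script above (both are arrays of the same length over the same days):

import numpy as np

# root-mean-square error between the real prices and the LSTM predictions
rmse = np.sqrt(np.mean((test.flatten() - pred.flatten()) ** 2))
print("RMSE:", round(float(rmse), 2))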
