Time Series Prediction with Keras

Code

makegenerators.py

import os

import tushare as ts
import numpy as np
import pandas as pd


def makegenerators(ts_code='600004.SH', batch_size=1024, start_rate='', end_rate=''):
    # Load cached data if the file exists; otherwise fetch it from the network API
    data_dir = '../data/'
    if os.path.exists(data_dir + ts_code + '.csv'):
        df = pd.read_csv(data_dir + ts_code + '.csv')
    else:
        # Initialize the pro API
        # pro = ts.pro_api('******')
        # Fetch forward-adjusted (qfq) price data
        df = ts.pro_bar(ts_code=ts_code, adj='qfq')
        # Cache the data to a local file
        df.to_csv(data_dir + ts_code + '.csv', index=False)
    # ts_code, trade_date, open, high, low, close, pre_close, change, pct_chg, vol, amount, adj_factor
    # stock code, trade date, open, high, low, close, previous close, change, pct change, volume, turnover (thousand CNY)
    # Keep close, high, low, and turnover (amount)
    df = df[['close', 'high', 'low', 'amount']]
    # Reverse to chronological order (tushare returns newest rows first)
    df = df.sort_index(ascending=False)
    data = df.values
    # Find rows containing zeros or NaNs
    errindex = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            if data[i, j] == 0 or np.isnan(data[i, j]):
                errindex.append(i)
    data = np.delete(data, errindex, axis=0)  # drop the bad rows
    # Abort if any NaN survived
    if np.isnan(data).any():
        print('nan in %s' % ts_code)
        return

    # Parameters
    lookback = 261  # inputs look back 261 trading days (roughly one year)
    step = 1  # sample one data point per trading day
    delay = 22  # the target is the 22nd trading day ahead (roughly one month)
    uprate = 0.10  # predict whether the price rises 10% by the target date

    # Split the dataset
    row = len(data)
    if start_rate == '':
        start = 0
    else:
        start = int(np.floor(start_rate * row))
    if end_rate == '':
        end = row
    else:
        end = int(np.floor(end_rate * row))
    weight = end - start - lookback - delay
    # Make sure the split holds at least one full batch
    if weight // batch_size < 1:
        print('%s(%s) is too small for a batch_size(%s)' % (ts_code, weight, batch_size))
        return

    # Standardize (per-stock mean and std over the whole series)
    mean = data.mean(axis=0)
    data -= mean
    std = data.std(axis=0)
    data /= std

    # Data generator
    def generator(data, lookback, delay, uprate, min_index, max_index,
                  shuffle=False, batch_size=128, step=1):
        """
        :param data: 数据
        :param lookback: 判断依据回溯时间
        :param delay: 预测目标延迟时间
        :param uprate: 预测目标提升比例
        :param min_index: 使用的数据开始位置
        :param max_index: 使用的数据结束位置
        :param shuffle: 是否打乱
        :param batch_size: 批大小
        :param step: 数据中取数据的间隔
        :return: X[batch_size, None, data.shape[1]], y[batch_size,]
        """
        if max_index is None:
            max_index = len(data) - delay - 1
        else:
            max_index -= delay
        i = min_index + lookback
        while 1:
            if shuffle:
                rows = np.random.randint(
                    min_index + lookback, max_index, size=batch_size)
            else:
                if i + batch_size >= max_index:
                    i = min_index + lookback
                rows = np.arange(i, min(i + batch_size, max_index))
                i += len(rows)
            samples = np.zeros((len(rows),
                                lookback // step,
                                data.shape[-1]))
            targets = np.zeros((len(rows),))
            for j, r in enumerate(rows):
                indices = range(r - lookback, r, step)
                samples[j] = data[indices]
                # De-normalize both closes, then label 1 if the close `delay`
                # days ahead exceeds the last observed close by `uprate`
                future_close = data[r + delay][0] * std[0] + mean[0]
                last_close = samples[j][-1][0] * std[0] + mean[0]
                targets[j] = 1 if future_close > last_close * (1 + uprate) else 0
            yield samples, targets

    # Build the generator
    train_gen = generator(data,
                          lookback=lookback,
                          delay=delay,
                          uprate=uprate,
                          min_index=start,
                          max_index=end - 1,
                          shuffle=True,
                          step=step,
                          batch_size=batch_size)
    return train_gen, weight
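
A quick way to sanity-check a generator before training (a minimal sketch; the stock code and batch size are arbitrary examples, and it assumes ../data/ exists and a tushare token is configured):

from makegenerators import makegenerators

ret = makegenerators('600004.SH', batch_size=128)
if ret is not None:
    gen, weight = ret
    x, y = next(gen)
    print(x.shape)  # (128, 261, 4): batch, lookback window, features
    print(y.shape)  # (128,): binary targets
    print('usable samples:', weight)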

main.py

import os
import random

import tushare as ts
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd

import tensorflow as tf
import keras
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
from keras import backend as K
import keras.backend.tensorflow_backend as KTF

import makegenerators as makg

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
KTF.set_session(tf.Session(config=config))  # set the session for standalone Keras (KTF), not tf.keras

# The token only needs to be set the first time or after it expires; it stores the tushare data credential, so normally there is no need to set it repeatedly.
# ts.set_token('******')

# Load the cached code list if it exists; otherwise fetch it from the API
data_dir = '../data/'
if os.path.exists(data_dir + 'code_list.csv'):
    code_list = pd.read_csv(data_dir + 'code_list.csv')
else:
    # Initialize the pro API
    pro = ts.pro_api('******')
    # Query all companies listed on the Shanghai Stock Exchange
    code_list = pro.stock_basic(exchange='SSE', list_status='L', fields='ts_code,symbol,name,market,list_date')
    # Cache the list to a local file
    code_list.to_csv(data_dir + 'code_list.csv', index=False)
# Keep only main-board ('主板') stocks
code_list = code_list[code_list.market == '主板'][['ts_code']].values.flatten()

batch_size = 512
gen_list = []
weight_list = []
for code in code_list[0:800]:
    print(code)
    ret = makg.makegenerators(code, batch_size=batch_size)
    if ret is not None:
        gen_list.append(ret[0])
        weight_list.append(ret[1])

val_gen_list = []
val_weight_list = []
for code in code_list[800:1000]:
    print(code)
    ret = makg.makegenerators(code, batch_size=batch_size)
    if ret is not None:
        val_gen_list.append(ret[0])
        val_weight_list.append(ret[1])


# Combine the per-stock generators, sampling each with probability proportional to its sample count
def cgenerator(gen_list, weight_list):
    while True:
        gen_index = random.choices(range(len(weight_list)), weight_list)[0]
        yield next(gen_list[gen_index])


cgen = cgenerator(gen_list, weight_list)
val_cgen = cgenerator(val_gen_list, val_weight_list)
val_steps = 100
shape = 4  # number of input features (close, high, low, amount)

# Compute class weights by counting labels over one pass of the training data
# class_weight = {0: 1, 1: 3.195042915612453}  # lookback = 261, delay = 22, uprate = 0.10
# class_weight = {0: 1, 1: 37.71944922547332}  # lookback = 261, delay = 1, uprate = 0.10
# class_weight = {0: 1, 1: 1.0047035365638006}  # lookback = 261, delay = 1, uprate = 0.00
class_weight = {0: 0, 1: 0}
normal = []
train_steps = sum(weight_list) // batch_size
for i in range(train_steps):
    x, y = next(cgen)
    class_weight[0] += sum(y)  # running positive count (parked in key 0 for now)
    class_weight[1] += len(y) - sum(y)  # running negative count
    # normal.append(x)
class_weight[1] /= class_weight[0]  # weight for class 1 = negatives / positives
class_weight[0] = 1


# Compute the global mean and standard deviation
# normala = np.zeros((len(normal) * batch_size, normal[0].shape[1], normal[0].shape[2]))
# for i in range(len(normal)):
#     normala[i*batch_size:i*batch_size+batch_size] = normal[i]
# mean = normala.mean(axis=(0, 1))
# std = normala.std(axis=(0, 1))


# Visualization
def show_train_history(train_history, train_metrics, validation_metrics):
    plt.plot(train_history.history[train_metrics])
    plt.plot(train_history.history[validation_metrics])
    # plt.title('Train History')
    plt.ylabel(train_metrics)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='upper left')


# Plot the training history
def plot_history(history):
    plt.figure(figsize=(12, 8))
    plt.subplot(2, 2, 1)
    show_train_history(history, 'binary_accuracy', 'val_binary_accuracy')
    plt.subplot(2, 2, 2)
    show_train_history(history, 'loss', 'val_loss')
    plt.subplot(2, 2, 3)
    show_train_history(history, 'recall', 'val_recall')
    plt.subplot(2, 2, 4)
    show_train_history(history, 'precision', 'val_precision')
    plt.savefig('./model/auto_save.jpg')
    plt.show()


# Recall: the fraction of actual positives that are predicted positive
def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    real_true = K.sum(y_true)
    return true_positives / (real_true + K.epsilon())


# Precision: the fraction of predicted positives that are actually positive
def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predict_true = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predict_true + K.epsilon())


# When loading the model, use keras.models.load_model(path, custom_objects=dependencies)
dependencies = {
    'recall': recall,
    'precision': precision
}
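
As a quick intuition check, here are the same formulas applied to a toy batch (plain numpy stand-ins for the Keras tensors; the numbers are made up):

import numpy as np

y_true = np.array([1, 1, 0, 0, 1], dtype=float)
y_pred = np.array([0.9, 0.2, 0.8, 0.1, 0.7])  # sigmoid outputs
tp = np.sum(np.round(np.clip(y_true * y_pred, 0, 1)))  # 2 true positives
print(tp / np.sum(y_true))  # recall = 2/3: two of three positives were found
print(tp / np.sum(np.round(np.clip(y_pred, 0, 1))))  # precision = 2/3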

# **************** Model (Dense, deep): recall 0.75, precision 0.25 (400*80, does not converge)
# model = Sequential()
# model.add(layers.Flatten(input_shape=(261, shape)))
# model.add(layers.Dense(512, activation='relu'))
# model.add(layers.Dropout(0.5))
# model.add(layers.Dense(256, activation='relu'))
# model.add(layers.Dense(128, activation='relu'))
# model.add(layers.Dropout(0.5))
# model.add(layers.Dense(64, activation='relu'))
# model.add(layers.Dense(32, activation='relu'))
# model.add(layers.Dropout(0.5))
# model.add(layers.Dense(16, activation='relu'))
# model.add(layers.Dense(8, activation='relu'))
# model.add(layers.Dense(1))  # note: no sigmoid here; with binary_crossentropy this alone may explain the failure to converge
# model.compile(optimizer=keras.optimizers.RMSprop(), loss=keras.losses.binary_crossentropy,
#               metrics=[keras.metrics.binary_accuracy, recall, precision])
#
# history = model.fit_generator(cgen,
#                               steps_per_epoch=400,  # 1min/epoch
#                               epochs=120,
#                               validation_data=val_cgen,
#                               validation_steps=val_steps,
#                               class_weight=class_weight,
#                               verbose=1)
# **************** Model (GRU, Dropout, deep): recall 0.61, precision 0.27 (100*250, converges)
# model = Sequential()
# model.add(layers.GRU(32,
#                      dropout=0.1,
#                      recurrent_dropout=0.5,
#                      return_sequences=True,
#                      input_shape=(None, shape)))
# model.add(layers.GRU(64,
#                      dropout=0.1,
#                      recurrent_dropout=0.5))
# model.add(layers.Dense(64, activation='relu'))
# model.add(layers.Dense(32, activation='relu'))
# model.add(layers.Dense(1, activation='sigmoid'))
# model.compile(optimizer=keras.optimizers.RMSprop(), loss=keras.losses.binary_crossentropy,
#               metrics=[keras.metrics.binary_accuracy, recall, precision])
#
# history = model.fit_generator(cgen,
#                               steps_per_epoch=100,  # 1min/epoch
#                               epochs=1,
#                               validation_data=val_cgen,
#                               validation_steps=val_steps,
#                               class_weight=class_weight,
#                               verbose=1)
# **************** Model (Conv1D, deep): recall 0.75, precision 0.27 (1000*120, converges)
# loss 0.65, recall 0.63, precision 0.60 (1000*960, converges)
# model = Sequential()
# kernel_size = 4
# dropout_rate = 0.4
# model.add(layers.Conv1D(8, kernel_size=kernel_size, strides=2, padding='same',
#                         input_shape=(261, shape)))
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Dropout(dropout_rate))
# model.add(layers.Conv1D(16, kernel_size=kernel_size, strides=2, padding='same'))
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Dropout(dropout_rate))
# model.add(layers.Conv1D(32, kernel_size=kernel_size, strides=2, padding='same'))
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Dropout(dropout_rate))
# model.add(layers.Conv1D(64, kernel_size=kernel_size, strides=2, padding='same'))
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Dropout(dropout_rate))
# model.add(layers.Conv1D(128, kernel_size=kernel_size, strides=2, padding='same'))
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Dropout(dropout_rate))
# model.add(layers.Conv1D(256, kernel_size=kernel_size, strides=2, padding='same'))
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Dropout(dropout_rate))
# model.add(layers.Conv1D(512, kernel_size=kernel_size, strides=2, padding='same'))
# model.add(layers.BatchNormalization())
# model.add(layers.LeakyReLU())
# model.add(layers.Dropout(dropout_rate))
# model.add(layers.Flatten())
# model.add(layers.Dense(1, activation='sigmoid'))
# model.compile(optimizer=keras.optimizers.Adam(lr=0.001, epsilon=1e-8, decay=1e-4),
#               loss=keras.losses.binary_crossentropy,
#               metrics=[keras.metrics.binary_accuracy, recall, precision])
#
# checkpoint = keras.callbacks.ModelCheckpoint('./model/auto_save_best.model', monitor='val_loss',
#                                              verbose=1, save_best_only=True, mode='min')
# learning_rate_reduction = keras.callbacks.ReduceLROnPlateau(monitor='loss', patience=10,
#                                                             factor=0.5, min_lr=1e-6, verbose=1)
# callbacks_list = [checkpoint]
# history = model.fit_generator(cgen,
#                               steps_per_epoch=1000,  # 1min/epoch
#                               epochs=900,
#                               validation_data=val_cgen,
#                               validation_steps=val_steps,
#                               callbacks=callbacks_list,
#                               class_weight=class_weight,
#                               verbose=1)
# **************** Model (GRU, Dense, deep): recall 0.6, precision 0.3 (1000*500, converges)
# loss 0.62, recall 0.60, precision 0.635 (1500*260, converges)
dropout_rate = 0.4
model = Sequential()
model.add(layers.CuDNNGRU(32,
                          return_sequences=True,
                          input_shape=(None, shape)))
model.add(layers.Dropout(dropout_rate))
model.add(layers.CuDNNGRU(64))
model.add(layers.Dropout(dropout_rate))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(dropout_rate))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dropout(dropout_rate))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=keras.optimizers.RMSprop(1e-6),
              loss=keras.losses.binary_crossentropy,
              metrics=[keras.metrics.binary_accuracy, recall, precision])

# model = keras.models.load_model('./model/cudnnGRU260.model', custom_objects=dependencies)
# model.load_weights('./model/cudnnGRU260.weight')
checkpoint = keras.callbacks.ModelCheckpoint('./model/auto_save_best.model', monitor='val_loss',
                                             verbose=1, save_best_only=True, mode='min')
learning_rate_reduction = keras.callbacks.ReduceLROnPlateau(monitor='loss', patience=10,
                                                            factor=0.5, min_lr=1e-8, verbose=1)
callbacks_list = [checkpoint, learning_rate_reduction]
history = model.fit_generator(cgen,
                              steps_per_epoch=1500,  # 1min/epoch
                              epochs=120,
                              validation_data=val_cgen,
                              validation_steps=val_steps,
                              class_weight=class_weight,
                              callbacks=callbacks_list
                              )

model.save('./model/auto_save.model')
model.save_weights('./model/auto_save.weight')
plot_history(history)

Results

DNN

Quickly collapses to precision = 0; the model cannot be trained.

1DCNN

Predicting whether the price rises 10% one month ahead:
val_precision converges to roughly 27% after 300 epochs.
Predicting whether the price rises one day ahead:
val_precision converges to roughly 59.7% after 800 epochs.
Predicting whether the price rises one month ahead:
val_precision converges to roughly 66% after 2000 epochs.
(The figure showed the last 120 epochs.)

GRU

Predicting whether the price rises 10% one month ahead:
val_precision converges to roughly 27% after 300 epochs.
Predicting whether the price rises one day ahead:
val_precision converges to roughly 62% after 300 epochs.
(The figure showed the last 120 epochs.)
Predicting whether the price rises one month ahead:
val_precision converges to roughly 63% after 600 epochs.

Pitfalls and Takeaways

  • The best model reached 68.8% accuracy on the test set when predicting whether the price rises one month ahead.
  • The market data comes from tushare's API (you need to register, fill in your profile, and obtain a token before you can use it).
  • CuDNNGRU frequently threw CUDNN_STATUS_INTERNAL_ERROR in cudnnRNNForwardTraining; none of the fixes found online worked, but after lowering batch_size and adding learning-rate decay it mostly stopped happening.
  • For imbalanced datasets (e.g., predicting a 10% rise one month ahead gives a positive-to-negative ratio of about 1:3), the approach used here is the class_weight argument of fit(), which rebalances the learning weights (i.e., weights positives and negatives 3:1). This makes the training curves noisy and unstable, though, so it is better to balance at the dataset level if you can (see the first sketch after this list).
  • This project predicts time series for many stocks. To simplify dataset construction, each stock gets its own generator, and a combined generator samples them weighted by each generator's data size. The catch is that every generator preprocesses its data independently, and that preprocessing includes normalization, so each stock is normalized on its own. After noticing this, I computed the mean and standard deviation of all the data by hand and normalized everything uniformly, only to find it performed worse than per-stock normalization.
  • CuDNNGRU is much faster than GRU but not very stable; still, GRU is simply too slow, so there was no real alternative.
  • When using CuDNNGRU, training often goes fine for a while and then the loss suddenly explodes; I have not found a fix yet. The workaround is the ModelCheckpoint() callback to save intermediate models and resume from the checkpoint when it happens (see the second sketch after this list).
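
For dataset-level balancing, one option is to undersample the majority class inside each batch. A minimal sketch (balance_batch is a hypothetical helper, not part of the training code above):

import numpy as np

# Hypothetical helper: undersample the negatives in one batch (x, y) drawn from
# the combined generator so the negative:positive ratio is at most max_ratio
def balance_batch(x, y, max_ratio=1.0):
    pos = np.flatnonzero(y == 1)
    neg = np.flatnonzero(y == 0)
    k = min(len(neg), int(len(pos) * max_ratio))
    keep = np.concatenate([pos, np.random.permutation(neg)[:k]])
    np.random.shuffle(keep)
    return x[keep], y[keep]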
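
Resuming from the checkpoint after a loss explosion then looks like this (a sketch that reuses the dependencies dict and the path ModelCheckpoint writes in main.py):

# Reload the best saved model; the custom metrics must be passed explicitly
model = keras.models.load_model('./model/auto_save_best.model',
                                custom_objects=dependencies)
# Continue training from where the checkpoint left off
history = model.fit_generator(cgen,
                              steps_per_epoch=1500,
                              epochs=120,
                              validation_data=val_cgen,
                              validation_steps=val_steps,
                              class_weight=class_weight,
                              callbacks=callbacks_list)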