64. Time-Series Forecasting: MA, WMA, EMA, AR, ARMA, ARIMA

1. The moving average (MA) forecasts with the plain historical mean; WMA and EMA instead weight observations by recency (a worked weight comparison follows this section's code).

# Moving averages suit series that neither grow nor fall quickly and have no seasonal component
import random as rd
import matplotlib.pyplot as plt
import math
rd.seed(1)

def dot_product(a, b):
    if len(a) == 0 or len(b) == 0 or len(a) != len(b):
        raise Exception("dot_product: wrong input")
    return sum([a[i]*b[i] for i in range(len(a))])

# Generate a synthetic AR(p) series: p random starting values, then each new
# point is a weighted sum of the previous p points plus uniform noise.
def series_generate(n, w, epsilon):
    p = len(w)
    X = [rd.random() for i in range(p)]
    for i in range(p, n):
        X.append(dot_product(X[-p:], w)+epsilon*(rd.random()-0.5))
    return X

# Naive ("yesterday") baseline: predict each point with the previous observation.
def predict_generate_yes(X, p, is_print=False):
    X_predict = X[:p] + X[p-1:-1]
    if is_print:
        print("\nloss_yes:", sum([math.pow(X[i]-X_predict[i], 2) for i in range(len(X))]), "\n")
    return X_predict

def predict_MA(X):
    if len(X) > 0:
        return sum(X)/len(X)
    else:
        raise Exception("wrong input: len(X) == 0")

def predict_generate_MA(X, p, is_print=False):
    X_predict = X[:p] + [predict_MA(X[i:i+p]) for i in range(len(X)-p)]
    if is_print:
        print("\nloss_MA:", sum([math.pow(X[i]-X_predict[i], 2) for i in range(len(X))]), "\n")
    return X_predict

# WMA weights 2i/(p(p+1)) for i = 1..p: linearly increasing and summing to 1.
def w_WMA(p):
    return [2*i/p/(p+1) for i in range(1,p+1)]

def predict_WMA(w, X):
    return sum([w[i] * X[i] for i in range(len(w))])

def predict_generate_WMA(X, p, is_print=False):
    X_predict = X[:p] + [predict_WMA(w_WMA(p), X[i:i+p]) for i in range(len(X) - p)]
    if is_print:
        print("\nloss_WMA:", sum([math.pow(X[i]-X_predict[i], 2) for i in range(len(X))]), "\n")
    return X_predict

# EMA: the observation k steps back gets weight alpha*(1-alpha)^k.
def predict_EMA(X, alpha):
    return sum([alpha*math.pow(1-alpha, len(X)-1-i)*X[i] for i in range(len(X))])

# Build the EMA prediction series for a given alpha; return (loss, predictions).
def f(X, p, alpha):
    X_predict = [x for x in X[:p]]
    if len(X) > p:
        X_predict.append(predict_EMA(X[:p], alpha))
    for i in range(len(X)-p-1):
        # fast recursive update
        X_predict.append(alpha*X[p+i]+(1-alpha)*X_predict[-1])
        # basic full-sum form (equivalent, but O(n) per step)
        # X_predict.append(predict_EMA(X[:p+i+1], alpha))
    loss = sum([math.pow(X[i]-X_predict[i], 2) for i in range(len(X))])
    return loss, X_predict

# Grid-search alpha over {0.51, ..., 0.99} and keep the lowest-loss fit.
def predict_generate_EMA(X, p, is_print=False):
    loss0, X_predict0 = f(X, p, 0.99)
    alpha0 = 0.99
    for alpha in range(99, 50, -1):
        loss, X_predict = f(X, p, alpha/100)
        if loss0 > loss:
            loss0, X_predict0 = loss, X_predict
            alpha0 = alpha/100
    if is_print:
        print("\nloss_EMA:", loss0, "\n")
    return alpha0, X_predict0

def Xs_plot(X, X_predict):
    # single-letter color codes: "bgrcmykw" (lowercase)
    plt.plot(X, color="b")
    plt.plot(X_predict, color="g")
    plt.show()

if __name__ == '__main__':

    X = series_generate(n=100, w=[0.1,0.15,0.35,0.1,0.3], epsilon=0.3)

    # In-sample fit and loss check
    x_predict_yes = predict_generate_yes(X, p=5, is_print=True)
    X_predict_MA = predict_generate_MA(X, p=5, is_print=True)
    X_predict_WMA = predict_generate_WMA(X, p=5, is_print=True)
    alpha, X_predict_EMA = predict_generate_EMA(X, p=5, is_print=True)
    Xs_plot(X, x_predict_yes)
    Xs_plot(X, X_predict_MA)
    Xs_plot(X, X_predict_WMA)
    Xs_plot(X, X_predict_EMA)

    # One-step-ahead forecasts
    result_yes = X[-1]
    result_MA = predict_MA(X[-5:])
    result_WMA = predict_WMA(w_WMA(5), X[-5:])
    result_EMA = predict_EMA(X, alpha=alpha)  # reuse the alpha tuned above
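
To make the weighting difference concrete, here is a minimal self-contained sketch; the window [1, 2, 4] and alpha = 0.5 are arbitrary illustrative choices, not values taken from the code above.

# Sketch: how MA, WMA and EMA weight the same 3-point window.
window = [1, 2, 4]   # oldest ... newest
p = len(window)

ma = sum(window) / p                                             # equal weights 1/3
w_wma = [2*i/(p*(p+1)) for i in range(1, p+1)]                   # [1/6, 2/6, 3/6]
wma = sum(wi*xi for wi, xi in zip(w_wma, window))
alpha = 0.5
ema = sum(alpha*(1-alpha)**(p-1-i)*window[i] for i in range(p))

print(ma, wma, ema)  # ~2.33, ~2.83, 2.625 -- WMA and EMA lean toward recent points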

2. Autoregression (AR) regresses the series on its own past values.

import random as rd
import matplotlib.pyplot as plt
import math
from tool.LR_linear import *   # provides Gradient() (listed in part 5)
rd.seed(1)

def dot_product(a, b):
    if len(a) == 0 or len(b) == 0 or len(a) != len(b):
        raise Exception("dot_product: wrong input")
    return sum([a[i]*b[i] for i in range(len(a))])

def series_generate(n, w, epsilon):
    p = len(w)
    X = [rd.random() for i in range(p)]
    for i in range(p, n):
        X.append(dot_product(X[-p:], w)+epsilon*(rd.random()-0.5))
    return X

def predict_AR(X, p, w):
    return dot_product(w, X[-p:])

# Fit AR(p): regress X[t] on its previous p values using the gradient routine.
def w_and_predict_generate_AR(X, p, is_print=False):
    X0 = [X[i:i+p] for i in range(len(X)-p)]
    Y0 = X[p:]
    w = Gradient(X0, Y0)
    X_predict = X[:p] + [dot_product(w, X0[i]) for i in range(len(X0))]
    if is_print:
        print("\nloss:", sum([math.pow(X_predict[i] - X[i], 2) for i in range(len(X))]), "\n")
    return w, X_predict

def Xs_plot(X, X_predict):
    # single-letter color codes: "bgrcmykw" (lowercase)
    plt.plot(X, color="b")
    plt.plot(X_predict, color="g")
    plt.show()

if __name__ == '__main__':
    X = series_generate(n=100, w=[0.1,0.15,0.35,0.1,0.3], epsilon=0.3)
    w, X_predict = w_and_predict_generate_AR(X, p=5, is_print=True)
    Xs_plot(X, X_predict + [predict_AR(X, 5, w)])
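
As a sanity check on the gradient fit, the same AR(p) weights have a closed-form least-squares solution; below is a minimal sketch with numpy, where fit_AR_lstsq is an illustrative helper rather than part of the script above.

import numpy as np

# Closed-form AR(p) fit: stack lagged windows as rows and solve least squares.
def fit_AR_lstsq(X, p):
    A = np.array([X[i:i+p] for i in range(len(X)-p)])
    y = np.array(X[p:])
    w, *_ = np.linalg.lstsq(A, y, rcond=None)
    return list(w)

# w_ls = fit_AR_lstsq(X, p=5)  # should land close to Gradient's weights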

3. ARMA augments AR with a moving-average term; here that term is approximated by the mean of the last q observations (the textbook MA term uses past forecast errors instead, as noted after the code).

import random as rd
import matplotlib.pyplot as plt
import math
from tool.LR_linear import *   # provides Gradient()
import numpy as np
rd.seed(1)

def dot_product(a, b):
    if len(a) == 0 or len(b) == 0 or len(a) != len(b):
        raise Exception("dot_product: wrong input")
    return sum([a[i]*b[i] for i in range(len(a))])

def series_generate(n, w, epsilon):
    K = len(w)
    X = [rd.random() for i in range(K)]
    for i in range(K, n):
        X.append(dot_product(X[-K:], w)+epsilon*(rd.random()-0.5))
    return X

# One-step-ahead ARMA forecast: last p values plus the mean of the last q.
def predict_ARMA(X, p, q, w):
    return dot_product(w, X[-p:]+[np.mean(X[-q:])])

# Fit on ARMA-style features: the previous p values plus the mean of the last q of them.
def w_and_predict_generate_ARMA(X, p, q, is_print=False):
    X0 = [X[i:i+p]+[np.mean(X[max(0, i+p-q):i+p])] for i in range(len(X)-p)]
    Y0 = X[p:]
    w = Gradient(X0, Y0)
    X_predict = X[:p] + [dot_product(w, X0[i]) for i in range(len(X0))]
    if is_print:
        print("\nloss:", sum([math.pow(X_predict[i] - X[i], 2) for i in range(len(X))]), "\n")
    return w, X_predict

def Xs_plot(X, X_predict):
    # single-letter color codes: "bgrcmykw" (lowercase)
    plt.plot(X, color="b")
    plt.plot(X_predict, color="g")
    plt.show()

if __name__ == '__main__':
    X = series_generate(n=100, w=[0.1,0.15,0.35,0.1,0.3], epsilon=0.3)
    w, X_predict = w_and_predict_generate_ARMA(X, p=5, q=1, is_print=True)
    Xs_plot(X, X_predict+[predict_ARMA(X, p=5, q=1, w=w)])
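
For comparison, the textbook ARMA(p, q) builds its moving-average terms from past forecast errors rather than from a mean of recent values. Assuming statsmodels is installed, the classical model can be fit on the series X from the script above as in this sketch.

# Sketch: classical ARMA(5, 1) via statsmodels (assumes the package is installed).
from statsmodels.tsa.arima.model import ARIMA as SmARIMA

sm_fit = SmARIMA(X, order=(5, 0, 1)).fit()   # order = (p, d, q); d = 0 means no differencing
next_value = sm_fit.forecast(steps=1)        # one-step-ahead forecast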

4. ARIMA first-differences the series, then runs the same mean-augmented regression on the differences (i.e., ARMA on the first differences).

import random as rd
import matplotlib.pyplot as plt
import math
from tool.LR_linear import *   # provides Gradient()
import numpy as np
rd.seed(1)

def dot_product(a, b):
    if len(a) == 0 or len(b) == 0 or len(a) != len(b):
        raise Exception("dot_product: wrong input")
    return sum([a[i]*b[i] for i in range(len(a))])

def series_generate(n, w, epsilon):
    K = len(w)
    X = [rd.random() for i in range(K)]
    for i in range(K, n):
        X.append(dot_product(X[-K:], w)+epsilon*(rd.random()-0.5))
    return X

# One-step-ahead forecast: predict the next difference, then add it to the last level.
def predict_ARIMA(X, p, q, w):
    if len(X) < p + 2:
        if len(X) == 0:
            return 0
        else:
            return sum(X)/len(X)
    dX = [X[i + 1] - X[i] for i in range(len(X) - 1)]
    # mean over the last q differences, matching the training features below
    return dot_product(dX[-p:]+[np.mean(dX[-q:])], w)+X[-1]

# Fit on first differences; map the predicted differences back to levels at the end.
def w_and_predict_generate_ARIMA(X, p, q, is_print=False):

    if len(X) < p+2:
        return [], X

    dX = [X[i + 1] - X[i] for i in range(len(X) - 1)]

    X0 = [dX[i:i+p]+[np.mean(dX[max(0, i+p-q):i+p])] for i in range(len(dX)-p)]
    Y0 = dX[p:]
    w = Gradient(X0, Y0)
    dX_predict = dX[:p] + [dot_product(w, X0[i]) for i in range(len(X0))]
    if is_print:
        print("\nloss:", sum([math.pow(dX_predict[i] - dX[i], 2) for i in range(len(dX))]), "\n")

    X_predict = [X[0]] + [X[i]+dX_predict[i] for i in range(len(dX_predict))]

    return w, X_predict

def Xs_plot(X, X_predict):
    # single-letter color codes: "bgrcmykw" (lowercase)
    plt.plot(X, color="b")
    plt.plot(X_predict, color="g")
    plt.show()

if __name__ == '__main__':
    X = series_generate(n=20, w=[0.1,0.15,0.35,0.1,0.3], epsilon=0.3)
    w, X_predict = w_and_predict_generate_ARIMA(X, p=5, q=1, is_print=True)
    Xs_plot(X, X_predict + [predict_ARIMA(X, p=5, q=1, w=w)])
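
The "I" step is just first differencing: the regression runs on differences, and levels are recovered by cumulatively adding predicted differences onto the last known value. A minimal self-contained round-trip check (the numbers are arbitrary):

# Sketch: first-difference a series, then reconstruct it exactly.
X_demo = [3.0, 5.0, 4.0, 6.5]
dX_demo = [X_demo[i+1] - X_demo[i] for i in range(len(X_demo)-1)]   # [2.0, -1.0, 2.5]

X_back = [X_demo[0]]
for d in dX_demo:
    X_back.append(X_back[-1] + d)   # cumulative sum restores the levels

assert X_back == X_demo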

5. The gradient-descent routine used by the code above (imported as tool.LR_linear)

import random as rd
import matplotlib.pyplot as plt
import math
import numpy as np


def sample_generate(n, low, sup, w, e):
    X = [[(sup - low) * rd.random() + low if j < len(w) - 1 else 1 for j in range(len(w))] for i in range(n)]
    Y = [sum([w[j] * X[i][j] for j in range(len(w))]) + e * (rd.random() - 0.5) for i in range(n)]
    return X, Y


def inv_calculate(X, Y):
    X = np.mat(X)
    Y = np.mat([[Y[i]] for i in range(len(Y))])
    w = np.linalg.inv(X.T * X) * X.T * Y
    w = w.tolist()
    return [w[i][0] for i in range(len(w))]


def LR_2_plot(X, Y, k, b):
    X = [X[i][0] for i in range(len(X))]

    x0, x1 = min(X), max(X)
    y0, y1 = k * x0 + b, k * x1 + b
    # 点、线展示
    plt.scatter(X, Y)
    plt.plot([x0, x1], [y0, y1], color='r')
    plt.show()


# Quadratic objective w^T X^T X w - 2 Y^T X w, i.e. ||Xw - Y||^2 minus the constant Y^T Y.
def f(w, X, Y):
    return (w.T * X.T * X * w - 2 * Y.T * X * w)[0, 0]


# Euclidean norm of a column vector.
def mod(dw):
    return math.sqrt(sum([dw[i, 0] * dw[i, 0] for i in range(dw.size)]))


# r sets the bracket-expansion rate (each trial step grows by a factor of 2 - r); step_min is the minimum step length
def Gradient(X, Y, r=0.8, step_min=0.00001):
    d = len(X[0])
    X = np.mat(X)
    Y = np.mat([[Y[i]] for i in range(len(Y))])
    w = [[0] for i in range(d)]
    w = np.mat(w)

    # First expand to bracket a descent interval, then shrink it by ternary search to pick the step size
    while True:
        left = 0
        right = 1
        dw = -X.T * X * w + X.T * Y
        while f(w + right * dw, X, Y) < f(w, X, Y):
            right = right * (2 - r)

        mid1 = left * 2 / 3 + right / 3
        mid2 = left / 3 + right * 2 / 3
        while abs(left - right) * mod(dw) > step_min:
            if f(w + mid1 * dw, X, Y) < f(w + mid2 * dw, X, Y):
                right = mid2
            else:
                left = mid1
            mid1 = left * 2 / 3 + right / 3
            mid2 = left / 3 + right * 2 / 3

        if left * mod(dw) < step_min:
            break
        w = w + left * dw

    w = w.tolist()
    return [w[i][0] for i in range(len(w))]


if __name__ == "__main__":
    # y = w^T x; the last feature is the constant 1, so w's last entry is the intercept
    X, Y = sample_generate(n=100, low=10, sup=20, w=[2, 3, 4], e=1)

    # closed-form solution via matrix inverse: w = (X^T X)^{-1} X^T Y
    # w = inv_calculate(X,Y)
    # print("\nk: ", w[0])
    # print("\nb: ", w[1])
    # LR_2_plot(X, Y, w[0], w[1])

    # gradient descent
    w = Gradient(X, Y)
    print("\nw: ", w)
    # print("\nk: ", w[0])
    # print("\nb: ", w[1])
    # LR_2_plot(X, Y, w[0], w[1])
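
A quick validation of the routine is to compare it with numpy's closed-form least-squares solver on the same sample. This sketch assumes the module above is saved as tool/LR_linear.py, as the earlier scripts expect; the tolerance is illustrative.

# Sketch: sanity-check Gradient against np.linalg.lstsq.
import numpy as np
from tool.LR_linear import sample_generate, Gradient

X, Y = sample_generate(n=200, low=10, sup=20, w=[2, 3, 4], e=1)
w_gd = Gradient(X, Y)
w_ls, *_ = np.linalg.lstsq(np.array(X), np.array(Y), rcond=None)
print(np.allclose(w_gd, w_ls, atol=1e-2))   # both should land near [2, 3, 4]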

 
