#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 15:49:42 2019
@author: vicky
"""
#导入数值计算库
import numpy as np
from numpy import linalg as la
import pandas as pd
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import adfuller
import statsmodels.tsa.stattools as st
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.arima_model import ARIMA
import copy
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
import warnings
from statsmodels.stats.diagnostic import acorr_ljungbox
import arch
#------- Determine the differencing order
# NOTE(review): ts_curvation is defined in an earlier chunk of this script —
# assumed to be a pandas Series with a daily DatetimeIndex (freq='D' is used
# later when fitting SARIMAX); confirm against the data-loading section.
#---- curvation, first order
# Stationarity (ADF) test: passes
adfuller(ts_curvation) # p-value < 0.05
# Autocorrelation plot: fails (slow decay)
plot_acf(ts_curvation,lags=100)
plt.show()
# White-noise (Ljung-Box) test: passes
acorr_ljungbox(ts_curvation, lags=1)# p-value < 0.05
# First difference: curvation_d
ts_curvation_d = ts_curvation.diff().dropna()
ts_curvation_d.columns = ['diff']
# Series plot
ts_curvation_d.plot(title= 'Difference Curve of Curvation')
plt.show()
# Stationarity (ADF) test: passes
adfuller(ts_curvation_d)
# Autocorrelation plot: passes
plot_acf(ts_curvation_d,lags=100)
plt.show()
# White-noise test: passes
acorr_ljungbox(ts_curvation_d, lags= 1)# p-value = 0.013; at the 0.05 level this counts as passing the randomness test
## Second difference: curvation_dd (kept for reference, disabled)
#ts_curvation_dd = ts_curvation_d.diff().dropna()
#ts_curvation_dd.columns = ['diff']
## Series plot
#ts_curvation_dd.plot(title= 'Second Order Difference Curve of Curvation')
#plt.show()
## Stationarity test: passes
#adfuller(ts_curvation_dd)
## Autocorrelation plot: passes
#plot_acf(ts_curvation_dd,lags=100)
#plt.show()
##plot_pacf(ts_curvation_dd)
##plt.show()
## White-noise test: passes
#acorr_ljungbox(ts_curvation_dd, lags= 1)# p-value < 0.01
#-------- Determine the ARIMA order
#---- Using the ACF and PACF plots
def threeplots(ts, titlename):
    """Render the three diagnostic plots for a series: the series itself,
    its ACF and its PACF.

    ts        -- series to plot (anything with a pandas-style .plot()).
    titlename -- human-readable name used in the plot titles.
    Returns 0; the plots are shown as a side effect.
    """
    # Time-series plot.
    ts.plot(figsize=(12, 6), title='Difference Curve of ' + titlename, fontsize=8)
    plt.show()
    # ACF and PACF plots. An unbounded lag count makes these unreadable,
    # so the lags are capped at 50.
    plot_acf(ts, lags=50)
    plt.title('Acf Plot of ' + titlename)
    plt.show()
    plot_pacf(ts, lags=50)
    plt.title('Pacf Plot of ' + titlename)
    plt.show()
    return 0
# Read p=5, d=1, q=5 off the diagnostic plots
threeplots(ts_curvation_d,'Curvation') #p=5,d=1,q=5
#---- Pick the order automatically via the AIC/BIC statistics
# statsmodels ships a direct helper for this:
order = st.arma_order_select_ic(ts_curvation_d,max_ar=20,max_ma=20,ic=['aic', 'bic', 'hqic'])
order.aic_min_order
order.bic_min_order
order.hqic_min_order
# Output: (0,12) (3,1) (3,1)
## Automatic order selection driven by AIC (can be switched to BIC) follows
def proper_model(data_ts, maxLag):
    """Grid-search ARMA(p, q) orders for data_ts by minimum AIC.

    Tries every (p, q) in [1, maxLag) x [1, maxLag); orders whose fit
    fails (non-stationary/non-invertible) are skipped.

    Returns (best_aic, best_p, best_q, best_fitted_model); the model is
    None and (p, q) stay (0, 0) when no order could be fitted.
    """
    init_aic = float("inf")
    init_p = 0
    init_q = 0
    init_properModel = None
    for p in range(1, maxLag):
        for q in range(1, maxLag):
            model = ARMA(data_ts, order=(p, q))
            try:
                # method='css' is faster than full MLE; disp=-1 silences output
                results_ARMA = model.fit(disp=-1, method='css')
            except Exception:
                # ARMA.fit raises for orders that fail to converge — skip them
                continue
            aic = results_ARMA.aic
            #aic = results_ARMA.bic   # swap in BIC if preferred
            print('p:', p, 'q:', q, 'aic:', aic)  # label fixed: this is the AIC, it was printed as 'bic'
            if aic < init_aic:
                init_p = p
                init_q = q
                init_properModel = results_ARMA
                init_aic = aic
    return init_aic, init_p, init_q, init_properModel
proper_model(ts_curvation_d,20) # best orders found: (12,10) / (12,1)
def findmodel(ts, maxLag):
    """Grid-search ARIMA(p, 1, q) orders for ts by minimum mean absolute
    percentage error of the back-transformed (un-differenced) fit.

    Tries every (p, q) in [1, maxLag) x [1, maxLag); orders whose fit
    fails to converge are skipped.

    Returns (best_p, best_q, best_fitted_model, best_error); the model is
    None and the error stays inf when no order could be fitted.
    """
    init_p = 0
    init_q = 0
    init_properModel = None
    init_wucha = float("inf")
    for p in range(1, maxLag):
        for q in range(1, maxLag):
            model = ARIMA(ts, order=(p, 1, q))  # ARIMA(p, d, q)
            try:
                # disp=-1 suppresses the convergence output
                results_AR = model.fit(disp=-1)
            except Exception:
                # skip orders that fail to converge
                continue
            # Undo the first difference: cumulate the fitted diffs onto the
            # first original value.
            predictions_diff = pd.Series(results_AR.fittedvalues, copy=True)
            predictions_diff_cumsum = predictions_diff.cumsum()
            # .iloc[0] replaces the deprecated .ix[0] (removed in pandas 1.0)
            predictions = pd.Series(ts.iloc[0], index=ts.index)
            predictions = predictions.add(predictions_diff_cumsum, fill_value=0)
            # Mean absolute percentage error against the original series
            wucha = sum(abs((predictions - ts) / ts)) / len(ts)
            print('p:', p, 'q:', q, 'wucha:', wucha)
            if wucha < init_wucha:
                init_p = p
                init_q = q
                init_properModel = results_AR
                init_wucha = wucha
    return init_p, init_q, init_properModel, init_wucha
findmodel(ts_curvation,20) # best found: p=9, q=6
#--------- Fit the model and predict
#---- first difference
#model = ARIMA(ts_curvation, order=(10,1,10)) # ARIMA(p,d,q) -- frequently errors out
#results_AR_curvation = model.fit(disp=-1) # disp=-1 suppresses convergence output
##results_AR_curvation.summary2()
# Use the SARIMAX state-space implementation instead
arima = sm.tsa.statespace.SARIMAX(ts_curvation_d,order=(5,0,5),freq='D',
seasonal_order=(0,0,0,0),enforce_stationarity=False, enforce_invertibility=False).fit()
arima.summary()
# SARIMAX behaves as a plain ARMAX/ARIMAX when seasonal_order is (0,0,0,0).
# NOTE: fitted on ts_curvation_d because SARIMAX.fittedvalues returns fits of
# the series it was given (here the differenced series), not of the original.
plt.plot(ts_curvation_d)
plt.plot(arima.fittedvalues, color='red')# red line: fitted values of the differenced series
rss=sum(((arima.fittedvalues-ts_curvation_d).dropna())**2)
print('RSS:',rss)
plt.title('RSS: %.4f'% rss)# residual sum of squares
plt.show()
# In-sample back-transform: cumulate the fitted diffs onto the first value
predictions_d = pd.Series(arima.fittedvalues, copy=True)
predictions_d_cumsum = predictions_d.cumsum() # running sum starting at the first fitted diff
predictions = pd.Series(ts_curvation.ix[0], index=ts_curvation.index) # NOTE(review): .ix was removed in pandas 1.0 — .iloc[0] is the modern spelling
predictions = predictions.add(predictions_d_cumsum,fill_value=0)
# Mean absolute percentage error of the back-transformed fit
#predictions=arima.fittedvalues
res=abs((predictions-ts_curvation)/ts_curvation)
wucha=sum(res)/len(ts_curvation)
print('wucha',wucha)
#0.2662830117474497
# Direction-agreement rate
rate=signrate(ts_curvation,predictions) # NOTE(review): signrate is only defined further down this file — NameError if the script is run top-to-bottom
print(rate)
#0.5611267605633803
# Back-test curve vs. the original data
plt.figure()
plt.plot(ts_curvation)
plt.plot(predictions,color='red')
plt.title(('MAE: %.4f'% wucha,'Rate: %.4f'% rate))
plt.show()
#------ White-noise test of the residuals
def jianyan(results_AR):
    """Residual (white-noise) diagnostics for a fitted ARMA/ARIMA result.

    Plots the residual ACF/PACF and a QQ-plot, prints the Durbin-Watson
    statistic and a 40-lag Ljung-Box table. Returns 0; all output is a
    side effect (plots and prints).
    """
    # ACF / PACF of the residuals
    resid = results_AR.resid
    fig = plt.figure(figsize=(12, 8))
    ax1 = fig.add_subplot(211)
    plot_acf(resid.values.squeeze(), lags=40, ax=ax1)
    ax2 = fig.add_subplot(212)
    plot_pacf(resid, lags=40, ax=ax2)
    # Durbin-Watson test. Range 0..4: DW ~ 0 -> positive autocorrelation,
    # DW ~ 4 -> negative autocorrelation, DW ~ 2 -> no first-order
    # autocorrelation. Observed here: 1.998, i.e. no autocorrelation.
    print('D-W检验结果:', sm.stats.durbin_watson(results_AR.resid.values))
    # QQ-plot against a normal distribution
    qq_fig = plt.figure()
    ax = qq_fig.add_subplot(111)
    qqplot(resid, line='q', ax=ax, fit=True)
    # Ljung-Box test. nlags=40 is made explicit: the table below hard-codes
    # 40 rows (range(1, 41)), and newer statsmodels versions changed the
    # default nlags, which would break the column lengths.
    r, q, p = sm.tsa.acf(resid.values.squeeze(), nlags=40, qstat=True)
    data = np.c_[range(1, 41), r[1:], q, p]
    table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
    print(table.set_index('lag'))
    # p-values at lags 6 and 12 both > 0.05: residuals show no autocorrelation
    return 0
jianyan(results_AR_curvation) # NOTE(review): results_AR_curvation is only created in the commented-out ARIMA fit above — NameError if run as-is
# Lags 6 and 12 < 0.05: residuals pass the test
#---- Forecast future values
results_AR=results_AR_curvation
# forecast() un-differences automatically (supported for d=1 and d=2 only)
forecast_n = 1000# number of future days to forecast
forecast_AR = results_AR.forecast(forecast_n)[0] # returns (forecast, stderr, conf_int)
print(forecast_AR)
stderr_AR=sum(results_AR.forecast(forecast_n)[1])
print(stderr_AR)
# Plotting forecasts next to the original data needs a date index, built with the arrow library:
import arrow
def get_date_range(start, limit, level='day', format='YYYY-MM-DD'):
    """Build `limit` consecutive dates starting at `start`.

    start  -- date string in arrow format `format`.
    limit  -- number of dates to generate.
    level  -- arrow range unit ('day', 'hour', ...).
    format -- arrow date format used both to parse and to re-format.

    Returns a list of datetime objects (a list, not a lazy map, so the
    result can be iterated more than once).
    """
    from datetime import datetime  # local import keeps the file's top imports untouched
    start = arrow.get(start, format)
    stamps = [dt.format(format) for dt in arrow.Arrow.range(level, start, limit=limit)]
    # pd.datetime was removed from pandas — parse with datetime directly.
    # NOTE(review): '%Y-%m-%d' only matches the default `format` argument.
    return [datetime.strptime(s, '%Y-%m-%d') for s in stamps]
# Forecast index starts the day after the last training observation
new_index = get_date_range('2019-02-25', forecast_n)
forecast_ARIMA= pd.Series(forecast_AR, copy=True, index=new_index)
print(forecast_ARIMA.head())
# Plot the original series and the forecast together
plt.plot(ts_curvation,label='Original',color='blue')
plt.plot(forecast_ARIMA, label='Forcast',color='red')
plt.legend(loc='best')
plt.title('forecast')
#---- second difference
#---- undo the differencing
predictions_dd = pd.Series(arima.fittedvalues, copy=True)
predictions_dd_cumsum = predictions_dd.cumsum() # cumulative sum from the first element
predictions_d = pd.Series(ts_curvation_d.ix[0], index=ts_curvation_d.index) # NOTE(review): .ix removed in pandas 1.0 — use .iloc[0]
predictions_d = predictions_d.add(predictions_dd_cumsum,fill_value=0) # element-wise add of aligned series
plt.figure()
plt.plot(ts_curvation_d)
plt.plot(predictions_d,color='red')
plt.show()
predictions_d = pd.Series(arima.fittedvalues, copy=True)
predictions_d_cumsum = predictions_d.cumsum() # cumulative sum from the first element
predictions = pd.Series(ts_curvation.ix[0], index=ts_curvation.index)
predictions = predictions.add(predictions_d_cumsum,fill_value=0) # element-wise add of aligned series
# Mean absolute percentage error
res=abs((predictions-ts_curvation)/ts_curvation)
wucha=sum(res)/len(ts_curvation)
print('wucha',wucha)
#0.26
# Back-test curve vs. the original data
plt.figure()
plt.plot(ts_curvation)
plt.plot(predictions,color='red')
plt.title('MAE: %.4f'% wucha)
plt.show()
#---- Alternative way to undo the differencing: compute the prediction directly from the dd series
from itertools import accumulate
y_pre=pd.Series(ts_curvation[0], index=ts_curvation.index)
for i in range(1,len(y_pre)):
    # Closed form for a double cumulative sum: weighted sum of the second
    # differences plus the linear contribution of the first diff and the level.
    d=np.array(predictions_dd[:i-1])
    w=np.array(range(i-1,0,-1))
    y_pre[i]=np.dot(w,d)+(i-1)*ts_curvation_d[0]+ts_curvation[0]
plt.figure()
plt.plot(ts_curvation)
plt.plot(y_pre,color='red')
#plt.title('MAE: %.4f'% wucha)
plt.show()
y_pre-predictions # sanity check: gap between the two reconstructions
# Mean absolute percentage error
res=abs((predictions-ts_curvation)/ts_curvation)
wucha=sum(res)/len(ts_curvation)
print('wucha',wucha)
# Back-test curve vs. the original data
plt.figure()
plt.plot(ts_curvation_d)
plt.plot(predictions_d,color='red')
# plt.title('RMSE: %.4f'% np.sqrt(sum((predictions-ts)**2)/len(ts)))
plt.title('MAE: %.4f'% wucha)
plt.show()
# Direction-agreement rate: fraction of matching first-difference signs
def signrate(a, b):
    """Fraction of steps on which series a and b move in the same direction.

    Both arguments are pandas Series; the comparison is on the sign of the
    first difference, so the first (NaN) step is dropped.
    """
    sign_a = np.sign(a.diff())
    sign_b = np.sign(b.diff())
    mismatch = (sign_a - sign_b).dropna()
    # A zero gap means identical signs at that step.
    return (mismatch == 0).sum() / len(mismatch)
# Direction-agreement rate
rate=signrate(ts_curvation,predictions)
print(rate)
#0.561
#------------ AR-ARCH model (results are poor for both first and second differences)
#--- first difference
# Fit AR(p) to curvation_d; choose p from the PACF
fig = plt.figure(figsize=(15,6))
ax1 = fig.add_subplot(111)
fig = plot_pacf(ts_curvation_d,lags = 50,ax=ax1) #p=10
# Per the plot, fit an AR(10) model as the mean equation
model = ARMA(ts_curvation_d, order=(10,0)).fit(disp=-1) #ARMA(p,q)
#---- ARCH-effect test
# Mean-equation residuals a_t = r_t - mu_t; plot residuals and squared residuals
at = ts_curvation_d - model.fittedvalues
at2 = np.square(at)
plt.figure(figsize=(10,6))
plt.subplot(211)
plt.plot(at,label = 'at')
plt.legend()
plt.subplot(212)
plt.plot(at2,label='at^2')
plt.legend(loc=0)
# Residual autocorrelation test. H0: no serial correlation; H1: serial correlation.
# Ljung-Box test
acf,q,p = sm.tsa.acf(at2,nlags=20,qstat=True) ## autocorrelations, Q statistics and p-values
out = np.c_[range(1,21), acf[1:], q, p]
output=pd.DataFrame(out, columns=['lag', "AC", "Q", "P-value"])
output = output.set_index('lag')
output
# p < 0.05: squared residuals are autocorrelated, i.e. heteroskedastic -> an ARCH effect is present.
# All parameters significant: residuals show long-range autocorrelation, so a large q is needed.
#---- Build the ARCH model
# The ARCH order can be read off the PACF of the {at^2} series:
figure = plt.figure(figsize=(20,5))
ax1 = figure.add_subplot(111)
fig = plot_pacf(at2,lags = 50, ax=ax1) #q=5
#train_ARCH = ts_slope['2012-01-04':'2019-01-04']
#test_ARCH = ts_slope['2019-01-05':'2019-02-25']
#train_ARCH = ts_slope[:-10]
#test_ARCH = ts_slope[-10:]
# Rough choice: AR(10) mean model with an ARCH(4) volatility model
train_ARCH = ts_curvation_d
am_ARCH = arch.arch_model(train_ARCH, mean='AR', lags=10, vol='ARCH', p=4)
res_ARCH = am_ARCH.fit()
res_ARCH.summary()
res_ARCH.params
# Expected daily return ~0.005%; AdjR^2 = 0.453 — a poor fit
# Goodness of fit of the ARCH model
res_ARCH.hedgehog_plot()
# Exact values differ a lot, but the mean/variance dynamics track reasonably
#res_ARCH.hedgehog_plot(type='volatility')
res_ARCH.plot()
pre_ARCH = res_ARCH.forecast(horizon=1,start=9,method='simulation')
#pre_mean=pre_ARCH.mean.iloc[0:]
pre_mean=pre_ARCH.mean
print(pre_mean)
resid=pre_ARCH.residual_variance
print(resid)
var=pre_ARCH.variance
print(var)
pre =pd.Series(pre_mean['h.1'], index=train_ARCH.index)
# Mean absolute percentage error
res=(abs((train_ARCH-pre)/pre)).dropna()
wucha=sum(res)/len(res.dropna())
print('wucha',wucha)
#22.53
# Back-test curve vs. the original data
plt.figure()
plt.plot(train_ARCH)
plt.plot(pre, color='red')# red: forecast of the differenced series
rss=sum(((pre-train_ARCH).dropna())**2)
print('RSS:',rss)
plt.title('RSS: %.4f'% rss)# residual sum of squares
plt.show()
# Back-transform to the original scale and compute the error rate
pres_d = pd.Series(pre, copy=True)
pres_d_cumsum = pres_d.cumsum() # cumulative sum from the first element
pres = pd.Series(ts_curvation.ix[0], index=ts_curvation.index) # NOTE(review): .ix removed in pandas 1.0 — use .iloc[0]
pres = pres.add(pres_d_cumsum,fill_value=0)
# Mean absolute percentage error
res=abs((pres-ts_slope)/ts_slope) # NOTE(review): compares against ts_slope (defined elsewhere) although this section models curvation — likely a copy-paste slip; confirm
wucha_arch=sum(res)/len(res)
print('wucha',wucha_arch)
#8.34
# Direction-agreement rate
rate=signrate(ts_curvation,pres)
print(rate)
#0.5233
# Back-test curve vs. the original data
plt.figure()
plt.plot(ts_curvation)
plt.plot(pres,color='red')
plt.title('MAE: %.4f'% wucha)
plt.show()
#---- second difference
# Back-transform twice and compute the error rate
pre_dd = pre
pre_dd_cumsum = pre_dd.cumsum() # cumulative sum from the first element
pre_d = pd.Series(ts_curvation_d.ix[0], index=ts_curvation_d.index) # NOTE(review): .ix removed in pandas 1.0
pre_d = pre_d.add(pre_dd_cumsum,fill_value=0) # element-wise add of aligned series
plt.figure()
plt.plot(ts_curvation_d)
plt.plot(pre_d,color='red')
plt.show()
pre_d_cumsum = pre_d.cumsum() # cumulative sum from the first element
pre_curvation = pd.Series(ts_curvation.ix[0], index=ts_curvation.index)
pre_curvation = pre.add(pre_d_cumsum,fill_value=0) # NOTE(review): adds onto `pre` rather than the constant series `pre_curvation` built on the previous line — probably a bug; confirm intent
# Mean absolute percentage error
res=abs((pre_curvation-ts_curvation)/ts_curvation).dropna()
wucha=sum(res)/len(res)
print('wucha',wucha)
#84.12
# Back-test curve vs. the original data
plt.figure()
plt.plot(ts_curvation)
plt.plot(pre_curvation,color='red')
plt.title('MAE: %.4f'% wucha)
plt.show()