# -*- coding: utf-8 -*-
"""
A minimalistic Echo State Networks demo with Mackey-Glass (delay 17) data
in "plain" scientific Python.
by Mantas Lukosevicius 2012
http://minds.jacobs-university.de/mantas
"""
from numpy import *
from matplotlib.pyplot import *
import scipy.linalg

# Load the data.
# The first 2000 points are used for training and points 2001-4000 for
# testing.  Within the training data, the first 100 points only warm up
# (initialize) the reservoir, so a good "echo" has formed before training
# actually starts.
trainLen = 2000
testLen = 2000
initLen = 100

data = loadtxt('MackeyGlass_t17.txt')

# Plot a sample of the loaded series.
figure(10).clear()
plot(data[0:1000])
title('A sample of data')

# Generate the ESN reservoir.
inSize = outSize = 1
resSize = 1000
# Leaking rate: the update speed of the reservoir; setting it to 1
# disables leaky integration entirely.
a = 0.3
random.seed(42)
# Randomly initialize the input weights Win and the recurrent weights W.
Win = (random.rand(resSize, 1 + inSize) - 0.5) * 1
W = random.rand(resSize, resSize) - 0.5
# Rescale W so the echo state property holds.
# Option 1 - direct scaling (quick&dirty, reservoir-specific):
# W *= 0.135
# Option 2 - normalizing and setting spectral radius (correct, slow):
print('Computing spectral radius...', end=' ')
rhoW = max(abs(linalg.eig(W)[0]))
print('done.')
W *= 1.25 / rhoW

# Allocate memory for the design (collected states) matrix.
X = zeros((1 + inSize + resSize, trainLen - initLen))
# Set the corresponding target matrix directly (one-step-ahead targets).
Yt = data[None, initLen + 1:trainLen + 1]

# Feed in all the training data, recording the input value and the
# reservoir state at every time step after the warm-up period.
x = zeros((resSize, 1))
for t in range(trainLen):
    u = data[t]
    x = (1 - a) * x + a * tanh(dot(Win, vstack((1, u))) + dot(W, x))
    if t >= initLen:
        X[:, t - initLen] = vstack((1, u, x))[:, 0]

# Fit the targets from the inputs and reservoir states with Wout.  This is
# a simple linear-regression problem, solved here with ridge regression.
reg = 1e-8  # regularization coefficient
X_T = X.T
Wout = dot(dot(Yt, X_T), linalg.inv(dot(X, X_T) +
                                    reg * eye(1 + inSize + resSize)))
# Wout = dot( Yt, linalg.pinv(X) )

# Run the trained ESN in a generative mode.  No need to re-initialize x
# here: it already holds the state reached at the end of training, and we
# simply continue from there.
Y = zeros((outSize, testLen))
u = data[trainLen]
for t in range(testLen):
    x = (1 - a) * x + a * tanh(dot(Win, vstack((1, u))) + dot(W, x))
    y = dot(Wout, vstack((1, u, x)))
    Y[:, t] = y
    # Generative mode: feed the network's own output back as input.
    u = y
    # This would be a predictive (teacher-forced) mode instead:
    # u = data[trainLen+t+1]

# Compute MSE over the first errorLen test steps.
errorLen = 500
mse = sum(square(data[trainLen + 1:trainLen + errorLen + 1] -
                 Y[0, 0:errorLen])) / errorLen
print('MSE = ' + str(mse))

# Plot target vs. free-running generated signal.
figure(1).clear()
plot(data[trainLen + 1:trainLen + testLen + 1], 'g')
plot(Y.T, 'b')
title('Target and generated signals $y(n)$ starting at $n=0$')
legend(['Target signal', 'Free-running predicted signal'])

# Plot a few reservoir activation traces.
figure(2).clear()
plot(X[0:20, 0:200].T)
title(r'Some reservoir activations $\mathbf{x}(n)$')

# Plot the learned output weights.
figure(3).clear()
bar(range(1 + inSize + resSize), Wout[0])
title(r'Output weights $\mathbf{W}^{out}$')

show()
SNE
Latest recommended article published 2023-09-22 12:05:26