IP-ESN

"""
A minimalistic Echo State Networks demo with Mackey-Glass (delay 17) data 
in "plain" scientific Python.
by Mantas Lukosevicius 2012
http://minds.jacobs-university.de/mantas
---
Modified by Xavier Hinaut: 19 November 2015.
http://www.xavierhinaut.com

Modified by Remy Portelas: 30 May 2016
https://github.com/rPortelas/ip_in_esn

Modified by Xiao Wei: 20 Oct 2020
https://goodgoodstudy.blog.csdn.net/article/details/109226320
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
import random

def set_seed(seed=None):
    """Making the seed (for random values) variable if None"""

    # Set the seed
    if seed is None:
        import time
        seed = int((time.time()*10**6) % 10**12)
        
    try:
        random.seed(seed)
        np.random.seed(seed)  # also seed NumPy: Win and W below are drawn with np.random
        print("Seed used for random values:", seed)
    except Exception:
        print("!!! WARNING !!!: Seed was not set correctly.")
    return seed

# reservoir neurons' activation function
def sigmoid(x):
    if activation_function_mode == 'tanh':
        return np.tanh(x)
    elif activation_function_mode == 'fermi':
        return 1. / (1. + np.exp(-x))
    else:
        raise Exception("ERROR: 'activation_function_mode' was not " + \
        "set correctly.")

# plot the activation of all neurons in the reservoir during 1 epoch
def plot_activity(x, epoch):
    plt.figure(epoch+42).clear()
    if activation_function_mode == 'tanh':
        plt.hist(x.ravel(), bins=200)
        plt.xlim(-1, +1)
    else:  # fermi
        plt.hist(x.ravel(), bins=100)
        plt.xlim(0, +1)
    plt.xlabel('neuron outputs')
    plt.ylabel('number of neurons')
    # compute some characteristics of the distribution
    mean = str(round(x.mean(), 2))
    med = str(round(np.median(x), 2))
    min_v = str(round(x.min(), 2))
    max_v = str(round(x.max(), 2))
    std_dev = str(round(x.std(), 2))
    plt.title('Spatio-temporal distribution of the reservoir at epoch ' + \
    str(epoch) + '\n mean = ' + mean + ' median = ' + med + ' min = ' + \
    min_v + ' max = ' + max_v + ' std_dev = ' + std_dev)
 
def plot_neuron_activity(x, epoch):
    plt.figure(epoch+84).clear()
    if activation_function_mode == 'tanh':
        plt.hist(x.ravel(), bins=200)
        plt.xlim(-1, +1)
    else:  # fermi
        plt.hist(x.ravel(), bins=100)
        plt.xlim(0, +1)
    plt.xlabel('neuron outputs')
    plt.ylabel('number of timesteps')
    plt.title('The output distribution of a single randomly chosen neuron ' + \
    'at epoch ' + str(epoch))

# load the data
trainLen = 4000  # includes prepareLen
prepareLen = 2000
testLen = 2000
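
# the first prepareLen reservoir states only wash out the initial transient;
# the readout is fit on the remaining trainLen - prepareLen states (see below)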

data = np.loadtxt('../datasets/MackeyGlass_t17.txt')
print(data.shape)
# plot some of it
# plt.figure(10).clear()
# plt.plot(data[0:1000])
# plt.title('A sample of data')
# plt.show()

# mode = 'prediction'  #given x try to predict x+1
mode = 'generative' #compute x and use it as an input to compute x+1

activation_function_mode = 'tanh'
# activation_function_mode = 'fermi'

wout_mode = 'entries bias and resOut'
# wout_mode = 'resOut and bias only'

# ip_mode = 'intrinsic plasticity on'
ip_mode = 'intrinsic plasticity off'

#IP parameter
# ip_update_mode = 'leaky neurons treated'
ip_update_mode = 'leaky neurons ignored'

# set the number of training epochs
if ip_mode == 'intrinsic plasticity on':
    if activation_function_mode == 'tanh':
        nb_epoch = 3  # IP with tanh neurons needs less time to converge
    else:
        nb_epoch = 41
else:  # without IP we don't need multiple epochs of training
    nb_epoch = 1

# generate the ESN reservoir
inSize = outSize = 1 #input/output dimension
resSize = 100 #reservoir size
a = 0.3 # leaking rate
if ip_mode == 'intrinsic plasticity on':
    spectral_radius = 1.
    reg = 0.02  # regularization coefficient
    # init Intrinsic Plasticity (IP)
    lr = 0.001  # learning rate
    if activation_function_mode == 'tanh':
        m = 0.      # mean
        sigma = 0.2 # standard deviation; 0.2 gives the best results
        var = np.square(sigma) # variance
    else: # fermi
        m = 0.2     # mean
    # instantiate matrices to store the evolution of IP's gain and bias
    ip_gain = np.ones((resSize, 1))
    ip_bias = np.zeros((resSize, 1))
    record_ip_gain = np.zeros(((nb_epoch-1) * trainLen, 1))
    record_ip_bias = np.zeros(((nb_epoch-1) * trainLen, 1))
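    # Note (added): with gain = 1 and bias = 0, the first (untrained) epoch is
    # exactly a plain ESN update: sigmoid(1 * res_in + 0) = sigmoid(res_in).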
    
    
else:           # IP off
    spectral_radius = 1.25
    reg = 1e-8  # regularization coefficient
    
input_scaling = 1.

# change the seed; reservoir performance should be averaged across at least
# 20 random instances (with the same set of parameters)
our_seed = None #Choose a seed or None
set_seed(our_seed) 

#generation of random weights
Win = (np.random.rand(resSize,1+inSize)-0.5) * input_scaling
W = np.random.rand(resSize,resSize)-0.5

# Option 1 - direct scaling (quick&dirty, reservoir-specific):
#W *= 0.135 
# Option 2 - normalizing and setting spectral radius (correct, slow):
print('Computing spectral radius...', end=' ')
rhoW = max(abs(np.linalg.eig(W)[0])) #maximal eigenvalue
print('done.')
W *= spectral_radius / rhoW
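
# Sanity check (added): after rescaling, W has the target spectral radius.
assert np.isclose(max(abs(np.linalg.eig(W)[0])), spectral_radius)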

# allocate memory for the design (collected states) matrix
if wout_mode == 'entries bias and resOut':
    X = np.zeros((1+inSize+resSize,trainLen))
elif wout_mode == 'resOut and bias only':
    X = np.zeros((1+resSize,trainLen))
else:
    raise Exception("ERROR: 'wout_mode' was not set correctly.")

#to display the spatio-temporal activity of an entire epoch
recorded_res_out = np.zeros((trainLen, resSize))

# choose a random neuron in the reservoir to create a histogram of its activations
chosen_neuron = np.random.randint(resSize)  # np.random.random_integers is deprecated
neuron_out_records = np.zeros(trainLen)

# set the corresponding target matrix directly
Yt = data[None,1:trainLen+1] 

# run the reservoir with the data and collect X
x = np.zeros((resSize,1))
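# Leaky-integrator state update used throughout:
#   x(t+1) = (1-a)*x(t) + a*f(gain * (Win.[1;u] + W.x(t)) + bias)
# with gain = 1 and bias = 0 when IP is off.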
for epoch in range(nb_epoch):
    for t in range(trainLen):
        u = data[t]
        res_in = np.dot( Win, np.vstack((1,u))) + np.dot( W, x )
        #compute reservoir activations with or without IP
        if ip_mode == 'intrinsic plasticity on':
            res_out = sigmoid( ip_gain * res_in + ip_bias )
            x = (1-a) * x + a * res_out
            #compute delta_bias considering the activation function
            #we don't want to train our network during the first epoch
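            # The updates below implement IP as a stochastic gradient step on
            # the KL divergence between each neuron's output distribution and a
            # target: an exponential with mean m for fermi units (Triesch, 2005)
            # and a Gaussian N(m, var) for tanh units (Schrauwen et al., 2008).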
            if epoch != 0:
                if activation_function_mode == 'tanh':
                    if ip_update_mode == 'leaky neurons ignored':
                        d_ip_bias = (-lr) * ((-(m / var)) + (res_out / var) * \
                        ((2 * var) + 1 - np.square(res_out) + m * res_out))
                    elif ip_update_mode == 'leaky neurons treated':
                        d_ip_bias = (-lr) * ((-(m / var)) + (x / var ) * \
                        ((2 * var) + 1 - np.square(x) + m * x))
                    else:
                        raise Exception("ERROR: 'ip_update_mode' was not " \
                        "set correctly.") 
                else: #fermi
                    if ip_update_mode == 'leaky neurons ignored':   
                        d_ip_bias = lr * (1 - (2 + (1/m)) * res_out + \
                        (np.square(res_out) / m))
                    elif ip_update_mode == 'leaky neurons treated':
                        d_ip_bias = lr * (1 - (2 + (1/m)) * x + \
                        (np.square(x) / m))
                    else:
                        raise Exception("ERROR: 'ip_update_mode' was not " \
                        "set correctly.") 
                # apply the deltas to IP's gain and bias
                ip_bias += d_ip_bias
                ip_gain += (lr / ip_gain) + (d_ip_bias * res_in)
                #store the results to plot them
                record_ip_bias[t + (trainLen * (epoch-1)),0] = ip_bias.mean()
                record_ip_gain[t + (trainLen * (epoch-1)),0] = ip_gain.mean()
               
               
        elif ip_mode == 'intrinsic plasticity off':
            res_out = sigmoid(res_in)
            x = (1-a) * x + a * res_out
        else:
            raise Exception("ERROR: 'ip_mode' was not set correctly.")
        # accumulate the activations of a randomly chosen reservoir neuron
        neuron_out_records[t] = np.round(res_out[chosen_neuron], 2)
        
        # linear regression is performed after the last epoch of training,
        # so we only store the activations of the last epoch
        if epoch == nb_epoch - 1:
            if wout_mode == 'entries bias and resOut':
                X[:,t] = np.vstack((1,u,x))[:,0]
            elif wout_mode == 'resOut and bias only':
                X[:,t] = np.vstack((1,x))[:,0]
            else:
                raise Exception("ERROR: 'wout_mode' was not set correctly.")
        #store spatio-temporal activity of the reservoir
        recorded_res_out[t] = res_out[:,0] 
        
    # plot some signals to see if IP works
    if activation_function_mode == 'tanh':
        plot_activity(recorded_res_out, epoch)
        plot_neuron_activity(neuron_out_records, epoch)
    elif activation_function_mode == 'fermi':
        if epoch % 20 == 0:
            plot_activity(recorded_res_out, epoch)
            plot_neuron_activity(neuron_out_records, epoch)
            
    # plot the evolution of gain and bias during training 
    if ip_mode == 'intrinsic plasticity on':
        plt.figure(10).clear()
        plt.plot( record_ip_gain, label='gain' )
        plt.plot( record_ip_bias, label='bias' )
        plt.legend()
        plt.ylabel('mean value')
        plt.xlabel('number of timesteps')
        plt.title('Evolution of the mean of gain and bias ' \
        'relative to intrinsic plasticity')

# train the output, using the states after prepareLen
print(X.shape,Yt.shape)
X = X[:, prepareLen:]
Yt = Yt[:, prepareLen:]
X_T = X.T

# use ridge regression (linear regression with regularization)
if wout_mode == 'entries bias and resOut':       
    Wout = np.dot( np.dot(Yt,X_T), np.linalg.inv( np.dot(X,X_T) + \
    reg*np.eye(1+inSize+resSize)))
    
elif wout_mode == 'resOut and bias only':
    Wout = np.dot( np.dot(Yt,X_T), np.linalg.inv( np.dot(X,X_T) + \
    reg*np.eye(1+resSize)))
        
else:
    raise Exception("ERROR: 'wout_mode' was not set correctly.")
    
# alternative: use the pseudo-inverse
# Wout = np.dot( Yt, np.linalg.pinv(X) )
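
# Numerically friendlier equivalent (added sketch): solve the symmetric normal
# equations instead of forming an explicit inverse; this yields the same Wout.
# Wout = np.linalg.solve(np.dot(X,X_T) + reg*np.eye(X.shape[0]), np.dot(X,Yt.T)).T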

# run the trained ESN with the test set
Y = np.zeros((outSize,testLen))
u = data[trainLen]

for t in range(testLen):
    res_in = np.dot( Win, np.vstack((1,u)) ) + np.dot( W, x )
    if ip_mode == 'intrinsic plasticity on':        
        res_out = sigmoid(ip_gain * res_in + ip_bias )
        
    elif ip_mode == 'intrinsic plasticity off':
        res_out = sigmoid(res_in)
    else:
        raise Exception("ERROR: 'ip_mode' was not set correctly.")
    x = (1-a) * x + a * res_out  
    
    if wout_mode == 'entries bias and resOut':       
        y = np.dot( Wout, np.vstack((1,u,x)) )
    
    elif wout_mode == 'resOut and bias only':
        y = np.dot( Wout, np.vstack((1,x)))
        
    else:
        raise Exception("ERROR: 'wout_mode' was not set correctly.")
      
    Y[:,t] = y
    if mode == 'generative':
        # generative mode:
        u = y
    elif mode == 'prediction':
        # predictive mode:
        u = data[trainLen+t+1] 
    else:
        raise Exception("ERROR: 'mode' was not set correctly.")

# compute MSE for the first errorLen time steps
errorLen = 1000
mse_for_each_t = np.square( data[trainLen+1:trainLen+errorLen+1] - \
Y[0,0:errorLen] )
mse = np.sum( mse_for_each_t ) / errorLen
print('MSE = ' + str( mse )) 
print('compared to max default (Mantas) error 2.91524629066e-07 '\
'(For prediction / 100 Neurons)')
print('ratio compared to (Mantas) error ' + str(mse/2.91524629066e-07) + \
'  (For prediction / 100 Neurons)')
print("") 
print('compared to max default (Mantas) error 4.06986845044e-06 '\
'(For generation / 1000 Neurons)') 
print('compared to max default (Mantas) error 2.02529702465e-08 '\
'(For prediction / 1000 Neurons)')
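
# Optional (added sketch): a scale-free figure of merit often reported for
# Mackey-Glass is the normalized RMSE:
# nrmse = np.sqrt(mse) / data[trainLen+1:trainLen+errorLen+1].std()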
plt.figure()
plt.plot(data[trainLen+1:trainLen+errorLen+1].T, label='target')
plt.plot(Y[0,0:errorLen], label='prediction')
plt.legend()

import seaborn as sns
plt.figure(figsize=(20,10))
ax = sns.heatmap(X[:,:1000])  # heatmap of the first 1000 collected state vectors

plt.figure()
plt.plot(Yt.T, color='k', label='y')
for i in range(9,10):  # plot a single collected state component (row 9 of X)
    plt.plot(X[i],label=str(i))
plt.legend()
plt.show()
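
# ---
# Appendix (added sketch, not part of the original script): if
# '../datasets/MackeyGlass_t17.txt' is unavailable, a comparable series can be
# generated by Euler-integrating the Mackey-Glass delay equation
#   dx/dt = beta*x(t-tau) / (1 + x(t-tau)**n) - gamma*x(t),   tau = 17.
# The benchmark file uses a finer integration scheme, so this reproduces the
# dynamics with the usual textbook constants, not the file byte-for-byte.
def mackey_glass(n_steps=10000, tau=17, beta=0.2, gamma=0.1, n=10, x0=1.2, dt=1.0):
    x = np.full(n_steps + tau, x0)  # constant history as the initial condition
    for t in range(tau, n_steps + tau - 1):
        x[t+1] = x[t] + dt * (beta * x[t-tau] / (1 + x[t-tau]**n) - gamma * x[t])
    return x[tau:]
# Example: np.savetxt('../datasets/MackeyGlass_t17.txt', mackey_glass(8000))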
