话不多说,直接上代码。
代码(python-tensorflow)
本代码主要使用遗传算法寻找带注意力机制的LSTM的四个参数的最优组合:“头”的个数、时间步长、以及两层隐藏层各自的神经元个数(共四个参数)。
导包:
import random
from random import choices
from random import randint
import math
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Layer
import tensorflow as tf
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from tensorflow.keras.layers import Input
from tensorflow.keras import Model
from tensorflow.keras.layers import BatchNormalization,LayerNormalization
import warnings
warnings.filterwarnings('ignore')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
多头注意力机制:
方式一,这里定义为了函数。
def multihead_self_attention(head,input1,input_size):
    """Apply multi-head self-attention with a residual connection and batch norm.

    Parameters
    ----------
    head : int
        Number of attention heads; the feature axis of ``input1`` must be
        divisible by it (the GA elsewhere in this file enforces that).
    input1 : tf.Tensor
        3-D tensor, presumably (batch, time steps, features) — the output of
        the first LSTM layer; TODO confirm against the caller.
    input_size : int
        Side length of the square projection matrices. Must equal the
        time-step axis of ``input1`` because the weights are multiplied
        from the left (see note below).

    Returns
    -------
    tf.Tensor of the same shape as ``input1``.
    """
    # Query/key/value projection weights drawn from a truncated normal
    # (mean 0, stddev 1). NOTE(review): created as fresh tf.Variables on
    # every call — Keras will not track them as layer weights.
    Wq = tf.Variable(tf.random.truncated_normal([input_size,input_size],0,1))
    Wk = tf.Variable(tf.random.truncated_normal([input_size,input_size],0,1))
    Wv = tf.Variable(tf.random.truncated_normal([input_size,input_size],0,1))
    # Linear transforms. NOTE(review): the weight is on the LEFT, so it mixes
    # the time-step axis, not the feature axis — confirm this is intended.
    q_train = tf.matmul(Wq,input1)
    k_train = tf.matmul(Wk,input1)
    v_train = tf.matmul(Wv,input1)
    # Split the feature axis into `head` chunks and stack them on the batch axis.
    Q_train = tf.concat(tf.split(q_train, head, axis=2), axis=0)
    K_train = tf.concat(tf.split(k_train,head, axis=2), axis=0)
    V_train = tf.concat(tf.split(v_train, head, axis=2), axis=0)
    # Dot-product attention scores.
    output_train = tf.matmul(Q_train,tf.transpose(K_train,[0,2,1]))
    # Scale by sqrt of the per-head key dimension.
    output_train = output_train / (K_train.get_shape().as_list()[-1] ** 0.5)
    # NOTE(review): softmax over axis=1; standard scaled dot-product attention
    # normalises over the last axis (the keys) — confirm axis=1 is intentional.
    output_train_softmax = tf.nn.softmax(output_train,axis=1)
    # Attention-weighted sum of the values.
    output_train = tf.matmul(output_train_softmax,V_train)
    # Merge the heads back onto the feature axis.
    output_train = tf.concat(tf.split(output_train, head, axis=0), axis=2 )
    # Residual connection.
    output_train +=input1
    # Normalisation.
    output_train = BatchNormalization()(output_train)
    return output_train
方式二,定义为层。
class Attention_Layer(Layer):
    """Multi-head self-attention as a Keras layer, with a residual connection.

    The square projection matrices act on the time-step axis (they are
    multiplied from the left), so ``input_size`` must equal the time-step
    dimension of the input. The feature axis must be divisible by ``head``.
    """

    def __init__(self,input_size,head):
        super(Attention_Layer,self).__init__()
        # One square projection matrix per role (query / key / value),
        # registered as trainable layer weights.
        self.Wq = self.add_weight('wq',[input_size,input_size])
        self.Wk = self.add_weight('wk',[input_size,input_size])
        self.Wv = self.add_weight('wv',[input_size,input_size])
        self.head = head

    def call(self,input1):
        heads = self.head
        # Project the input (weights on the left: they mix the time axis).
        queries = tf.matmul(self.Wq, input1)
        keys = tf.matmul(self.Wk, input1)
        values = tf.matmul(self.Wv, input1)
        # Split the feature axis into `heads` chunks stacked on the batch axis.
        q_heads = tf.concat(tf.split(queries, heads, axis=2), axis=0)
        k_heads = tf.concat(tf.split(keys, heads, axis=2), axis=0)
        v_heads = tf.concat(tf.split(values, heads, axis=2), axis=0)
        # Scaled dot-product attention scores.
        scale = k_heads.get_shape().as_list()[-1] ** 0.5
        scores = tf.matmul(q_heads, tf.transpose(k_heads, [0, 2, 1])) / scale
        weights = tf.nn.softmax(scores, axis=1)
        # Weighted sum of the values, then undo the head split.
        attended = tf.matmul(weights, v_heads)
        merged = tf.concat(tf.split(attended, heads, axis=0), axis=2)
        # Residual connection (normalisation is left to the caller).
        return merged + input1
带注意力机制的LSTM:放在第一层LSTM层后面,主要给第一层的输出施加注意力权重。
这里将注意力机制以函数的形式嵌入LSTM网络结构,训练时Keras会提示警告,建议以自定义层(Layer)的形式代入,但这并不影响模型的正常训练。
def msa_ga_lstm(window,dimension,head,lstm1_units,lstm2_units):
    """Build an attention-augmented two-layer LSTM regression model.

    Parameters
    ----------
    window : int        -- number of time steps per sample.
    dimension : int     -- number of features per time step.
    head : int          -- attention heads; ``lstm1_units`` must be divisible by it.
    lstm1_units : int   -- width of the first LSTM layer.
    lstm2_units : int   -- width of the second LSTM layer.

    Returns a compiled Keras ``Model`` (MSE loss, Adam optimizer).

    NOTE: the attention is embedded as a plain function; Keras warns that a
    ``Layer`` subclass should be used instead, but training still works.
    """
    inputs = Input(shape=(window,dimension))
    lstm1_outputs = LSTM(lstm1_units,return_sequences=True,kernel_regularizer='l2')(inputs)
    # Attention over the first LSTM's output; input_size must equal `window`
    # because the projection weights multiply from the left.
    msa_outputs = multihead_self_attention(head,lstm1_outputs,window)
    lstm2_outputs = LSTM(lstm2_units,kernel_regularizer='l2')(msa_outputs)
    dropout_outputs = Dropout(0.2)(lstm2_outputs)
    dense1_units = 32
    dense1_outputs = Dense(dense1_units,kernel_initializer="uniform",activation='relu')(dropout_outputs)
    dense2_outputs = Dense(1)(dense1_outputs)
    model = Model(inputs,dense2_outputs)
    # This is a regression head: 'accuracy' is a classification metric and was
    # meaningless here, so track mean absolute error instead.
    model.compile(loss='mse',optimizer='adam', metrics=['mae'])
    return model
也可以以层代入:
def msa_ga_lstm(window,dimension,head,lstm1_units,lstm2_units):
    """Build the attention-augmented two-layer LSTM using ``Attention_Layer``.

    Same contract as the function-based variant: ``window`` time steps of
    ``dimension`` features in, a single regression output, compiled with
    MSE loss and Adam. ``lstm1_units`` must be divisible by ``head``.
    """
    inputs = Input(shape=(window,dimension))
    lstm1_outputs = LSTM(lstm1_units,return_sequences=True)(inputs)
    # Attention as a proper Keras layer (weights tracked by the model);
    # input_size must equal `window` — the projections act on the time axis.
    msa_outputs = Attention_Layer(window,head)(lstm1_outputs)
    bn_outputs = BatchNormalization()(msa_outputs)
    lstm2_outputs = LSTM(lstm2_units)(bn_outputs)
    dropout_outputs = Dropout(0.2)(lstm2_outputs)
    dense1_units = 32
    dense1_outputs = Dense(dense1_units,kernel_initializer="uniform",activation='relu')(dropout_outputs)
    dense2_outputs = Dense(1)(dense1_outputs)
    model = Model(inputs,dense2_outputs)
    # Regression head: 'accuracy' was meaningless here; report MAE instead.
    model.compile(loss='mse',optimizer='adam', metrics=['mae'])
    return model
遗传算法优化:
data_helper函数用来将数据集处理成LSTM模型能够输入的数据格式。这里附上我使用的,具体根据个人需要。
#划分训练集、测试集
def data_helper(df1,df2,time_frame):
    """Slice two aligned DataFrames into LSTM windows and split train/test.

    Parameters
    ----------
    df1 : pandas.DataFrame -- normalised data the model trains on.
    df2 : pandas.DataFrame -- raw data, used only to locate the split point.
    time_frame : int       -- window length (time steps per sample).

    Returns ``[x_train, y_train, x_test, y_test]`` where each x has shape
    (samples, time_frame, features) and each y is the last column of the
    window's final row.
    """
    datavalue1 = df1.values
    datavalue2 = df2.values
    result1 = []
    result2 = []
    # Sliding windows of length time_frame+1; the extra last row is the label row.
    for index in range( len(datavalue1) - (time_frame) ):
        result1.append(datavalue1[index: index + (time_frame+1) ])
    for index in range( len(datavalue2) - (time_frame) ):
        result2.append(datavalue2[index: index + (time_frame+1) ])
    result1 = np.array(result1)
    result2 = np.array(result2)
    position = []
    # Dataset-specific: scan windows 7000-9000 of the raw data for label rows
    # whose column 9 equals 4.8, and use the first hit as the train/test split.
    # NOTE(review): exact float equality (== 4.8) is fragile, and position[0]
    # raises IndexError if nothing matches — confirm the data guarantees a hit.
    for i in range(7000,9000):
        if result2[i,:,:][time_frame,9] == 4.8: # dataset-specific marker; safe to ignore
            position.append(i)
    # Training split: everything before the marker; label = last column of the label row.
    x_train = result1[:int(position[0]), :-1]
    y_train = result1[:int(position[0]), -1][:,-1]
    # Test split: everything from the marker on.
    x_test = result1[int(position[0]):, :-1]
    y_test = result1[int(position[0]):, -1][:,-1]
    return [x_train, y_train, x_test, y_test]
#损失函数
def fitness(model):
    """Train `model` and return the reciprocal of its summed training loss.

    The time window is read back from the model's own input shape, so the
    data is re-windowed to match each candidate. Relies on the module-level
    globals ``data_norm`` and ``data``. Higher return value = better model.
    """
    splits = data_helper(data_norm, data, model.input_shape[1])
    x_train, y_train, x_test, y_test = [part.astype(np.float32) for part in splits]
    history = model.fit(x_train, y_train, validation_split=0.1,
                        epochs=100, batch_size=64, verbose=1)
    total_loss = np.sum(history.history['loss'])
    return 1 / total_loss
#生成head和lstm1
def head_lstm1(head_lower:int,head_upper:int,LSTM1_lower:int,LSTM1_upper:int):
    """Draw a (head count, lstm1 width) pair with lstm1 divisible by head.

    Keeping lstm1 a multiple of the head count lets the attention split the
    feature axis evenly, with no padding needed downstream.

    Returns two single-element lists ``([head], [lstm1])`` — kept as lists
    for compatibility with existing callers that index ``[0]``.

    Raises ValueError if no multiple of the drawn head count lies in
    ``[LSTM1_lower, LSTM1_upper]`` (the original rejection sampler would
    silently return a non-divisible value after 10000 failed draws).
    """
    x = random.randint(head_lower, head_upper)
    # Enumerate the multiples of x inside the range directly instead of
    # rejection sampling: guaranteed divisible, no retry loop.
    first_multiple = -(-LSTM1_lower // x) * x   # smallest multiple >= lower bound
    if first_multiple > LSTM1_upper:
        raise ValueError(
            f"no multiple of head={x} in [{LSTM1_lower}, {LSTM1_upper}]")
    y = random.randrange(first_multiple, LSTM1_upper + 1, x)
    return [x], [y]
#创建初始群体
def generate_population(size: int, head_lower: int, head_upper: int,
                        window_lower: int, window_upper: int,
                        LSTM1_lower: int, LSTM1_upper: int,
                        LSTM2_lower: int, LSTM2_upper: int,
                        dimension: int = 12):
    """Create the initial GA population of `size` compiled candidate models.

    Each candidate draws: a time window in [window_lower, window_upper], a
    head count in [head_lower, head_upper], an lstm1 width in
    [LSTM1_lower, LSTM1_upper] divisible by the head count (via head_lstm1),
    and an lstm2 width in [LSTM2_lower, LSTM2_upper].

    `dimension` is the number of input features per time step (default 12,
    which was previously hard-coded). All bounds are ints — the original
    `float` annotations were wrong, since they feed random.randint.
    """
    pop = []
    for _ in range(size):
        lstm2 = random.randint(LSTM2_lower, LSTM2_upper)
        window = random.randint(window_lower, window_upper)
        head, lstm1 = head_lstm1(head_lower, head_upper, LSTM1_lower, LSTM1_upper)
        model = msa_ga_lstm(window, dimension, head[0], lstm1[0], lstm2)
        pop.append(model)
    return pop
#计算每个网络结构下的适应度
def rank_population(population):
    """Evaluate every model and pair it with its fitness: [(fitness, model), ...]."""
    return [(fitness(model), model) for model in population]
#选择神经网络
def select_pair(ranked_population):
    """Fitness-proportionate (roulette-wheel) selection, with replacement.

    `ranked_population` is a list of (fitness, model) tuples; returns a list
    of the same length whose entries are drawn from it with probability
    proportional to fitness. The same individual may be picked repeatedly.
    """
    scores = np.array([entry[0] for entry in ranked_population], dtype=float)
    probabilities = scores / scores.sum()
    drawn = np.random.choice(len(ranked_population),
                             size=len(ranked_population),
                             replace=True,
                             p=probabilities)
    pair = [ranked_population[index] for index in drawn]
    print('选中的个体:',pair)
    return pair
#交叉(算术交叉)
def cross_over(pair):
    """Single-gene arithmetic crossover over [head, window, lstm1, lstm2].

    Each selected individual's four hyper-parameters are recovered from its
    model's layer shapes, then adjacent pairs (0,1), (2,3), ... are crossed
    with probability 0.6 at one randomly chosen gene position.

    NOTE(review): the layer indices used for extraction (1 = first LSTM,
    4 = attention head-split, 20 = second LSTM) are tied to the exact
    msa_ga_lstm architecture — re-check them if the model changes.

    Fixes vs the original:
      * the second child's blend used the ALREADY-OVERWRITTEN first child's
        gene; arithmetic crossover must blend both parents' original values;
      * the four identical per-position branches are collapsed into one;
      * the hard-coded index list [0, 2, ..., 18] assumed a population of
        exactly 20 — now works for any even-sized population.
    """
    # Recover [head, window, lstm1, lstm2] from each model.
    parameter = []
    for entry in pair:
        model = entry[1]
        window_1 = model.layers[1].output_shape[1]
        head_1 = int(model.layers[1].output_shape[2] / model.layers[4].output_shape[0][2])
        lstm1_1 = model.layers[1].output_shape[2]
        lstm2_1 = model.layers[20].output_shape[1]
        parameter.append([head_1, window_1, lstm1_1, lstm2_1])
    cross_position = random.randint(0, 3)   # which gene to cross this generation
    print('parameter:', parameter)
    print('交叉位置:', cross_position)
    cross_rate = 0.6    # probability that a given pair actually crosses
    a = 0.3             # arithmetic-crossover blending coefficient
    parameter_cross_result = []
    for j in range(0, len(parameter) - 1, 2):
        x1 = parameter[j]
        x2 = parameter[j + 1]
        if np.random.rand() < cross_rate:
            # Blend using BOTH parents' original gene values.
            v1, v2 = x1[cross_position], x2[cross_position]
            x1[cross_position] = round(a * v2 + (1 - a) * v1)
            x2[cross_position] = round(a * v1 + (1 - a) * v2)
        parameter_cross_result.append(x1)
        parameter_cross_result.append(x2)
    print('parameter_cross_result:', parameter_cross_result)
    return parameter_cross_result
#变异(不合格lstm1必变异)
def _compatible_lstm1(head):
    """Rejection-sample an lstm1 width in [30, 100] divisible by `head`.

    Keeps lstm1 a multiple of the head count so the attention head-split
    needs no padding. Mirrors the original bounded retry loop.
    """
    candidate = random.randint(30, 100)
    for _ in range(10000):
        if candidate % head == 0:
            break
        candidate = random.randint(30, 100)
    return candidate


def mutation(children):
    """Mutate chromosomes [head, window, lstm1, lstm2] and rebuild models.

    One gene position is picked per generation; each chromosome mutates at
    that position with probability 0.05. Whether or not a chromosome
    mutates, lstm1 is repaired to stay divisible by the head count (an
    "unqualified lstm1 always mutates"). Returns the list of freshly
    compiled candidate models.

    Fixes vs the original: the identical divisibility-repair loop appeared
    three times (extracted into _compatible_lstm1), and the inner retry loop
    shadowed the outer loop variable `i`.
    """
    mutation_rate = 0.05
    mutation_position = random.randint(0, 3)    # gene mutated this generation
    print('变异位置:', mutation_position)
    parameter_mutation_result = []
    for genes in children:
        if np.random.rand() < mutation_rate:
            if mutation_position == 0:          # head
                genes[0] = random.randint(6, 10)
            if mutation_position == 1:          # time window
                genes[1] = random.randint(10, 50)
            if mutation_position == 2:          # lstm1 (drawn divisible)
                genes[2] = _compatible_lstm1(genes[0])
            if mutation_position == 3:          # lstm2
                genes[3] = random.randint(30, 100)
        # Repair step: lstm1 must remain a multiple of the head count
        # (crossover or a head mutation may have broken divisibility).
        if genes[2] % genes[0] != 0:
            genes[2] = _compatible_lstm1(genes[0])
        parameter_mutation_result.append(genes)
    print('parameter_mutation_result:', parameter_mutation_result)
    # Materialise each chromosome back into a compiled model
    # (12 = feature dimension of the dataset).
    pair = []
    for mutation_parameter in parameter_mutation_result:
        model = msa_ga_lstm(mutation_parameter[1], 12, mutation_parameter[0],
                            mutation_parameter[2], mutation_parameter[3])
        pair.append(model)
    return pair
#个体20,头:6-10,时间窗口:10-50,lstm1:30-100,lstm2:30-100
# GA driver: population of 20; head: 6-10; window: 10-50; lstm1: 30-100; lstm2: 30-100.
pop = generate_population(20,6,10,10,50,30,100,30,100)
ranked_pop = rank_population(pop)
# Sort parents by fitness (ascending).
# NOTE(review): sorting (fitness, model) tuples falls back to comparing the
# model objects when two fitness values tie, which raises TypeError —
# confirm ties cannot occur, or sort on the first element only.
ranked_pop.sort()
# Run 40 generations of select -> crossover -> mutate -> re-evaluate.
N = 40
i = 0
while i < N:
    print('父代信息:')
    # Each parent's hyper-parameters are read back from its layer shapes
    # (layer 1 = first LSTM, layer 4 = attention head-split, layer 20 = second LSTM).
    for p in ranked_pop:
        print('val:',p[0],'head:',int(p[1].layers[1].output_shape[2]/p[1].layers[4].output_shape[0][2]),'window:',p[1].layers[1].output_shape[1],
              'lstm1:',p[1].layers[1].output_shape[2],'lstm2:',p[1].layers[20].output_shape[1])
    print('进行选择:')
    pair = select_pair(ranked_pop)
    print('进行交叉:')
    children = cross_over(pair)
    print('进行变异:')
    mutated_pop = mutation(children)
    # Evaluate the offspring models; the offspring fully replace the parents
    # (generational replacement, no elitism).
    ranked_mutated_pop = rank_population(mutated_pop)
    print('子代模型的val和模型:',ranked_mutated_pop)
    ranked_pop = ranked_mutated_pop
    ranked_pop.sort()
    print("--------- 遗传", i, " 完成 ---------")
    i +=1
# Report the final generation (sorted ascending, so the best model is last).
for p in ranked_pop:
    print('val:',p[0],'head:',int(p[1].layers[1].output_shape[2]/p[1].layers[4].output_shape[0][2]),'window:',p[1].layers[1].output_shape[1],
          'lstm1:',p[1].layers[1].output_shape[2],'lstm2:',p[1].layers[20].output_shape[1])
结语:
本文内容均由个人查询资料和摸索形成,其中不免有些错误,如有错,请指出。本文代码提供思路,形成自己的逻辑最为重要。相互学习,相互进步,加油!!!