1. Attention Mechanism Model
In general, a fully connected network for a linear model looks like the following:
Figure 1. Fully connected neural network of the linear model
Figure 1 shows a fully connected neural network in which all features have equal standing. During forward propagation there are weights between the nodes of one layer and the next, and every node is treated equally during training. In practice, however, only a subset of the features deserves attention.
With an attention mechanism, a given layer of the network can focus on particular features, and the features to attend to are adjusted continuously during backpropagation. For example, when observing a person's movement, we do not examine the pose of every body part from head to toe; we focus on the joints. In practice we found that the state of the knees alone is enough to tell whether a person is standing, sitting, or squatting.
Figure 2. Linear-model neural network with an attention mechanism added
An attention function can be described as mapping a Query and a set of Key-Value pairs to an output, where the Query, Keys, Values, and output are all vectors. The output is computed as a weighted sum of the Values, where the weight assigned to each Value is computed by a compatibility function of the Query and the corresponding Key. Depending on how the similarity between the Query (q) and a Key (k) is measured, several attention scoring functions are common (see https://www.jianshu.com/p/3968af85d3cb):
Additive model: $s(q, k) = v^{\top}\tanh(Wq + Uk)$
Dot product: $s(q, k) = q^{\top}k$
Cosine similarity: $s(q, k) = \dfrac{q^{\top}k}{\lVert q \rVert \, \lVert k \rVert}$
MLP network: $s(q, k) = \mathrm{MLP}([q; k])$
The scores are normalized with a softmax function. To keep the products from growing too large, they are first multiplied by a scaling factor $1/\sqrt{d_k}$, where $d_k$ is the dimension of the Key vectors. The resulting weight coefficients are:

$$\alpha_i = \mathrm{softmax}\left(\frac{s(q, k_i)}{\sqrt{d_k}}\right) = \frac{\exp\left(s(q, k_i)/\sqrt{d_k}\right)}{\sum_{j}\exp\left(s(q, k_j)/\sqrt{d_k}\right)}$$
The attention value for the Query is then the weighted sum of the Values:

$$\mathrm{Attention}(q, K, V) = \sum_{i} \alpha_i v_i$$
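To make these formulas concrete, here is a minimal sketch of standard scaled dot-product attention in PyTorch. The function name and shapes are our own; the class implemented in this post uses a simplified element-wise variant of this.

import torch
import torch.nn.functional as F

def scaled_dot_product_attention(q, k, v):
    # q: (n_q, d_k), k: (n_kv, d_k), v: (n_kv, d_v)
    d_k = q.shape[-1]
    # compatibility scores s(q, k) = q . k, scaled by sqrt(d_k)
    scores = q @ k.transpose(-2, -1) / (d_k ** 0.5)   # (n_q, n_kv)
    # softmax over the keys gives the weight coefficients alpha_i
    weights = F.softmax(scores, dim=-1)
    # output is the weighted sum of the values
    return weights @ v                                # (n_q, d_v)

# usage: 2 queries attending over 5 key-value pairs
q = torch.randn(2, 8); k = torch.randn(5, 8); v = torch.randn(5, 16)
out = scaled_dot_product_attention(q, k, v)  # shape (2, 16)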
The model in this post implements this process in simplified form, using an element-wise product of k and q instead of a full dot product:
# Define the attention mechanism model
import torch
import torch.nn as nn
import torch.nn.functional as F

class Atten_model(torch.nn.Module):
    def __init__(self, in_dim, out_dim):
        super(Atten_model, self).__init__()
        # k, q, v each map x from in_dim to out_dim, expanding the features;
        # each feature gets its own weight
        self.k = nn.Linear(in_dim, out_dim)
        self.q = nn.Linear(in_dim, out_dim)
        self.v = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        k = self.k(x)
        q = self.q(x)
        v = self.v(x)
        # element-wise product of k and q, scaled by sqrt(d) and softmax-normalized
        # (the scale is cast to float so torch.sqrt works on all PyTorch versions)
        atten = F.softmax((k * q) / torch.sqrt(torch.tensor(v.shape[1], dtype=torch.float32)), dim=1)
        # weight the values by the attention coefficients
        out = atten * v
        return out
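As a quick sanity check (hypothetical usage, not from the original post), the module can be applied to a random batch:

# 5 samples with 4 features each, expanded to 8 attention-weighted features
layer = Atten_model(4, 8)
out = layer(torch.randn(5, 4))
print(out.shape)  # torch.Size([5, 8])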
2. Code
2.1 Loading the Data
Import load_iris from sklearn.datasets to load the iris dataset. The data contains 150 samples, each with 4 features (in cm): SepalLengthCm (sepal length), SepalWidthCm (sepal width), PetalLengthCm (petal length), and PetalWidthCm (petal width), covering 3 iris classes: setosa, versicolor, and virginica. The task is supervised classification of the iris species from these features. The data is split into training, validation, and test sets in an 8:1:1 ratio.
# Import the iris dataset loader
# (assumes torch, numpy as np, and device are already defined, as in the appendix)
from sklearn.datasets import load_iris
# Load the iris data
iris = load_iris()
# Feature data, numpy.ndarray, 150×4
x = iris.data
print(x.shape)
# Labels, numpy.ndarray, shape (150,)
target = iris.target
print(target.shape)
# Move to device
x = torch.tensor(x, dtype=torch.float32).to(device)
target = torch.tensor(target, dtype=torch.long).to(device)
# Split into train, val, and test sets
ids = np.arange(x.shape[0])
np.random.seed(666)
np.random.shuffle(ids)
train_ids = ids[0: int(x.shape[0] * 0.8)]
val_ids = ids[int(x.shape[0] * 0.8): int(x.shape[0] * 0.9)]
test_ids = ids[int(x.shape[0] * 0.9): ]
x_train, target_train = x[train_ids], target[train_ids]
x_val, target_val = x[val_ids], target[val_ids]
x_test, target_test = x[test_ids], target[test_ids]
# print(x_test, target_test, y_test)
2.2 Attention Mechanism Model
# Define the attention mechanism model
class Atten_model(torch.nn.Module):
    def __init__(self, in_dim, out_dim):
        super(Atten_model, self).__init__()
        # k, q, v each map x from in_dim to out_dim; each feature gets its own weight
        self.k = nn.Linear(in_dim, out_dim)
        self.q = nn.Linear(in_dim, out_dim)
        self.v = nn.Linear(in_dim, out_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        k = self.k(x)
        q = self.q(x)
        v = self.v(x)
        # element-wise product of k and q, scaled and softmax-normalized
        atten = F.softmax((k * q) / torch.sqrt(torch.tensor(v.shape[1], dtype=torch.float32)), dim=1)
        # weight the values by the attention coefficients
        out = atten * v
        return self.relu(out)
class NeuralNet(torch.nn.Module):
    def __init__(self, in_dim, out_dim):
        super(NeuralNet, self).__init__()
        self.layer_1 = Atten_model(in_dim, 10)
        self.layer_2 = Atten_model(10, 20)
        self.linear = nn.Linear(20, out_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out = self.layer_1(x)
        out = self.layer_2(out)
        out = self.linear(out)
        # sigmoid output (note: nn.CrossEntropyLoss normally expects raw logits)
        out = self.sigmoid(out)
        return out
model_atten = NeuralNet(4, 3)
model_atten = model_atten.to(device)
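A quick shape check of the assembled model (a hypothetical snippet, not from the original post; it assumes device is defined as in Section 2.1):

# batch of 2 samples, 4 input features -> 3 class scores
dummy = torch.randn(2, 4).to(device)
print(model_atten(dummy).shape)  # torch.Size([2, 3])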
2.3 Baseline Linear Network
class LinearNet(torch.nn.Module):
    def __init__(self, in_dim, out_dim):
        super(LinearNet, self).__init__()
        self.layer_1 = nn.Linear(in_dim, 10)
        self.layer_2 = nn.Linear(10, 20)
        self.layer_3 = nn.Linear(20, out_dim)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.layer_1(x)
        out = self.relu(out)
        out = self.layer_2(out)
        out = self.relu(out)
        out = self.layer_3(out)
        out = self.sigmoid(out)
        return out
model_linear = LinearNet(4, 3)
model_linear = model_linear.to(device)
3. Results
Training accuracy is defined as the fraction of samples whose predicted class (the argmax over the three output scores) matches the label:
# Compute accuracy
accuracy_linear = (pred_train_linear.argmax(1) == target_train).sum()/pred_train_linear.shape[0]
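As a standalone illustration (with made-up predictions, not from the post):

preds = torch.tensor([[0.1, 0.8, 0.1],
                      [0.7, 0.2, 0.1]])
labels = torch.tensor([1, 2])
acc = (preds.argmax(1) == labels).sum() / preds.shape[0]
print(acc)  # tensor(0.5000): only the first prediction is correct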
| Epoch | Atten Model Loss | Atten Model Accuracy | Linear Model Loss | Linear Model Accuracy |
|---|---|---|---|---|
| 200 | 0.793 | 0.666 | 0.669 | 0.649 |
| 400 | 0.580 | 0.649 | 0.669 | 0.649 |
| 600 | 0.562 | 0.975 | 0.669 | 0.649 |
| 800 | 0.560 | 0.991 | 0.669 | 0.649 |
| 1000 | 0.560 | 0.991 | 0.669 | 0.649 |
By epoch 1000, the attention model's training accuracy reaches 0.991, while with all other parameters identical, the linear model's accuracy is only 0.649. The figures below show the convergence of loss and accuracy during training for the attention model and the linear model.
Figure 3. Convergence of training accuracy
Figure 4. Convergence of training loss
The models' performance on the validation and test sets is as follows:
| Metric | Dataset | Atten_Model | Linear_Model |
|---|---|---|---|
| Loss | Train_Data | 0.56 | 0.66 |
| Loss | Val_Data | 0.55 | 0.66 |
| Loss | Test_Data | 0.61 | 0.63 |
| Accuracy | Train_Data | 0.99 | 0.64 |
| Accuracy | Val_Data | 1.0 | 0.66 |
| Accuracy | Test_Data | 0.93 | 0.80 |
On the validation set, Atten_Model outperforms Linear_Model in both loss and accuracy, so the attention model has the advantage; on the test set its accuracy (0.93) is likewise higher than the linear model's (0.80), though the gap is smaller.
4. Appendix
The complete code is as follows:
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torch.utils.tensorboard import SummaryWriter
# Import the iris dataset loader
from sklearn.datasets import load_iris
# Declare device globally
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device=',device)
# Load the iris data
iris = load_iris()
# Feature data, numpy.ndarray, 150×4
x = iris.data
print(x.shape)
# Labels, numpy.ndarray, shape (150,)
target = iris.target
print(target.shape)
# One-hot encoding, 150×3 (built here but not used by the training below)
y = np.zeros((x.shape[0], 3))
for i in range(target.shape[0]):
    y[i][target[i]] = 1
# Class names
names = iris.target_names
print(names)
# Move to device
x = torch.tensor(x, dtype=torch.float32).to(device)
target = torch.tensor(target, dtype=torch.long).to(device)
# Split into train, val, and test sets
ids = np.arange(x.shape[0])
np.random.seed(666)
np.random.shuffle(ids)
train_ids = ids[0: int(x.shape[0] * 0.8)]
val_ids = ids[int(x.shape[0] * 0.8): int(x.shape[0] * 0.9)]
test_ids = ids[int(x.shape[0] * 0.9): ]
x_train, target_train = x[train_ids], target[train_ids]
x_val, target_val = x[val_ids], target[val_ids]
x_test, target_test = x[test_ids], target[test_ids]
# print(x_test, target_test, y_test)
# Define the attention mechanism model
class Atten_model(torch.nn.Module):
    def __init__(self, in_dim, out_dim):
        super(Atten_model, self).__init__()
        # k, q, v each map x from in_dim to out_dim; each feature gets its own weight
        self.k = nn.Linear(in_dim, out_dim)
        self.q = nn.Linear(in_dim, out_dim)
        self.v = nn.Linear(in_dim, out_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        k = self.k(x)
        q = self.q(x)
        v = self.v(x)
        # element-wise product of k and q, scaled and softmax-normalized
        atten = F.softmax((k * q) / torch.sqrt(torch.tensor(v.shape[1], dtype=torch.float32)), dim=1)
        # weight the values by the attention coefficients
        out = atten * v
        return self.relu(out)

class NeuralNet(torch.nn.Module):
    def __init__(self, in_dim, out_dim):
        super(NeuralNet, self).__init__()
        self.layer_1 = Atten_model(in_dim, 10)
        self.layer_2 = Atten_model(10, 20)
        # self.layer_3 = Atten_model(64, 132)
        self.linear = nn.Linear(20, out_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out = self.layer_1(x)
        out = self.layer_2(out)
        # out = self.layer_3(out)
        out = self.linear(out)
        # sigmoid output (note: nn.CrossEntropyLoss normally expects raw logits)
        out = self.sigmoid(out)
        return out
class LinearNet(torch.nn.Module):
    def __init__(self, in_dim, out_dim):
        super(LinearNet, self).__init__()
        self.layer_1 = nn.Linear(in_dim, 10)
        self.layer_2 = nn.Linear(10, 20)
        self.layer_3 = nn.Linear(20, out_dim)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.layer_1(x)
        out = self.relu(out)
        out = self.layer_2(out)
        out = self.relu(out)
        out = self.layer_3(out)
        out = self.sigmoid(out)
        return out
model_atten = NeuralNet(4, 3)
model_atten = model_atten.to(device)
model_linear = LinearNet(4, 3)
model_linear = model_linear.to(device)
Epochs = 1000
opt = torch.optim.Adam(model_atten.parameters(), lr=0.005, weight_decay=1e-3)
Loss_func = nn.CrossEntropyLoss()
t_start = time.perf_counter()  # time.clock() was removed in Python 3.8
board = SummaryWriter('/kaggle/working/DL/logs')
for epoch in range(Epochs):
    # Forward pass
    pred_train_atten = model_atten(x_train)
    # pred_train_atten is 120×3 class scores; target_train holds the raw class indices
    loss_train_atten = Loss_func(pred_train_atten, target_train)
    # Backpropagate and update
    opt.zero_grad()
    loss_train_atten.backward()
    opt.step()
    board.add_scalar("Train_loss_atten", loss_train_atten.item(), epoch)
    # Compute accuracy
    accuracy_atten = (pred_train_atten.argmax(1) == target_train).sum()/pred_train_atten.shape[0]
    board.add_scalar("Train_accuracy_atten", accuracy_atten.item(), epoch)
    if epoch % 100 == 0 or epoch == Epochs-1:
        print('epoch=', epoch, ' train_loss_atten=', loss_train_atten.item(), "accuracy_atten=", accuracy_atten.item())
board.close()
t_end = time.perf_counter()
t_d = t_end - t_start
print('atten: training for %d epochs took %f s, final loss %f' % (Epochs, t_d, loss_train_atten.item()))
torch.save(model_atten, "/kaggle/working/DL/model_atten.pth")
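# --- Train the linear model. The original appendix omits this loop; since the
# --- text says all other parameters were the same, this is a sketch assuming
# --- identical hyperparameters to the attention model's training above.
opt_linear = torch.optim.Adam(model_linear.parameters(), lr=0.005, weight_decay=1e-3)
for epoch in range(Epochs):
    pred_train_linear = model_linear(x_train)
    loss_train_linear = Loss_func(pred_train_linear, target_train)
    opt_linear.zero_grad()
    loss_train_linear.backward()
    opt_linear.step()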
model_atten.eval()
with torch.no_grad():
    Y_pred_train_atten = model_atten(x_train)
    Y_pred_val_atten = model_atten(x_val)
    Y_pred_test_atten = model_atten(x_test)
    loss_train_atten = Loss_func(Y_pred_train_atten, target_train)
    loss_val_atten = Loss_func(Y_pred_val_atten, target_val)
    loss_test_atten = Loss_func(Y_pred_test_atten, target_test)
    accuracy_train_atten = (Y_pred_train_atten.argmax(1) == target_train).sum()/Y_pred_train_atten.shape[0]
    accuracy_val_atten = (Y_pred_val_atten.argmax(1) == target_val).sum()/Y_pred_val_atten.shape[0]
    accuracy_test_atten = (Y_pred_test_atten.argmax(1) == target_test).sum()/Y_pred_test_atten.shape[0]
model_linear.eval()
with torch.no_grad():
    Y_pred_train_linear = model_linear(x_train)
    Y_pred_val_linear = model_linear(x_val)
    Y_pred_test_linear = model_linear(x_test)
    loss_train_linear = Loss_func(Y_pred_train_linear, target_train)
    loss_val_linear = Loss_func(Y_pred_val_linear, target_val)
    loss_test_linear = Loss_func(Y_pred_test_linear, target_test)
    accuracy_train_linear = (Y_pred_train_linear.argmax(1) == target_train).sum()/Y_pred_train_linear.shape[0]
    accuracy_val_linear = (Y_pred_val_linear.argmax(1) == target_val).sum()/Y_pred_val_linear.shape[0]
    accuracy_test_linear = (Y_pred_test_linear.argmax(1) == target_test).sum()/Y_pred_test_linear.shape[0]
print("Atten Model: ")
print("训练数据,Loss={}, accuracy={}".format(loss_train_atten, accuracy_train_atten))
print("验证数据,Loss={}, accuracy={}".format(loss_val_atten, accuracy_val_atten))
print("测试数据,Loss={}, accuracy={}".format(loss_test_atten, accuracy_test_atten))
print("Linear Model: ")
print("训练数据,Loss={}, accuracy={}".format(loss_train_linear, accuracy_train_linear))
print("验证数据,Loss={}, accuracy={}".format(loss_val_linear, accuracy_val_linear))
print("测试数据,Loss={}, accuracy={}".format(loss_test_linear, accuracy_test_linear))