Understanding GAT

Implementing GAT from Scratch

Reading the code below alongside the formulas in the paper should make everything clear.
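For reference, these are the equations from the GAT paper (Veličković et al., 2018) that the NumPy walkthrough implements: the raw attention score for an edge (i, j), its softmax normalization over the neighborhood, and the final aggregation:

$$e_{ij} = \mathrm{LeakyReLU}\!\left(\mathbf{a}^{\top}\,[\mathbf{W}h_i \,\Vert\, \mathbf{W}h_j]\right)$$
$$\alpha_{ij} = \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i} \exp(e_{ik})}$$
$$h_i' = \sigma\!\Big(\sum_{j \in \mathcal{N}_i} \alpha_{ij}\,\mathbf{W}h_j\Big)$$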

import numpy as np
np.random.seed(0)
# Build the adjacency matrix (self-loops included on the diagonal)
A = np.array([
    [1, 1, 1, 1],
    [1, 1, 0, 0],
    [1, 0, 1, 1],
    [1, 0, 1, 1]
])

# Randomly generate a node feature matrix X (4 nodes, 4 features each)
X = np.random.uniform(-1, 1, (4, 4))
print(X)

# A graph attention layer has two weight matrices: the regular weight
# matrix W (projects the 4-dim features down to 2 dims) and the attention
# weight matrix W_att (scores a concatenated source/target pair, 2 + 2 = 4 dims)
W = np.random.uniform(-1, 1, (2, 4))

W_att = np.random.uniform(-1, 1, (1, 4))

# Example with a different adjacency matrix (no self-loops):
# A = np.array([[0, 1, 0, 1],
#               [1, 0, 1, 0],
#               [0, 1, 0, 1],
#               [1, 0, 1, 0]])
# Connections:
# (array([0, 0, 1, 1, 2, 2, 3, 3]),
#  array([1, 3, 0, 2, 1, 3, 0, 2]))
# i.e., the index pair (0, 1) means there is an edge from node 0 to node 1
connections = np.where(A > 0)
print(connections)
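Since the A defined above includes self-loops on every node, the output is fully determined:

(array([0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3]), array([0, 1, 2, 3, 0, 1, 0, 2, 3, 0, 2, 3]))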

# Use the connections to gather features along each edge: the transformed
# features of the source node and of the destination node, concatenated per edge
"""
eg:
transformed_X = np.array([
    [1.0, 1.0],  # 节点 0 变换后的特征
    [2.0, 2.0],  # 节点 1 变换后的特征
    [3.0, 3.0],  # 节点 2 变换后的特征
    [4.0, 4.0]   # 节点 3 变换后的特征
])
拼接后:
# 连接后的特征矩阵
[[1.0, 1.0, 2.0, 2.0],  # 节点 0 -> 节点 1
 [1.0, 1.0, 3.0, 3.0],  # 节点 0 -> 节点 2
 [1.0, 1.0, 4.0, 4.0],  # 节点 0 -> 节点 3
 [2.0, 2.0, 1.0, 1.0],  # 节点 1 -> 节点 0
 [2.0, 2.0, 3.0, 3.0],  # 节点 1 -> 节点 2
 [2.0, 2.0, 4.0, 4.0],  # 节点 1 -> 节点 3
 [3.0, 3.0, 1.0, 1.0],  # 节点 2 -> 节点 0
 [4.0, 4.0, 1.0, 1.0],  # 节点 3 -> 节点 0
 [4.0, 4.0, 2.0, 2.0],  # 节点 3 -> 节点 1
 [4.0, 4.0, 3.0, 3.0]]  # 节点 3 -> 节点 2

"""
# Linear transform with the attention weight matrix W_att: score every edge
# by concatenating the transformed source and destination features
a = W_att @ np.concatenate(
    [(X @ W.T)[connections[0]], (X @ W.T)[connections[1]]],
    axis=1
).T

# Leaky ReLU activation
def leaky_relu(x, alpha=0.2):
    return np.maximum(alpha * x, x)

# Apply leaky_relu to the raw attention scores
e = leaky_relu(a)
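A quick sanity check of the activation itself (deterministic, so easy to verify by hand): with alpha = 0.2, negative inputs are scaled by 0.2 and positive inputs pass through unchanged.

# leaky_relu(np.array([-1.0, 0.0, 2.0]))  ->  array([-0.2,  0. ,  2. ])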

# Attention score matrix
E = np.zeros(A.shape)
# Fill the computed attention values into the matrix
E[connections[0], connections[1]] = e[0]

# Softmax over rows (or columns) of a 2-D array
def softmax2D(x, axis):
    # For numerical stability, subtract the max of each row/column first
    """
    e.g. x = np.array([
    [1, 2, 3],
    [4, 5, 6]
    ])
    np.max(x, axis=1)                              # [3, 6]
    np.expand_dims(np.max(x, axis=1), axis=1)      # [[3], [6]]
    x - np.expand_dims(np.max(x, axis=1), axis=1)  # [[-2, -1, 0], [-2, -1, 0]]
    """
    e = np.exp(x - np.expand_dims(np.max(x, axis=axis), axis))
    total = np.expand_dims(np.sum(e, axis=axis), axis)
    return e / total

# Normalize the attention scores row-wise with softmax
W_alpha = softmax2D(E, 1)

# Final embeddings: combine adjacency, attention weights, and transformed features
H = A.T @ W_alpha @ X @ W.T
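One caveat worth knowing: softmax2D normalizes over entire rows of E, so the zero entries at non-edges still receive some weight (exp(0) = 1). The paper instead restricts the softmax to each node's neighborhood. A minimal sketch of that masked variant, as an aside rather than part of the original walkthrough:

# Mask non-edges with -inf so they vanish after the exponential;
# this relies on every node having a self-loop, so no row becomes all -inf
E_masked = np.where(A > 0, E, -np.inf)
W_alpha_masked = softmax2D(E_masked, 1)  # rows now sum to 1 over true neighbors only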

Hands-On Project: Let the Library Do the Work!

from torch_geometric.datasets import Planetoid
import torch
import torch.nn.functional as F
from torch_geometric.nn import GATv2Conv

# Load the Cora dataset
dataset = Planetoid(root='.', name='Cora')
data = dataset[0]
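Cora is a citation graph: 2,708 nodes (papers), 1,433 bag-of-words features per node, and 7 classes. A quick inspection confirms the sizes:

print(f'Nodes: {data.num_nodes} | Features: {dataset.num_features} | Classes: {dataset.num_classes}')
# Nodes: 2708 | Features: 1433 | Classes: 7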

# Define an accuracy metric
def accuracy(y_pred, y_true):
    return torch.sum(y_pred == y_true) / len(y_true)
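For example, accuracy(torch.tensor([0, 1, 1]), torch.tensor([0, 1, 2])) returns tensor(0.6667), since two of the three predictions match.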

class GAT(torch.nn.Module):
    def __init__(self, dim_in, dim_h, dim_out, heads=8):
        super().__init__()
        self.gat1 = GATv2Conv(dim_in, dim_h, heads=heads)
        self.gat2 = GATv2Conv(dim_h * heads, dim_out, heads=1)

    def forward(self, x, edge_index):
        h = F.dropout(x, p=0.6, training=self.training)
        h = self.gat1(h, edge_index)
        h = F.relu(h)
        h = F.dropout(h, p=0.6, training=self.training)
        h = self.gat2(h, edge_index)
        return F.log_softmax(h, dim=1)

    def fit(self, data, epochs):
        # forward() already returns log-probabilities; CrossEntropyLoss on them
        # is equivalent to NLLLoss here, since log_softmax is idempotent
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.parameters(), lr=0.01, weight_decay=0.01)

        self.train()
        for epoch in range(epochs + 1):
            optimizer.zero_grad()
            out = self(data.x, data.edge_index)
            loss = criterion(out[data.train_mask], data.y[data.train_mask])
            acc = accuracy(out[data.train_mask].argmax(dim=1), data.y[data.train_mask])
            loss.backward()
            optimizer.step()
            if epoch % 20 == 0:
                val_loss = criterion(out[data.val_mask], data.y[data.val_mask])
                val_acc = accuracy(out[data.val_mask].argmax(dim=1), data.y[data.val_mask])
                print(f'Epoch {epoch:>3} | Train Loss: {loss:.3f} | Train Acc: {acc*100:>5.2f}% | Val Loss: {val_loss:.2f} | Val Acc: {val_acc*100:.2f}%')

    @torch.no_grad()
    def test(self, data):
        self.eval()
        out = self(data.x, data.edge_index)
        acc = accuracy(out.argmax(dim=1)[data.test_mask], data.y[data.test_mask])
        return acc

gat = GAT(dataset.num_features, 32, dataset.num_classes)
print(gat)

# Train
gat.fit(data, epochs=100)

Epoch 0 | Train Loss: 1.953 | Train Acc: 15.71% | Val Loss: 1.95 | Val Acc: 18.20%
Epoch 20 | Train Loss: 0.211 | Train Acc: 98.57% | Val Loss: 0.98 | Val Acc: 69.40%
Epoch 40 | Train Loss: 0.190 | Train Acc: 97.14% | Val Loss: 0.92 | Val Acc: 73.80%
Epoch 60 | Train Loss: 0.146 | Train Acc: 99.29% | Val Loss: 0.93 | Val Acc: 71.20%
Epoch 80 | Train Loss: 0.162 | Train Acc: 97.14% | Val Loss: 0.85 | Val Acc: 72.20%
Epoch 100 | Train Loss: 0.220 | Train Acc: 95.71% | Val Loss: 0.89 | Val Acc: 71.00%
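The test method defined above is never called in the original script; closing the loop is a one-liner (the exact score depends on the random seed, so no number is quoted here):

# Evaluate on the held-out test split
acc = gat.test(data)
print(f'GAT test accuracy: {acc*100:.2f}%')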
