Node Representation Learning with Graph Neural Networks
In this section we will learn how to implement multi-layer graph neural networks, and walk through the general procedure for training a GNN so that it produces high-quality node representations and achieves high node-classification accuracy.
The task: given each node's attributes (categorical or numerical), the edge list, edge attributes (if any), and the labels of a known subset of nodes, predict the labels of the remaining, unlabeled nodes.
The Cora dataset
- The Cora dataset consists of machine-learning papers, each assigned to one of 7 classes: Case_Based, Genetic_Algorithms, Neural_Networks, Probabilistic_Methods, Reinforcement_Learning, Rule_Learning, Theory.
- Every paper in the dataset cites, or is cited by, at least one other paper in the dataset; there are 2708 papers in total. After removing stop words and words with a document frequency below 10, the vocabulary contains 1433 words.
The dataset consists of two files:
- The .content file describes each paper, one per line, in the format <paper_id> <word_attributes>+ <class_label>:
  - <paper_id>: a unique identifier for the paper.
  - <word_attributes>: binary word features (0 or 1), indicating whether the corresponding vocabulary word appears in the paper.
  - <class_label>: the class the paper belongs to.
- The .cites file contains the citation graph, one link per line, in the format <ID of cited paper> <ID of citing paper>:
  - <ID of cited paper>: the identifier of the cited paper.
  - <ID of citing paper>: the identifier of the citing paper.

The citation direction runs from right to left: a line reading paper1 paper2 means paper2 -> paper1, i.e. paper2 cites paper1. A small parsing sketch follows below.
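If you work with the raw files rather than the PyG loader, they can be parsed with plain Python. This is only a sketch and assumes the original tab-separated Cora release with file names cora.content and cora.cites:

# Minimal sketch for parsing the raw Cora files (assumes the original
# tab-separated release, named cora.content and cora.cites).
features, labels, cites = {}, {}, []

with open('cora.content') as f:
    for line in f:
        parts = line.strip().split('\t')
        paper_id, word_attrs, class_label = parts[0], parts[1:-1], parts[-1]
        features[paper_id] = [int(w) for w in word_attrs]  # 1433 binary word features
        labels[paper_id] = class_label                      # one of the 7 classes

with open('cora.cites') as f:
    for line in f:
        cited, citing = line.strip().split('\t')
        cites.append((citing, cited))  # edge direction: citing -> cited

print(len(features), 'papers,', len(cites), 'citation links')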
Comparing the node representation learning ability of MLP, GCN, and GAT
Preparation
## Load the dataset
from torch_geometric.datasets import Planetoid
from torch_geometric.transforms import NormalizeFeatures

dataset = Planetoid(root='/home/**/python_file/gnn/dataset', name='Cora', transform=NormalizeFeatures())
data = dataset[0]
print(data)
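Besides print(data), the returned Data object carries the graph statistics and the train/val/test split; a quick sanity check (all attribute names below are standard torch_geometric.data.Data fields):

# Quick sanity check of the Cora graph using standard PyG Data attributes.
print(f'Number of nodes: {data.num_nodes}')
print(f'Number of edges: {data.num_edges}')
print(f'Number of node features: {dataset.num_features}')
print(f'Number of classes: {dataset.num_classes}')
print(f'Training nodes: {int(data.train_mask.sum())}')
print(f'Validation nodes: {int(data.val_mask.sum())}')
print(f'Test nodes: {int(data.test_mask.sum())}')
print(f'Is undirected: {data.is_undirected()}')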
## Helper for visualizing the node-representation distribution
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

def visualize(h, color):
    # Project the high-dimensional node representations h to 2D with t-SNE.
    z = TSNE(n_components=2).fit_transform(h.detach().cpu().numpy())
    plt.figure(figsize=(10, 10))
    plt.xticks([])
    plt.yticks([])
    plt.scatter(z[:, 0], z[:, 1], s=70, c=color, cmap="Set2")
    plt.show()
Applying an MLP to graph node classification
## MLP node classifier
import torch
from torch.nn import Linear
import torch.nn.functional as F

class MLP(torch.nn.Module):
    def __init__(self, hidden_channels):
        super(MLP, self).__init__()
        torch.manual_seed(12345)
        self.lin1 = Linear(dataset.num_features, hidden_channels)
        self.lin2 = Linear(hidden_channels, dataset.num_classes)

    def forward(self, x):
        x = self.lin1(x)
        x = x.relu()
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return x
# Train the MLP
model = MLP(hidden_channels=16)
print(model)

model = MLP(hidden_channels=16)
criterion = torch.nn.CrossEntropyLoss()  # Define loss criterion.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)  # Define optimizer.

def train():
    model.train()
    optimizer.zero_grad()  # Clear gradients.
    out = model(data.x)  # Perform a single forward pass.
    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # Compute the loss solely based on the training nodes.
    loss.backward()  # Derive gradients.
    optimizer.step()  # Update parameters based on gradients.
    return loss

for epoch in range(1, 201):
    loss = train()
    # print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
## Evaluate on the test set
def test():
    model.eval()
    out = model(data.x)
    pred = out.argmax(dim=1)  # Use the class with highest probability.
    test_correct = pred[data.test_mask] == data.y[data.test_mask]  # Check against ground-truth labels.
    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())  # Derive ratio of correct predictions.
    return test_acc

test_acc = test()
print(f'Test Accuracy: {test_acc:.4f}')
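The Planetoid split also provides a validation mask, data.val_mask. If you want to monitor more than the test accuracy, the test() function above can be generalized to evaluate any node mask; a small sketch reusing the model and data objects already defined (the helper name accuracy is ours):

# Sketch: evaluate accuracy on an arbitrary node mask (e.g. the validation
# split), reusing the trained MLP and the Planetoid masks defined above.
def accuracy(mask):
    model.eval()
    out = model(data.x)
    pred = out.argmax(dim=1)
    correct = pred[mask] == data.y[mask]
    return int(correct.sum()) / int(mask.sum())

print(f'Val Accuracy: {accuracy(data.val_mask):.4f}')
print(f'Test Accuracy: {accuracy(data.test_mask):.4f}')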
Applying a GCN to graph node classification
## Import GCN and build the model
from torch_geometric.nn import GCNConv

class GCN(torch.nn.Module):
    def __init__(self, hidden_channels):
        super(GCN, self).__init__()
        torch.manual_seed(12345)
        self.conv1 = GCNConv(dataset.num_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, dataset.num_classes)

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = x.relu()
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, edge_index)
        return x

model = GCN(hidden_channels=16)
print(model)

## Visualize the node representations before training
model = GCN(hidden_channels=16)
model.eval()
out = model(data.x, data.edge_index)
visualize(out, color=data.y)
## Train the GCN classifier
model = GCN(hidden_channels=16)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
criterion = torch.nn.CrossEntropyLoss()

def train():
    model.train()
    optimizer.zero_grad()  # Clear gradients.
    out = model(data.x, data.edge_index)  # Perform a single forward pass.
    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # Compute the loss solely based on the training nodes.
    loss.backward()  # Derive gradients.
    optimizer.step()  # Update parameters based on gradients.
    return loss

for epoch in range(1, 201):
    loss = train()
    # print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
## Evaluate on the test set
def test():
    model.eval()
    out = model(data.x, data.edge_index)
    pred = out.argmax(dim=1)  # Use the class with highest probability.
    test_correct = pred[data.test_mask] == data.y[data.test_mask]  # Check against ground-truth labels.
    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())  # Derive ratio of correct predictions.
    return test_acc

test_acc = test()
print(f'Test Accuracy: {test_acc:.4f}')

## Visualize the node representations after training
model.eval()
out = model(data.x, data.edge_index)
visualize(out, color=data.y)
Applying a GAT to graph node classification
## Import GAT and build the model
import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.nn import GATConv

class GAT(torch.nn.Module):
    def __init__(self, hidden_channels):
        super(GAT, self).__init__()
        torch.manual_seed(12345)
        self.conv1 = GATConv(dataset.num_features, hidden_channels)
        self.conv2 = GATConv(hidden_channels, dataset.num_classes)

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = x.relu()
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, edge_index)
        return x

model = GAT(hidden_channels=16)
print(model)

## Visualize the node representations before training
model = GAT(hidden_channels=16)
model.eval()
out = model(data.x, data.edge_index)
visualize(out, color=data.y)
## Train the GAT classifier
model = GAT(hidden_channels=16)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
criterion = torch.nn.CrossEntropyLoss()

def train():
    model.train()
    optimizer.zero_grad()  # Clear gradients.
    out = model(data.x, data.edge_index)  # Perform a single forward pass.
    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # Compute the loss solely based on the training nodes.
    loss.backward()  # Derive gradients.
    optimizer.step()  # Update parameters based on gradients.
    return loss

for epoch in range(1, 201):
    loss = train()
    # print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
## Evaluate on the test set
def test():
    model.eval()
    out = model(data.x, data.edge_index)
    pred = out.argmax(dim=1)  # Use the class with highest probability.
    test_correct = pred[data.test_mask] == data.y[data.test_mask]  # Check against ground-truth labels.
    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())  # Derive ratio of correct predictions.
    return test_acc

test_acc = test()
print(f'Test Accuracy: {test_acc:.4f}')
## Visualize the node representations after training
model.eval()
out = model(data.x, data.edge_index)
visualize(out, color=data.y)
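The GAT above relies on GATConv's default of a single attention head, while the original GAT paper uses multi-head attention, which GATConv exposes through its heads and concat arguments. The following is only a sketch of such a variant (the class name MultiHeadGAT and the choice of 8 heads with 8 hidden channels are illustrative, not part of the original code):

class MultiHeadGAT(torch.nn.Module):
    def __init__(self, hidden_channels, heads=8):
        super(MultiHeadGAT, self).__init__()
        torch.manual_seed(12345)
        # First layer: `heads` attention heads whose outputs are concatenated,
        # so the second layer receives hidden_channels * heads input features.
        self.conv1 = GATConv(dataset.num_features, hidden_channels, heads=heads)
        # Second layer: a single head with concat=False, so the output
        # dimension equals the number of classes.
        self.conv2 = GATConv(hidden_channels * heads, dataset.num_classes, heads=1, concat=False)

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = x.relu()
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, edge_index)
        return x

model = MultiHeadGAT(hidden_channels=8, heads=8)
print(model)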
Comparing the results

- Measured accuracy (without hyperparameter tuning): ACC(GCN) > ACC(GAT) > ACC(MLP).
- Explanation: when learning node representations, the MLP classifier uses only each node's own attributes and ignores the connections between nodes, so it performs worst. The GCN and GAT classifiers combine a node's own attributes with those of its neighbors, and both outperform the MLP. This highlights how important neighborhood information is for node classification.
- GCN and GAT differ in how neighbor information is normalized during aggregation (see the formulas below):
  - GCN computes the normalization coefficient from the degrees of the center node and its neighbors, whereas GAT computes it from a learned similarity (attention score) between the center node and each neighbor.
  - GCN's normalization therefore depends on the graph topology: different nodes have different degrees, and so do their neighbors, which can hurt generalization in some applications.
  - GAT's normalization depends on a learned similarity between the center node and its neighbors rather than on the topology alone, and it therefore tends to generalize better across tasks.
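For reference, the per-node update rules of the two layers (as defined in the GCN and GAT papers, with non-linearities omitted; both PyG layers add self-loops by default) differ only in where the coefficient in front of each neighbor comes from:

$$\text{GCN:}\quad \mathbf{h}_i' = \sum_{j \in \mathcal{N}(i) \cup \{i\}} \frac{1}{\sqrt{\hat{d}_i \hat{d}_j}}\, \mathbf{W}\mathbf{h}_j$$

$$\text{GAT:}\quad \alpha_{ij} = \mathrm{softmax}_j\big(\mathrm{LeakyReLU}(\mathbf{a}^{\top}[\mathbf{W}\mathbf{h}_i \,\Vert\, \mathbf{W}\mathbf{h}_j])\big), \qquad \mathbf{h}_i' = \sum_{j \in \mathcal{N}(i) \cup \{i\}} \alpha_{ij}\, \mathbf{W}\mathbf{h}_j$$

In GCN the coefficient is fixed by the (self-loop-augmented) degrees, while in GAT it is a trainable attention score normalized over the neighborhood.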
References:
- https://github.com/datawhalechina/team-learning-nlp/tree
- https://zhuanlan.zhihu.com/p/78452993