Node Representation Learning with Graph Neural Networks

Using a node classification task, we compare the node-representation learning ability of three models: an MLP and two well-known graph neural networks, GCN and GAT (a minimal GAT sketch is given at the end of this post).

1. Loading the Data

from torch_geometric.datasets import Planetoid
from torch_geometric.transforms import NormalizeFeatures
dataset = Planetoid(root='./dataset', name='Cora', transform=NormalizeFeatures())
print()
print(f'Dataset: {dataset}:')
print('======================')
print(f'Number of graphs: {len(dataset)}')
print(f'Number of features: {dataset.num_features}')
print(f'Number of classes: {dataset.num_classes}')
data = dataset[0] # Get the first graph object.
print()
print(data)
print('======================')
# Gather some statistics about the graph.
print(f'Number of nodes: {data.num_nodes}')
print(f'Number of edges: {data.num_edges}')
print(f'Average node degree: {data.num_edges / data.num_nodes:.2f}')
print(f'Number of training nodes: {data.train_mask.sum()}')
print(f'Training node label rate: {int(data.train_mask.sum()) / data.num_nodes:.2f}')
print(f'Contains isolated nodes: {data.contains_isolated_nodes()}')
print(f'Contains self-loops: {data.contains_self_loops()}')
print(f'Is undirected: {data.is_undirected()}')
Dataset: Cora():
======================
Number of graphs: 1
Number of features: 1433
Number of classes: 7

Data(edge_index=[2, 10556], test_mask=[2708], train_mask=[2708], val_mask=[2708], x=[2708, 1433], y=[2708])
======================
Number of nodes: 2708
Number of edges: 10556
Average node degree: 3.90
Number of training nodes: 140
Training node label rate: 0.05
Contains isolated nodes: False
Contains self-loops: False
Is undirected: True

The Cora graph has 2,708 nodes and 10,556 edges, with an average node degree of 3.9. Only 140 nodes (20 per class) carry ground-truth labels used for training, i.e. just 5% of the nodes are labeled.
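As a quick sanity check (a minimal sketch reusing the data object loaded above), we can count the training labels per class; the public Planetoid split should yield 20 per class:

import torch
# Count how many of the 140 training nodes fall into each of the 7 classes.
print(torch.bincount(data.y[data.train_mask]))
# Expected: tensor([20, 20, 20, 20, 20, 20, 20])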

2. An MLP Node Classifier

First, a helper to visualize the distribution of node representations by projecting them to 2-D with t-SNE.

import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

def visualize(h, color):
    # Project the high-dimensional representations h to 2-D with t-SNE.
    z = TSNE(n_components=2).fit_transform(h.detach().cpu().numpy())
    plt.figure(figsize=(10, 10))
    plt.xticks([])
    plt.yticks([])
    plt.scatter(z[:, 0], z[:, 1], s=70, c=color, cmap="Set2")
    plt.show()
import torch
from torch.nn import Linear
import torch.nn.functional as F
class MLP(torch.nn.Module):
    def __init__(self, hidden_channels):
        super(MLP, self).__init__()
        torch.manual_seed(12345)
        self.lin1 = Linear(dataset.num_features, hidden_channels)
        self.lin2 = Linear(hidden_channels, dataset.num_classes)
    def forward(self, x):
        x = self.lin1(x)
        x = x.relu()
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return x
model = MLP(hidden_channels=16)
print(model)
MLP(
  (lin1): Linear(in_features=1433, out_features=16, bias=True)
  (lin2): Linear(in_features=16, out_features=7, bias=True)
)
model = MLP(hidden_channels=16)
criterion = torch.nn.CrossEntropyLoss() # Define loss criterion.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) # Define optimizer.
def train():
    model.train()
    optimizer.zero_grad() # Clear gradients.
    out = model(data.x) # Perform a single forward pass.
    loss = criterion(out[data.train_mask], data.y[data.train_mask]) # Compute the loss solely based on the training nodes.
    loss.backward() # Derive gradients.
    optimizer.step() # Update parameters based on gradients.
    return loss
for epoch in range(1, 201):
    loss = train()
    if epoch % 10 == 0:
        print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
Epoch: 010, Loss: 1.8893
Epoch: 020, Loss: 1.7441
Epoch: 030, Loss: 1.5020
Epoch: 040, Loss: 1.2543
Epoch: 050, Loss: 1.0563
Epoch: 060, Loss: 0.8578
Epoch: 070, Loss: 0.6864
Epoch: 080, Loss: 0.6368
Epoch: 090, Loss: 0.5520
Epoch: 100, Loss: 0.5350
Epoch: 110, Loss: 0.4908
Epoch: 120, Loss: 0.4745
Epoch: 130, Loss: 0.4603
Epoch: 140, Loss: 0.4031
Epoch: 150, Loss: 0.4212
Epoch: 160, Loss: 0.3782
Epoch: 170, Loss: 0.3704
Epoch: 180, Loss: 0.4203
Epoch: 190, Loss: 0.3912
Epoch: 200, Loss: 0.3810
def test():
    model.eval()
    out = model(data.x)
    pred = out.argmax(dim=1) # Use the class with highest probability.
    test_correct = pred[data.test_mask] == data.y[data.test_mask] # Check against ground-truth labels.
    test_acc = int(test_correct.sum()) / int(data.test_mask.sum()) # Derive ratio of correct predictions.
    return test_acc
test_acc = test()
print(f'Test Accuracy: {test_acc:.4f}')
Test Accuracy: 0.5900
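The MLP reaches only about 59% test accuracy. It classifies each paper from its bag-of-words features alone and never sees the citation edges, so with just 140 labeled nodes it overfits easily and generalizes poorly.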

3. GCN for Graph Node Classification

from torch_geometric.nn import GCNConv
class GCN(torch.nn.Module):
    def __init__(self, hidden_channels):
        super(GCN, self).__init__()
        torch.manual_seed(12345)
        self.conv1 = GCNConv(dataset.num_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, dataset.num_classes)
    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = x.relu()
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, edge_index)
        return x
model = GCN(hidden_channels=16)
print(model)
GCN(
  (conv1): GCNConv(1433, 16)
  (conv2): GCNConv(16, 7)
)
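Compared with the MLP, the only change is replacing the torch.nn.Linear layers with GCNConv layers, whose forward pass additionally takes edge_index: each layer updates a node's representation by aggregating degree-normalized features from its neighbors, so the classifier can exploit the citation structure rather than the node features alone.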

4. Visualizing the Untrained Network

Before training, we visualize the node representations produced by the randomly initialized GCN.

model.eval()
out = model(data.x, data.edge_index)
visualize(out, color=data.y)

[Figure: 2-D t-SNE projection of the untrained GCN's node representations, colored by class label]

model = GCN(hidden_channels=16)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
criterion = torch.nn.CrossEntropyLoss()
def train():
    model.train()
    optimizer.zero_grad() # Clear gradients.
    out = model(data.x, data.edge_index) # Perform a single forward pass.
    loss = criterion(out[data.train_mask], data.y[data.train_mask]) # Compute the loss solely based on the training nodes.
    loss.backward() # Derive gradients.
    optimizer.step() # Update parameters based on gradients.
    return loss
for epoch in range(1, 201):
    loss = train()
    if epoch % 10 == 0:
        print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
Epoch: 010, Loss: 1.8616
Epoch: 020, Loss: 1.7184
Epoch: 030, Loss: 1.5370
Epoch: 040, Loss: 1.3363
Epoch: 050, Loss: 1.1714
Epoch: 060, Loss: 1.0066
Epoch: 070, Loss: 0.8705
Epoch: 080, Loss: 0.7248
Epoch: 090, Loss: 0.6769
Epoch: 100, Loss: 0.5833
Epoch: 110, Loss: 0.5209
Epoch: 120, Loss: 0.5064
Epoch: 130, Loss: 0.4466
Epoch: 140, Loss: 0.4131
Epoch: 150, Loss: 0.4119
Epoch: 160, Loss: 0.3799
Epoch: 170, Loss: 0.3272
Epoch: 180, Loss: 0.3186
Epoch: 190, Loss: 0.3081
Epoch: 200, Loss: 0.3006
def test():
    model.eval()
    out = model(data.x, data.edge_index)
    pred = out.argmax(dim=1) # Use the class with highest probability.
    test_correct = pred[data.test_mask] == data.y[data.test_mask] # Check against ground-truth labels.
    test_acc = int(test_correct.sum()) / int(data.test_mask.sum()) # Derive ratio of correct predictions.
    return test_acc
test_acc = test()
print(f'Test Accuracy: {test_acc:.4f}')
Test Accuracy: 0.8140
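By propagating features along the citation edges, the GCN lifts test accuracy from 59% (MLP) to 81.4%, a substantial gain from exploiting the graph structure.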

5. Visualizing the Trained Network

model.eval()
out = model(data.x, data.edge_index)
visualize(out, color=data.y)

[Figure: 2-D t-SNE projection of the trained GCN's node representations; the seven classes now form largely separated clusters]
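Finally, the GAT mentioned in the introduction can be built the same way by swapping GCNConv for GATConv. The sketch below is a minimal, untested variant; the choices hidden_channels=8 and heads=8 follow the original GAT paper's Cora setup, and the model can be trained and evaluated with the exact same train()/test() loops used for the GCN above.

from torch_geometric.nn import GATConv

class GAT(torch.nn.Module):
    def __init__(self, hidden_channels, heads=8):
        super(GAT, self).__init__()
        torch.manual_seed(12345)
        # The first layer's attention heads are concatenated, so the
        # second layer's input size is hidden_channels * heads.
        self.conv1 = GATConv(dataset.num_features, hidden_channels, heads=heads)
        self.conv2 = GATConv(hidden_channels * heads, dataset.num_classes, heads=1)
    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = F.elu(x)
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, edge_index)
        return x

model = GAT(hidden_channels=8)
print(model)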

References: https://github.com/datawhalechina/team-learning-nlp/tree/master/GNN
