Implementing GCN / GAT / GraphSAGE / SGC Graph Neural Networks in PyTorch

This article shows how to implement four graph neural network variants in PyTorch: GCN, GraphSAGE, GAT, and SGC. Each model is trained and evaluated on the Cora dataset from the Planetoid collection for a node classification task. Through graph convolution operations, the models combine node features with the adjacency structure, use ReLU activations and dropout to reduce overfitting, and report prediction accuracy on the test nodes.
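
Before looking at the models, it helps to know what the Planetoid Cora dataset actually provides. The snippet below is a minimal sketch of inspecting it (same root directory as the code in the rest of the post); the printed values in the comments are the standard Cora statistics and split sizes.

from torch_geometric.datasets import Planetoid

dataset = Planetoid(root='./data', name='Cora')  # downloads to ./data on first use
data = dataset[0]  # Cora is a single graph

print(dataset.num_node_features)       # 1433 bag-of-words features per paper
print(dataset.num_classes)             # 7 paper categories
print(data.num_nodes, data.num_edges)  # 2708 nodes, 10556 directed edges
print(int(data.train_mask.sum()),      # 140 training nodes
      int(data.val_mask.sum()),        # 500 validation nodes
      int(data.test_mask.sum()))       # 1000 test nodes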


GCN

import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv,SAGEConv,GATConv
from torch_geometric.datasets import Planetoid
import time
# Load the dataset; if the automatic download fails, download the Cora files manually and place them under the root directory
# dataset = Planetoid(root='./tmp/Cora', name='Cora')
dataset = Planetoid(root='./data', name='Cora')


class GCN_NET(torch.nn.Module):

    def __init__(self, features, hidden, classes):
        super(GCN_NET, self).__init__()
        self.conv1 = GCNConv(features, hidden)  # input: node feature dimension, output: hidden dimension
        self.conv2 = GCNConv(hidden, classes)  # input: hidden dimension, output: number of classes

    def forward(self, data):
        # Unpack node features and the adjacency (edge index)
        x, edge_index = data.x, data.edge_index
        # First graph convolution layer
        x = self.conv1(x, edge_index)
        x = F.relu(x)  # activation
        x = F.dropout(x, training=self.training)  # dropout to reduce overfitting
        x = self.conv2(x, edge_index)  # second graph convolution layer
        # Feed the two-layer output through log_softmax to get log class probabilities
        return F.log_softmax(x, dim=1)


# Use a GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# Build the model with a hidden dimension of 16
model = GCN_NET(dataset.num_node_features, 16, dataset.num_classes).to(device)
# Move the (single) Cora graph to the device
data = dataset[0].to(device)
# Define the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

model.train()
epochs = 1000
for epoch in range(epochs):

    optimizer.zero_grad()  # reset gradients
    out = model(data)  # forward pass
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])  # negative log-likelihood loss on the training nodes
    print("{}|{}===> loss = {}".format(epoch, epochs, loss))
    loss.backward()  # backpropagate
    optimizer.step()  # update parameters


model.eval()  # switch to evaluation mode
_, pred = model(data).max(dim=1)  # predicted class per node
correct = int(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())  # number of correct predictions on the test set
acc = correct / int(data.test_mask.sum())  # test accuracy
print('GCN Accuracy: {:.4f}'.format(acc))
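
Training for 1000 epochs with no model selection can overfit the 140 labelled training nodes. The sketch below is one way to add validation-based selection on top of the GCN code above, reusing the model, data, and optimizer already defined; the 200-epoch budget is arbitrary and best_state is just an illustrative variable name.

best_val_acc, best_state = 0.0, None
for epoch in range(200):
    model.train()
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()

    model.eval()
    with torch.no_grad():
        pred = model(data).argmax(dim=1)
        val_acc = int((pred[data.val_mask] == data.y[data.val_mask]).sum()) / int(data.val_mask.sum())
    if val_acc > best_val_acc:  # keep the weights that do best on the validation nodes
        best_val_acc = val_acc
        best_state = {k: v.clone() for k, v in model.state_dict().items()}

model.load_state_dict(best_state)  # restore the best checkpoint before testing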

GraphSAGE

# Import the required packages; see the official documentation if you run into installation issues
import torch
import torch.nn.functional as F
# Import the GCN, GraphSAGE and GAT layers
from torch_geometric.nn import GCNConv, SAGEConv, GATConv
from torch_geometric.datasets import Planetoid

# Load the dataset; if the automatic download fails, download the Cora files manually and place them under the root directory
dataset = Planetoid(root='./data', name='Cora')

"""
监督式;
Graph sage:在聚合邻域节点信息中先采样再聚合
Graph attentation:聚合时考虑邻域节点的权重
半监督式:
Graph Auto-Encoder:图自编码器
图生成网络:
图时空网络:Graph Spatial-Temporal Network
"""


class GraphSAGE_NET(torch.nn.Module):

    def __init__(self, feature, hidden, classes):
        super(GraphSAGE_NET, self).__init__()
        self.sage1 = SAGEConv(feature, hidden)  # two GraphSAGE layers: features -> hidden -> classes
        self.sage2 = SAGEConv(hidden, classes)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        x = self.sage1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.sage2(x, edge_index)

        return F.log_softmax(x, dim=1)


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = GraphSAGE_NET(dataset.num_node_features, 16, dataset.num_classes).to(device)  # build the GraphSAGE model
data = dataset[0].to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

model.train()
epochs = 1000
for epoch in range(epochs):
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    print("{}|{}===> loss = {}".format(epoch, epochs, loss))
    loss.backward()
    optimizer.step()


model.eval()
_, pred = model(data).max(dim=1)
correct = int(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
acc = correct / int(data.test_mask.sum())
print('GraphSAGE Accuracy: {:.4f}'.format(acc))
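
Note that this implementation trains on the full graph, so the "sample then aggregate" part of GraphSAGE is not actually exercised here; mini-batch neighbor sampling is available through loaders such as NeighborLoader in recent PyTorch Geometric versions. What SAGEConv does expose directly is the choice of aggregator. A small sketch of the same two-layer network with max aggregation (GraphSAGE_MAX is just an illustrative name):

class GraphSAGE_MAX(torch.nn.Module):

    def __init__(self, feature, hidden, classes):
        super(GraphSAGE_MAX, self).__init__()
        # aggr chooses how neighbor messages are combined ('mean' is the default, 'max' here)
        self.sage1 = SAGEConv(feature, hidden, aggr='max')
        self.sage2 = SAGEConv(hidden, classes, aggr='max')

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.sage1(x, edge_index))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.sage2(x, edge_index), dim=1)

It can be trained and evaluated with exactly the same loop as GraphSAGE_NET above.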

GAT

# Import the required packages; see the official documentation if you run into installation issues
import torch
import torch.nn.functional as F
# Import the GCN, GraphSAGE and GAT layers
from torch_geometric.nn import GCNConv, SAGEConv, GATConv
from torch_geometric.datasets import Planetoid

# Load the dataset; if the automatic download fails, download the Cora files manually and place them under the root directory
dataset = Planetoid(root='./data', name='Cora')

class GAT_NET(torch.nn.Module):
    def __init__(self, features, hidden, classes, heads=4):
        super(GAT_NET, self).__init__()
        self.gat1 = GATConv(features, hidden, heads=heads)  # GAT layer with multi-head attention
        self.gat2 = GATConv(hidden * heads, classes)  # the heads' outputs are concatenated, so the input dimension is hidden * heads

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        x = self.gat1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.gat2(x, edge_index)

        return F.log_softmax(x, dim=1)


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = GAT_NET(dataset.num_node_features, 16, dataset.num_classes, heads=4).to(device)  # build the GAT model
data = dataset[0].to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

model.train()
epochs = 1000
for epoch in range(epochs):
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    print("{}|{}===> loss = {}".format(epoch,epochs,loss))
    loss.backward()
    optimizer.step()


model.eval()
_, pred = model(data).max(dim=1)
correct = int(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
acc = correct / int(data.test_mask.sum())
print('GAT Accuracy: {:.4f}'.format(acc))
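
The hidden*heads input size of the second layer comes from GATConv concatenating the output of every attention head by default. A quick shape check, reusing the dataset, data, and device already defined above:

conv = GATConv(dataset.num_node_features, 16, heads=4).to(device)  # concat=True is the default
print(conv(data.x, data.edge_index).shape)  # [num_nodes, 16 * 4]: head outputs are concatenated

conv_avg = GATConv(dataset.num_node_features, 16, heads=4, concat=False).to(device)
print(conv_avg(data.x, data.edge_index).shape)  # [num_nodes, 16]: head outputs are averaged

This is also why the output layer is often given concat=False or a single head, so that its output size equals the number of classes.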

SGC

import torch
import torch.nn.functional as F
from torch_geometric.nn import SGConv
from torch_geometric.datasets import Planetoid

# Load the dataset; if the automatic download fails, download the Cora files manually and place them under the root directory
# dataset = Planetoid(root='./tmp/Cora', name='Cora')
dataset = Planetoid(root='./data', name='Cora')


class SGC(torch.nn.Module):

    def __init__(self, features, hidden, classes):
        super(SGC, self).__init__()
        self.conv1 = SGConv(features, hidden)  # input: node feature dimension, output: hidden dimension
        self.conv2 = SGConv(hidden, classes)  # input: hidden dimension, output: number of classes

    def forward(self, data):
        # Unpack node features and the adjacency (edge index)
        x, edge_index = data.x, data.edge_index
        # First simplified graph convolution layer
        x = self.conv1(x, edge_index)
        x = F.relu(x)  # activation
        x = F.dropout(x, training=self.training)  # dropout to reduce overfitting
        x = self.conv2(x, edge_index)  # second layer
        # Feed the two-layer output through log_softmax to get log class probabilities
        return F.log_softmax(x, dim=1)


# Use a GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Build the model with a hidden dimension of 16
model = SGC(dataset.num_node_features, 16, dataset.num_classes).to(device)
# Move the (single) Cora graph to the device
data = dataset[0].to(device)
# Define the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

model.train()
epochs = 1000
for epoch in range(epochs):
    optimizer.zero_grad()  # reset gradients
    out = model(data)  # forward pass
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])  # negative log-likelihood loss on the training nodes
    print("{}|{}===> loss = {}".format(epoch, epochs, loss))
    loss.backward()  # backpropagate
    optimizer.step()  # update parameters


model.eval()  # switch to evaluation mode
_, pred = model(data).max(dim=1)  # predicted class per node
correct = int(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())  # number of correct predictions on the test set
acc = correct / int(data.test_mask.sum())  # test accuracy
print('SGC Accuracy: {:.4f}'.format(acc))
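
The two-layer model above follows the same template as the GCN, but the SGC described in the "Simplifying Graph Convolutional Networks" paper is usually a single SGConv layer with K propagation steps and no hidden nonlinearity. A minimal sketch of that single-layer form (SGC_single is an illustrative name; it plugs into the same training loop as above, taking num_node_features and num_classes directly):

class SGC_single(torch.nn.Module):

    def __init__(self, features, classes, K=2):
        super(SGC_single, self).__init__()
        # One SGConv: K rounds of feature propagation followed by a single linear layer.
        # cached=True stores the propagated features, which is fine for full-batch transductive training.
        self.conv = SGConv(features, classes, K=K, cached=True)

    def forward(self, data):
        x = self.conv(data.x, data.edge_index)
        return F.log_softmax(x, dim=1)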