我们使用图神经网络来生成节点表征,并通过基于监督学习的训练,使得图神经网络学会产生高质量的节点表征。高质量的节点表征能够用于衡量节点的相似性,同时也是准确分类节点的前提。
获取数据集
from torch_geometric.datasets import Planetoid
from torch_geometric.transforms import NormalizeFeatures

# NormalizeFeatures row-normalizes node features before they are fed to the network.
dataset = Planetoid(root='dataset', name='Cora', transform=NormalizeFeatures())

# Cora is a single-graph dataset; grab that graph. Every later cell reads
# `data.x`, `data.y`, `data.train_mask`, `data.test_mask` (and for the GNNs
# `data.edge_index`), but this assignment was missing in the original,
# which would raise NameError at the first use of `data`.
data = dataset[0]

print(f'Dataset: {dataset}')
print(f'Number of graphs: {len(dataset)}')
print(f'Number of features: {dataset.num_features}')
print(f'Number of classes: {dataset.num_classes}')
MLP神经网络
1.创建MLP神经网络
import torch
from torch.nn import Linear
import torch.nn.functional as F
class MLP(torch.nn.Module):
    """Two-layer perceptron baseline for node classification.

    Maps each node's raw feature vector (dataset.num_features dims) to a
    hidden representation of size ``hidden_channels`` and then to one
    logit per class (dataset.num_classes). Graph structure is ignored.
    """

    def __init__(self, hidden_channels):
        super().__init__()
        # Fixed seed so the weight initialization is reproducible.
        torch.manual_seed(12345)
        self.lin1 = Linear(dataset.num_features, hidden_channels)
        self.lin2 = Linear(hidden_channels, dataset.num_classes)

    def forward(self, x):
        # Embed into the low-dimensional hidden space, then classify.
        hidden = F.relu(self.lin1(x))
        # Dropout is active only in training mode (self.training).
        hidden = F.dropout(hidden, p=0.5, training=self.training)
        return self.lin2(hidden)
2.训练MLP神经网络
model = MLP(hidden_channels=16)
print(model)

criterion = torch.nn.CrossEntropyLoss()
# Adam with L2 regularization via weight_decay, as in the original tutorial.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

def train():
    """Run one full-batch training step and return the loss tensor."""
    model.train()
    optimizer.zero_grad()
    out = model(data.x)
    # Semi-supervised setup: the loss is computed on the training nodes only.
    loss = criterion(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss

for epoch in range(1, 201):
    loss = train()
    # Fixed the missing space after "Loss:" so the log format matches the
    # GCN training section below.
    print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
3.MLP神经网络测试
def test():
    """Return classification accuracy on the test-mask nodes."""
    model.eval()  # disable dropout for evaluation
    logits = model(data.x)
    # Predicted class = argmax over the class logits.
    predictions = logits.argmax(dim=1)
    hits = (predictions[data.test_mask] == data.y[data.test_mask]).sum()
    return int(hits) / int(data.test_mask.sum())

test_acc = test()
print(f'Test Accuracy: {test_acc:.4f}')
GCN图神经网络
1.创建GCN图神经网络
import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class GCN(torch.nn.Module):
    """Two-layer graph convolutional network for node classification.

    Unlike the MLP baseline, each GCNConv layer aggregates information
    from a node's neighbors via ``edge_index``.
    """

    def __init__(self, hidden_channels):
        super().__init__()
        # Fixed seed so the weight initialization is reproducible.
        torch.manual_seed(12345)
        self.conv1 = GCNConv(dataset.num_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, dataset.num_classes)

    def forward(self, x, edge_index):
        h = F.relu(self.conv1(x, edge_index))
        # Dropout is active only in training mode (self.training).
        h = F.dropout(h, p=0.5, training=self.training)
        return self.conv2(h, edge_index)
2.将未训练的节点表征可视化
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
def visualize(h, color):
    """Project node embeddings to 2-D with t-SNE and scatter-plot them.

    h: node-embedding tensor, one row per node.
    color: per-node values used to color the points (here: class labels).

    Bug fix: the original transformed the *global* ``out`` instead of the
    ``h`` argument, so the function silently ignored what callers passed in.
    """
    z = TSNE(n_components=2).fit_transform(h.detach().cpu().numpy())
    plt.figure(figsize=(10, 10))
    plt.xticks([])
    plt.yticks([])
    plt.scatter(z[:, 0], z[:, 1], s=70, c=color, cmap="Set2")
    plt.show()
# Instantiate a fresh (untrained) GCN and visualize its initial embeddings;
# with random weights the classes are not expected to form clean clusters yet.
model = GCN(hidden_channels=16)
model.eval()
out = model(data.x, data.edge_index)
visualize(out, color=data.y)
3.GCN图神经网络的训练
# Re-create the model so training starts from the seeded initial weights.
model = GCN(hidden_channels=16)
criterion = torch.nn.CrossEntropyLoss()
# Adam with L2 regularization via weight_decay.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

def train():
    """Run one full-batch optimization step and return the loss tensor."""
    model.train()
    optimizer.zero_grad()
    logits = model(data.x, data.edge_index)
    # Loss is computed on the training nodes only (semi-supervised setup).
    loss = criterion(logits[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss

for epoch in range(1, 201):
    loss = train()
    print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
4.GCN图神经网络的测试
def test():
    """Return classification accuracy on the test-mask nodes."""
    model.eval()  # disable dropout for evaluation
    logits = model(data.x, data.edge_index)
    predictions = logits.argmax(dim=1)
    hits = (predictions[data.test_mask] == data.y[data.test_mask]).sum()
    return int(hits) / int(data.test_mask.sum())

test_acc = test()
print(f'Test Accuracy: {test_acc:.4f}')
5.可视化训练后的节点表征
# Visualize the node embeddings after training: the classes should now form
# visibly separated clusters in the 2-D t-SNE projection.
model.eval()
out = model(data.x, data.edge_index)
visualize(out, color=data.y)
图注意力神经网络(GAT)
import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.nn import GATConv
class GAT(torch.nn.Module):
    """Two-layer graph attention network for node classification.

    Same architecture shape as the GCN above, but each GATConv layer
    weights neighbor contributions with learned attention coefficients.
    """

    def __init__(self, hidden_channels):
        super().__init__()
        # Fixed seed so the weight initialization is reproducible.
        torch.manual_seed(12345)
        self.conv1 = GATConv(dataset.num_features, hidden_channels)
        self.conv2 = GATConv(hidden_channels, dataset.num_classes)

    def forward(self, x, edge_index):
        h = F.relu(self.conv1(x, edge_index))
        # Dropout is active only in training mode (self.training).
        h = F.dropout(h, p=0.5, training=self.training)
        return self.conv2(h, edge_index)