# Source: article "代码库: DGL_基本框架" (code library: DGL basic framework).
# File: dgl_1_gcn.py — a minimal two-layer GCN trained on the Cora dataset.
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from dgl.nn import GraphConv
import dgl.data
import time
class GCN(nn.Module):
    """Two-layer graph convolutional network producing per-node class logits.

    Convention: the graph structure and the node features are kept separate;
    all graph-level functionality goes through the DGL graph API.

    ``GraphConv`` parameters, for reference:
        in_feats: int, input feature dimension.
        out_feats: int, output feature dimension.
        norm: str, normalization mode —
            'right': normalize the input features only,
            'none':  no normalization,
            'both':  (default) normalize both input and output features.
        weight: bool (default True), apply a learnable linear transform;
            if False the weight matrix is skipped.
            NOTE: in_feats and out_feats together define the weight
            matrix's shape.
        bias: bool, default True.
        activation: default None, or a callable applied to the output.
    """

    def __init__(self, in_feats, h_feats, num_classes):
        super(GCN, self).__init__()
        # Hidden graph-convolution layer followed by the classification layer.
        self.conv1 = GraphConv(in_feats, h_feats)
        self.conv2 = GraphConv(h_feats, num_classes)

    def forward(self, graph, in_feature):
        # conv -> ReLU -> conv; returns raw (unnormalized) logits per node.
        hidden = F.relu(self.conv1(graph, in_feature))
        return self.conv2(graph, hidden)
def train(graph, model, epochs=10):
    """Train ``model`` on ``graph`` with full-batch gradient descent.

    Reads node features, labels, and the train/validation boolean masks from
    ``graph.ndata``, optimizes cross-entropy on the training nodes with Adam
    (lr=0.01), and prints loss/accuracy every 5 epochs along with the wall
    time elapsed since the previous report.

    Args:
        graph: a DGL graph whose ``ndata`` holds 'feat', 'label',
            'train_mask' and 'val_mask' tensors.
        model: a module called as ``model(graph, features)`` that returns
            per-node class logits.
        epochs: number of full-batch training iterations (default 10).
    """
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    features = graph.ndata['feat']
    labels = graph.ndata['label']
    train_mask = graph.ndata['train_mask']
    valid_mask = graph.ndata['val_mask']
    model.train()  # ensure training-mode behavior (dropout/batchnorm, if any)
    start = time.time()
    for epoch in range(epochs):
        logits = model(graph, features)
        loss = F.cross_entropy(logits[train_mask], labels[train_mask])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Metrics are computed from the pre-update logits (same as the
        # original); .item() converts 0-dim tensors to plain Python floats
        # so formatting does not depend on Tensor.__format__.
        pred = logits.argmax(dim=1)
        train_acc = (pred[train_mask] == labels[train_mask]).float().mean().item()
        valid_acc = (pred[valid_mask] == labels[valid_mask]).float().mean().item()
        if epoch % 5 == 0:
            print('In epoch {}, loss: {:.3f}, train acc: {:.3f}, val acc: {:.3f}, time: {:.3f}'
                  .format(epoch, loss.item(), train_acc, valid_acc, time.time() - start))
            start = time.time()
if __name__ == '__main__':
    # Cora citation dataset: a single graph, node-classification task.
    cora = dgl.data.CoraGraphDataset()
    g = cora[0]
    # Input width comes from the node feature matrix; 16 hidden units.
    net = GCN(g.ndata['feat'].shape[1], 16, cora.num_classes)
    train(g, net, epochs=500)