Adding visualization and code comments to the GitHub project: Graph Convolutional Networks in PyTorch
For more detail, I strongly recommend another post: [GCN] code walkthrough of the GitHub project Graph Convolutional Networks in PyTorch
GitHub links
Original project on GitHub (no result visualization):
Graph Convolutional Networks in PyTorch
GitHub with visualization + code comments:
Modification of Graph Convolutional Networks in PyTorch
Visualization results
Visualization is done with visdom; dimensionality reduction is performed by the t-SNE algorithm.
Reduced to 2 dimensions:
Reduced to 3 dimensions:
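As a minimal sketch of how these plots are produced (my own toy example with placeholder data, assuming a visdom server is already running on the default port), the pipeline is: reduce the model output with t-SNE, then send the result to visdom's scatter plot.

# Minimal sketch (placeholder data, not from the repo): t-SNE + visdom scatter
import numpy as np
from sklearn import manifold
from visdom import Visdom

output = np.random.rand(300, 7)            # stand-in for the GCN's output for 300 nodes
labels = np.random.randint(0, 7, 300)      # stand-in for the 7 Cora class labels

tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
result = tsne.fit_transform(output)        # shape (300, 2)

vis = Visdom()                             # requires `python -m visdom.server` to be running
vis.scatter(X=result, Y=labels + 1,        # visdom class labels must start at 1
            opts=dict(markersize=5, title='Dimension reduction to 2D'))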
Code comments
layers.py
import math

import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module


class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    '''Define the layer's attributes'''
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))  # in_features × out_features
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    '''Initialize the weights'''
    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)  # .uniform_(): fill the tensor with samples drawn from a uniform distribution
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    '''Forward pass within one layer, i.e. this layer computes A_hat * X * W'''
    def forward(self, input, adj):
        support = torch.mm(input, self.weight)  # torch.mm: dense matrix multiplication of input and weight
        output = torch.spmm(adj, support)       # torch.spmm: sparse matrix multiplication ("sp" = sparse)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    '''Readable string representation of the object, shown when it is printed in the terminal'''
    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
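A minimal usage sketch of this layer (a toy example I added, not part of the repo), just to make the A_hat * X * W computation concrete:

# Toy sketch (not in the original repo): apply one GraphConvolution layer
import torch
from pygcn.layers import GraphConvolution

x = torch.rand(3, 4)                   # 3 nodes, 4 input features each
adj_dense = torch.eye(3) * 0.5 + 0.25  # made-up "normalized" adjacency, only for shape
adj = adj_dense.to_sparse()            # torch.spmm expects a sparse matrix

layer = GraphConvolution(in_features=4, out_features=2)
out = layer(x, adj)                    # A_hat * X * W + b
print(layer)                           # GraphConvolution (4 -> 2)
print(out.shape)                       # torch.Size([3, 2])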
models.py
import torch.nn as nn
import torch.nn.functional as F
from pygcn.layers import GraphConvolution


'''The GCN model'''
class GCN(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)   # first layer
        self.gc2 = GraphConvolution(nhid, nclass)  # second layer
        self.dropout = dropout                     # dropout rate

    '''Forward pass across layers, i.e. the whole network: relu(gc1) --> dropout --> gc2 --> log_softmax'''
    def forward(self, x, adj):
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj)
        return F.log_softmax(x, dim=1)
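A quick sketch of instantiating the two-layer model (toy shapes of my own, not from the repo):

# Toy sketch (not in the original repo): build and run the two-layer GCN
import torch
from pygcn.models import GCN

x = torch.rand(5, 1433)          # 5 nodes with Cora-sized feature vectors
adj = torch.eye(5).to_sparse()   # identity adjacency, just to satisfy torch.spmm
model = GCN(nfeat=1433, nhid=16, nclass=7, dropout=0.5)
log_probs = model(x, adj)        # shape (5, 7); each row holds log-probabilities
print(log_probs.shape)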
train.py
from __future__ import division
from __future__ import print_function
# Path initialization
import os, sys
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
sys.path.append('E:\\Anaconda\\lib\\site-packages\\')
# print(sys.path)
print('Path initialization finished!\n')
# Extra imports for visualization
from time import time  # note: shadowed by `import time` below, so effectively unused
from sklearn import manifold, datasets
# visdom display module
from visdom import Visdom

import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from pygcn.utils import load_data, accuracy
from pygcn.models import GCN
def show_Hyperparameter(args):
    argsDict = args.__dict__
    print(argsDict)
    print('the settings are as following:\n')
    for key in argsDict:
        print(key, ':', argsDict[key])
def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    '''The output is computed for all nodes'''
    output = model(features, adj)
    '''The loss is computed only on the training nodes, i.e. optimization uses only the training set'''
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    # Compute training accuracy
    acc_train = accuracy(output[idx_train], labels[idx_train])
    # Back-propagate
    loss_train.backward()
    # Optimizer step
    optimizer.step()
    '''fastmode: if set, the validation metrics reuse the training-mode output instead of a separate eval pass'''
    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)

    '''Validation loss and accuracy'''
    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])

    '''Print training and validation loss and accuracy'''
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
def test():
    model.eval()
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
    return output  # return output for visualization
# t-SNE dimensionality reduction
def t_SNE(output, dimention):
    # output: data to be reduced
    # dimention: target dimensionality
    tsne = manifold.TSNE(n_components=dimention, init='pca', random_state=0)
    result = tsne.fit_transform(output)
    return result
# Visualization with visdom
def Visualization(result, labels):
    vis = Visdom()
    vis.scatter(
        X=result,
        Y=labels+1,  # shift labels from 0-based to 1-based; visdom does not accept label 0
        opts=dict(markersize=5, title='Dimension reduction to %dD' % (result.shape[1])),
    )
'''Main script starts here'''
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False,
                    help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
                    help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
                    help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
                    help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')
args = parser.parse_args()
# Show the args
show_Hyperparameter(args)

# Whether to use CUDA
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()  # labels are also returned for visualization
# Model
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)

# Optimizer
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)

# Move everything to CUDA
if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()
# Train model
t_total = time.time()
for epoch in range(args.epochs):
    train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
# Testing
output = test()  # returns output for visualization

# Convert output and labels to numpy for t-SNE
output = output.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()

# # Inspect the results
# print(result)
# print(type(result))   # <class 'numpy.ndarray'>
# print(result.shape)   # (2708, 2)
# print(labels)
# print(type(labels))   # <class 'numpy.ndarray'>
# print(labels.shape)   # (2708,)
# Visualization with visdom
result=t_SNE(output,2)
Visualization(result,labels)
result=t_SNE(output,3)
Visualization(result,labels)
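Note that the two Visualization calls above assume a visdom server is already listening; as far as I know the usual workflow is to start it in a separate terminal with `python -m visdom.server` before running train.py, after which the scatter plots appear in the browser at http://localhost:8097 (the default port).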
utils.py
import numpy as np
import scipy.sparse as sp
import torch


def encode_onehot(labels):
    classes = set(labels)  # set() builds an unordered collection of unique elements
    # enumerate() yields pairs of index i and class name c.
    # This builds a mapping from each string label to one row of the identity matrix, i.e. a one-hot vector.
    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
                    enumerate(classes)}
    # map() applies the dict lookup to every label in the sequence,
    # replacing each string label with its one-hot vector.
    labels_onehot = np.array(list(map(classes_dict.get, labels)),
                             dtype=np.int32)
    # Return the one-hot encoded labels
    return labels_onehot
'''Data loading'''
# Path changed from ../ to C:\Users\73416\PycharmProjects\PyGCN
def load_data(path="C:/Users/73416/PycharmProjects/PyGCN_Visualization/data/cora/", dataset="cora"):
    """Load citation network dataset (cora only for now)"""
    print('Loading {} dataset...'.format(dataset))

    ''' About cora.content:
    cora.content has 2708 rows, each row representing one sample, i.e. one paper.
    Each row consists of three parts:
        the paper id, e.g. 31336;
        the paper's word vector, a 1433-dimensional binary vector;
        the paper's class, e.g. Neural_Networks. There are 7 classes (labels) in total.
    So the first field is the paper id, the last is the class, and everything in between is the feature vector.
    '''
    '''Read features and labels'''
    # Read the dataset file as strings: one row of information per paper.
    idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
                                        dtype=np.dtype(str))
    # csr_matrix: Compressed Sparse Row matrix, a compressed form of a sparse np.array
    # idx_features_labels[:, 1:-1] skips the paper id and the class, keeping only the node features
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    # idx_features_labels[:, -1] takes only the last column, the paper class, which is one-hot encoded
    labels = encode_onehot(idx_features_labels[:, -1])

    # build graph
    # idx_features_labels[:, 0] takes the paper ids
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    # Build a dictionary mapping each paper id to its position in the file
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
                                    dtype=np.int32)
    # Remap the paper ids:
    # the raw ids are arbitrary, so the papers are re-indexed from 0 and the new ids substituted into the edge list.
    # The goal is to turn the scattered original ids into contiguous ids 0 - 2707.
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    # coo_matrix(): a compressed sparse matrix, defined by the nonzero values,
    # the row and col index of each nonzero value, and the matrix shape.
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)

    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)

    # Normalize features and adjacency matrix
    features = normalize(features)
    adj = normalize(adj + sp.eye(adj.shape[0]))

    # Split into train set, validation set and test set
    idx_train = range(140)
    idx_val = range(200, 500)
    idx_test = range(500, 1500)

    # Convert everything to tensors
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(np.where(labels)[1])
    adj = sparse_mx_to_torch_sparse_tensor(adj)
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    # Return the data
    return adj, features, labels, idx_train, idx_val, idx_test
'''Row-normalization'''
def normalize(mx):
    """Row-normalize sparse matrix"""
    rowsum = np.array(mx.sum(1))             # sum of each row
    r_inv = np.power(rowsum, -1).flatten()   # inverse of each row sum
    r_inv[np.isinf(r_inv)] = 0.              # rows summing to zero would give inf; set those entries to 0
    r_mat_inv = sp.diags(r_inv)              # diagonal matrix D^-1
    mx = r_mat_inv.dot(mx)                   # D^-1 * mx
    return mx
'''Compute accuracy'''
def accuracy(output, labels):
    preds = output.max(1)[1].type_as(labels)  # predicted class = argmax over the class dimension
    correct = preds.eq(labels).double()
    correct = correct.sum()
    return correct / len(labels)
'''Convert a sparse matrix to a sparse tensor'''
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)
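To make the data-processing helpers above more concrete, here is a small toy check (my own example, not from the repo) of what encode_onehot and normalize produce:

# Toy sketch (not in the original repo): outputs of encode_onehot and normalize
import numpy as np
import scipy.sparse as sp
from pygcn.utils import encode_onehot, normalize

labels = np.array(['GA', 'NN', 'GA'])
print(encode_onehot(labels))
# one one-hot row per paper, e.g.
# [[1 0]
#  [0 1]
#  [1 0]]   (column order depends on set() iteration order)

A = sp.coo_matrix(np.array([[0., 1., 1.],
                            [1., 0., 0.],
                            [1., 0., 0.]]))
A_hat = normalize(A + sp.eye(3))   # row-normalized A + I, i.e. D^-1 (A + I)
print(A_hat.toarray())
# every row sums to 1, e.g. the first row becomes [1/3, 1/3, 1/3]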