Reproducing a Paper in GammaGL: ChebNet as an Example

1. Creating the Required Files

  1. Under the examples folder, create a model-training folder named chebnetgcn for running and testing training; inside it, create the corresponding chebnetgcn_trainer.py and a README file.

(screenshot: the chebnetgcn folder with chebnetgcn_trainer.py under examples)

  2. Under the gammagl/models folder, create the model file chebnet_gcn.py, which will hold the ChebNet model. At the same time, add from .chebnet_gcn import ChebNet to models/__init__.py.

    (screenshot: chebnet_gcn.py under gammagl/models)

  3. The ChebNet model uses a ChebConv layer, so under the gammagl/layers/conv folder create the convolution-layer file cheb_conv.py, which will hold the ChebConv layer. At the same time, add from .cheb_conv import ChebConv to conv/__init__.py.

    (screenshot: cheb_conv.py under gammagl/layers/conv)

These are the three files we need; the rest of this post edits exactly these three files to reproduce the paper's model. We proceed top-down: first the training script, then the model, then the convolution layer. The resulting layout is sketched below.
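Putting the three steps together, the new files sit in the repository roughly like this (a sketch of the intended layout; your checkout may differ slightly):

GammaGL/
├── examples/
│   └── chebnetgcn/
│       ├── chebnetgcn_trainer.py
│       └── README.md
└── gammagl/
    ├── layers/
    │   └── conv/
    │       ├── __init__.py      # add: from .cheb_conv import ChebConv
    │       └── cheb_conv.py
    └── models/
        ├── __init__.py          # add: from .chebnet_gcn import ChebNet
        └── chebnet_gcn.py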

2. Writing the Training File

2.1 Imports and Environment Configuration

import os

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# os.environ['TL_BACKEND'] = 'torch'
# os.environ['TL_BACKEND'] = 'mindspore'
# os.environ['TL_BACKEND'] = 'paddle'
os.environ['TL_BACKEND'] = 'tensorflow'  # set your backend here, default `tensorflow`
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import sys

sys.path.insert(0, os.path.abspath('../../'))  # add the repo root to sys.path so the script can run from the command line
import argparse
import tensorlayerx as tlx  # the TensorLayerX package
from gammagl.datasets import Planetoid  # dataset class that loads the Planetoid citation graphs
from gammagl.models import ChebNet  # the ChebNet model class we write below
from tensorlayerx.model import TrainOneStep, WithLoss  # TensorLayerX helpers for training and loss computation

2.2 Command-Line Arguments

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--k", type=int, default=2, help="the k of every chebconv")
    parser.add_argument("--lr", type=float, default=0.01, help="learnin rate")
    parser.add_argument("--n_epoch", type=int, default=200, help="number of epoch")
    parser.add_argument("--hidden_dim", type=int, default=16, help="dimention of hidden layers")
    parser.add_argument("--drop_rate", type=float, default=0.5, help="drop_rate")
    parser.add_argument("--l2_coef", type=float, default=5e-4, help="l2 loss coeficient")
    parser.add_argument('--dataset', type=str, default='cora', help='dataset')
    parser.add_argument("--dataset_path", type=str, default=r'../', help="path to save dataset")
    parser.add_argument("--best_model_path", type=str, default=r'./', help="path to save best model")
    parser.add_argument("--self_loops", type=int, default=1, help="number of graph self-loop")
    args = parser.parse_args()

    main(args)
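In the actual file this if __name__ == '__main__' block sits at the bottom, below the definitions from sections 2.3 to 2.5. A typical invocation then looks like this (a hypothetical command line, using the flags defined above):

cd examples/chebnetgcn
python chebnetgcn_trainer.py --dataset cora --k 2 --lr 0.01 --n_epoch 200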

2.3 Writing the Loss Class

class SemiSpvzLoss(WithLoss):
    def __init__(self, net, loss_fn):
        super(SemiSpvzLoss, self).__init__(backbone=net, loss_fn=loss_fn)

    def forward(self, data, label):
        logits = self._backbone(data['x'], data['edge_index'], data['edge_weight'], data['num_nodes'])
        if tlx.BACKEND == 'mindspore':
            # MindSpore does not support boolean-mask indexing, so convert the
            # mask to integer indices and gather the rows instead
            idx = tlx.convert_to_tensor([i for i, v in enumerate(data['train_mask']) if v], dtype=tlx.int64)
            train_logits = tlx.gather(logits, idx)
            train_label = tlx.gather(label, idx)
        else:
            train_logits = logits[data['train_mask']]
            train_label = label[data['train_mask']]
        loss = self._loss_fn(train_logits, train_label)
        return loss
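Both branches select the same training rows; the MindSpore branch only avoids boolean-mask indexing. A minimal NumPy sketch (purely illustrative) shows the equivalence:

import numpy as np

logits = np.arange(12, dtype=np.float32).reshape(4, 3)  # 4 nodes, 3 classes
mask = np.array([True, False, True, False])             # train_mask

masked = logits[mask]                                   # boolean-mask indexing
idx = np.array([i for i, v in enumerate(mask) if v])    # mask -> integer indices
gathered = logits[idx]                                  # gather, as in the mindspore branch

assert np.array_equal(masked, gathered)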

2.4 Writing the Evaluation Function

def evaluate(net, data, y, mask, metrics):
    net.set_eval()
    logits = net(data['x'], data['edge_index'], data['edge_weight'], data['num_nodes'])
    if tlx.BACKEND == 'mindspore':
        idx = tlx.convert_to_tensor([i for i, v in enumerate(mask) if v], dtype=tlx.int64)
        _logits = tlx.gather(logits, idx)
        _label = tlx.gather(y, idx)
    else:
        _logits = logits[mask]
        _label = y[mask]
    metrics.update(_logits, _label)
    acc = metrics.result()
    metrics.reset()
    return acc

2.5 Writing the main Training Function

def main(args):
    # 1. Load the dataset
    if str.lower(args.dataset) not in ['cora', 'pubmed', 'citeseer']:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))
    dataset = Planetoid(args.dataset_path, args.dataset)
    dataset.process()
    graph = dataset[0]
    graph.tensor()
    # 2. Extract the graph structure; the graph is undirected, so it is fully
    # described by edge_index and edge_weight
    edge_index = graph.edge_index  # which pairs of nodes are connected by an edge
    edge_weight = tlx.ones((edge_index.shape[1],))  # weight of each edge, the A_ij used to build the Laplacian
    x = graph.x  # feature vector of each node
    y = tlx.argmax(graph.y, axis=1)  # label of each node, recovered from the one-hot encoding
    # 3. Build the ChebNet model; num_class is passed so that the output layer
    # matches the number of label classes
    net = ChebNet(feature_dim=x.shape[1],
                  hidden_dim=args.hidden_dim,
                  num_class=dataset.num_classes,
                  k=args.k,
                  drop_rate=args.drop_rate,
                  name="ChebNet")
    # 4. Choose the optimizer for training
    optimizer = tlx.optimizers.Adam(lr=args.lr, weight_decay=args.l2_coef)
    metrics = tlx.metrics.Accuracy()
    train_weights = net.trainable_weights
    # 5. Set up the loss function
    loss_func = SemiSpvzLoss(net, tlx.losses.softmax_cross_entropy_with_logits)
    # 6. Create the TrainOneStep training object
    train_one_step = TrainOneStep(loss_func, optimizer, train_weights)

    data = {
        "x": x,
        "edge_index": edge_index,
        "edge_weight": edge_weight,
        "train_mask": graph.train_mask,
        "test_mask": graph.test_mask,
        "val_mask": graph.val_mask,
        "num_nodes": graph.num_nodes,
    }
    # 7. Train the model
    best_val_acc = 0  # track the best validation accuracy
    for epoch in range(args.n_epoch):
        net.set_train()  # switch to training mode; required because the model contains dropout
        train_loss = train_one_step(data, y)  # one gradient update over the training nodes
        val_acc = evaluate(net, data, y, data['val_mask'], metrics)  # evaluate on the validation set
        # print this epoch's results
        print("Epoch [{:0>3d}] ".format(epoch + 1) + "  train loss: {:.4f}".format(
            train_loss.item()) + "  val acc: {:.4f}".format(val_acc))
        # save the model that performs best on the validation set
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            net.save_weights(args.best_model_path + net.name + ".npz", format='npz_dict')
    # evaluate the best model on the test set
    net.load_weights(args.best_model_path + net.name + ".npz", format='npz_dict')
    test_acc = evaluate(net, data, y, data['test_mask'], metrics)
    print("Test acc:  {:.4f}".format(test_acc))

3. Writing the Model File

This file mainly uses TensorLayerX to wrap a ChebNet model. It is not much different from an ordinary convolutional model; the main difference is the choice of convolution layer, which here comes from the cheb_conv.py file we write in the next section.

from tensorlayerx.nn import Module
import tensorlayerx as tlx
from gammagl.layers.conv.cheb_conv import ChebConv


class ChebNet(Module):
    def __init__(self, feature_dim, hidden_dim, num_class, k, drop_rate=0.5, name=None):
        super().__init__(name=name)
        self.conv1 = ChebConv(feature_dim, hidden_dim, K=k)
        self.conv2 = ChebConv(hidden_dim, num_class, K=k)
        self.relu = tlx.ReLU()
        self.dropout = tlx.layers.Dropout(drop_rate)

    def forward(self, x, edge_index, edge_weight, num_nodes):
        x = self.conv1(x, edge_index, num_nodes, edge_weight)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.conv2(x, edge_index, num_nodes, edge_weight)
        # return raw logits: softmax_cross_entropy_with_logits in the trainer
        # already applies the softmax, so adding a Softmax here would double it
        return x


4. Writing the Convolution Layer File

The convolution layer is the heart of the paper; the exact ChebConv formulas are collected in the appendix, and the dense-matrix sketch below makes the recurrence concrete before we implement the sparse version.
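Here is the Chebyshev recurrence from the appendix in dense-matrix form, as a NumPy sketch on a toy graph (purely illustrative, not part of GammaGL):

import numpy as np

# toy undirected graph with 4 nodes (a cycle)
A = np.array([[0, 1, 0, 1],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [1, 0, 1, 0]], dtype=np.float32)
d_inv_sqrt = np.diag(A.sum(axis=1) ** -0.5)
L = np.eye(4) - d_inv_sqrt @ A @ d_inv_sqrt   # sym-normalized Laplacian
L_hat = 2.0 * L / 2.0 - np.eye(4)             # scaled Laplacian, lambda_max approximated by 2

X = np.random.rand(4, 3).astype(np.float32)   # node features
Tx = [X, L_hat @ X]                           # T_0(L^)X and T_1(L^)X
for k in range(2, 4):                         # remaining terms for K = 4
    Tx.append(2 * L_hat @ Tx[-1] - Tx[-2])    # T_k = 2 L^ T_{k-1} - T_{k-2}
# ChebConv output = sum_k Tx[k] @ Theta_k, with one learned Theta_k per term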

4.1 Writing the Initialization Method
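The methods in 4.1 to 4.3 live in a ChebConv class that extends GammaGL's MessagePassing, so the file starts roughly as follows. The exact import paths for the graph utilities are assumptions here; match them to where remove_self_loops, add_self_loops and get_laplacian live in your GammaGL version:

from typing import Optional
import tensorlayerx as tlx
from gammagl.layers.conv import MessagePassing
# assumed utility locations; adjust to your GammaGL tree
from gammagl.utils import remove_self_loops, add_self_loops, get_laplacian


class ChebConv(MessagePassing):
    # the __init__, __normal__ and forward methods from 4.1-4.3 go here
    ...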

    def __init__(self, in_channels: int, out_channels: int, K: int, normalization: Optional[str] = 'sym',
                 bias: bool = True, **kwargs):
        kwargs.setdefault('aggr', 'add')
        super(ChebConv, self).__init__()

        assert K > 0  # K must be a positive integer
        assert normalization in [None, 'sym', 'rw'], 'Invalid normalization'  # check the normalization type
        W_init = tlx.nn.initializers.truncated_normal(stddev=0.05)  # weight initializer
        b_init = tlx.nn.initializers.constant(value=0.1)  # bias initializer
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.normalization = normalization
        self.lins = tlx.nn.ModuleList([
            tlx.layers.Linear(in_features=in_channels, out_features=out_channels, W_init=W_init, b_init=b_init)
            for _ in range(K)
        ])  # K linear layers; the k-th one holds the Chebyshev coefficient matrix Θ^(k)

4.2 Writing the Laplacian Normalization Method

    def __normal__(self, edge_index, num_nodes: Optional[int],
                   edge_weight, normalization: Optional[str],
                   lambda_max, batch=None):
        edge_index, edge_weight = remove_self_loops(tlx.convert_to_numpy(edge_index),
                                                    tlx.convert_to_numpy(edge_weight))  # remove self-loops
        edge_index, edge_weight = get_laplacian(edge_index=edge_index, num_nodes=num_nodes,
                                                edge_weight=edge_weight, normalization_type=normalization)  # build the (normalized) Laplacian
        if batch is not None and lambda_max.numel() > 1:
            lambda_max = lambda_max[batch[edge_index[0]]]
        edge_weight = (2.0 * edge_weight) / lambda_max  # rescale: 2L / lambda_max
        edge_index, edge_weight = add_self_loops(edge_index=edge_index, edge_attr=edge_weight, fill_value=-1,
                                                 num_nodes=num_nodes)  # add -1 self-loops, i.e. subtract the identity
        assert edge_weight is not None
        return edge_index, edge_weight
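A useful sanity check on this method: with normalization='sym' and the default lambda_max = 2 used in forward, the rescaling leaves the symmetric Laplacian unchanged and the -1 self-loops subtract the identity, so the operator the layer actually propagates is

$$\hat{\mathbf{L}} = \frac{2\mathbf{L}_{\mathrm{sym}}}{2} - \mathbf{I} = \bigl(\mathbf{I} - \mathbf{D}^{-1/2}\mathbf{A}\mathbf{D}^{-1/2}\bigr) - \mathbf{I} = -\mathbf{D}^{-1/2}\mathbf{A}\mathbf{D}^{-1/2},$$

which is why forward can safely default to lambda_max = 2 in the symmetric case: the eigenvalues of $\mathbf{L}_{\mathrm{sym}}$ always lie in $[0, 2]$.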

4.3 Writing the forward Method

    def forward(self, x, edge_index, num_nodes, edge_weight: Optional = None, batch: Optional = None,
                lambda_max: Optional = None):
        if self.normalization != 'sym' and lambda_max is None:
            raise ValueError('You need to pass `lambda_max` to `forward()` in '
                             'case the normalization is non-symmetric.')
        if lambda_max is None:
            lambda_max = tlx.convert_to_tensor(2.0)
        assert lambda_max is not None
        edge_index, normal = self.__normal__(edge_index, num_nodes,
                                             edge_weight, self.normalization,
                                             lambda_max, batch=batch)
        Tx_0 = x  # T_0(L^)x = x
        Tx_1 = x
        out = self.lins[0](Tx_0)  # k = 0 term

        if len(self.lins) > 1:
            Tx_1 = self.propagate(x=x, edge_index=edge_index, edge_weight=normal)  # T_1(L^)x = L^x
            out = out + self.lins[1](Tx_1)  # k = 1 term

        for lin in self.lins[2:]:
            Tx_2 = self.propagate(x=Tx_1, edge_index=edge_index, edge_weight=normal)
            Tx_2 = 2 * Tx_2 - Tx_0  # Chebyshev recurrence: T_k = 2 L^ T_{k-1} - T_{k-2}
            out = out + lin(Tx_2)
            Tx_0, Tx_1 = Tx_1, Tx_2  # shift the last two terms for the next iteration

        return out
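As a quick shape check of the finished layer (a hypothetical snippet; the dimensions are Cora's from section 2, with 2708 nodes and 1433 features):

# x: [2708, 1433] node features, edge_index: [2, num_edges], edge_weight: [num_edges]
conv = ChebConv(in_channels=1433, out_channels=16, K=2)
out = conv(x, edge_index, num_nodes=2708, edge_weight=edge_weight)
# out has shape [2708, 16]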

5. Training Results

(screenshot: per-epoch training log of loss and validation accuracy, ending with the final test accuracy)

Appendix: ChebConv

ChebConv computes

$$\mathbf{X}' = \sum_{k=1}^{K} \mathbf{Z}^{(k)} \cdot \mathbf{\Theta}^{(k)},$$

where the terms follow the Chebyshev recurrence

$$\mathbf{Z}^{(1)} = \mathbf{X}, \qquad \mathbf{Z}^{(2)} = \hat{\mathbf{L}} \cdot \mathbf{X}, \qquad \mathbf{Z}^{(k)} = 2 \, \hat{\mathbf{L}} \cdot \mathbf{Z}^{(k-1)} - \mathbf{Z}^{(k-2)},$$

with the scaled and normalized Laplacian

$$\hat{\mathbf{L}} = \frac{2 \mathbf{L}}{\lambda_{\max}} - \mathbf{I}.$$

Each $\mathbf{\Theta}^{(k)}$ is a learned weight matrix; these are exactly the K linear layers stored in self.lins.
