【code】Notes on MGCN frameworks

The GCN framework itself is simple, but I did not know how to write an MGCN. Here I collect the MGCN implementations from several codebases, hoping to learn by comparing them.

##### Classic GCN #####
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
    def forward(self, input, adj):
        support = torch.mm(input, self.weight)   # X * W
        output = torch.spmm(adj, support)        # A_hat * (X * W), sparse matmul
        if self.bias is not None:
            return output + self.bias
        else:
            return output
    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
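A quick sanity check for this layer (the toy graph and sizes are my own invention; the adjacency is symmetrically normalized as D^-1/2 (A+I) D^-1/2 and passed as a sparse tensor, since forward uses torch.spmm):

import torch

A = torch.tensor([[0., 1., 0., 0.],
                  [1., 0., 1., 0.],
                  [0., 1., 0., 1.],
                  [0., 0., 1., 0.]])
A_hat = A + torch.eye(4)                      # add self-loops
d_inv_sqrt = A_hat.sum(dim=1).pow(-0.5)
A_norm = d_inv_sqrt.unsqueeze(1) * A_hat * d_inv_sqrt.unsqueeze(0)

layer = GraphConvolution(in_features=3, out_features=2)
out = layer(torch.randn(4, 3), A_norm.to_sparse())
print(out.shape)                              # torch.Size([4, 2])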

Variant 1

import torch
import torch.nn as nn
import torch.nn.functional as F

class GCN(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.nclass = nclass

        layers = []
        layers.append(GraphConvolution(nfeat, nhid[0]))
        for i in range(len(nhid)-1):
            layers.append(GraphConvolution(nhid[i], nhid[i+1]))
        if nclass > 1:
            layers.append(GraphConvolution(nhid[-1], nclass))
        self.gc = nn.ModuleList(layers)
        self.dropout = dropout

    def forward(self, x, adj):
        end_layer = len(self.gc)-1 if self.nclass > 1 else len(self.gc)
        for i in range(end_layer):
            x = F.relu(self.gc[i](x, adj))
            x = F.dropout(x, self.dropout, training=self.training)
        if self.nclass > 1:
            classifier = self.gc[-1](x, adj)
            return F.log_softmax(classifier, dim=1), x
        else:
            return None, x
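A minimal smoke test of this wrapper (all sizes invented; nhid is a list of hidden widths, and adj is again a sparse normalized adjacency as the layer above expects):

x = torch.randn(5, 8)
adj = torch.eye(5).to_sparse()            # placeholder for the normalized adjacency
model = GCN(nfeat=8, nhid=[16, 16], nclass=3, dropout=0.5)
log_probs, embedding = model(x, adj)      # (5, 3) log-probabilities, (5, 16) features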

Variant 2

import torch
from torch.nn import Parameter
from torch_geometric.nn.inits import uniform, ones, glorot, normal


class MGConv(torch.nn.Module):
    """
    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        K (int): Number of scales.
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
    """

    def __init__(self, in_channels, out_channels, K, bias=True, number=0):
        super(MGConv, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.weight = Parameter(torch.Tensor(K, in_channels, out_channels))
        self.number = number
        self.K = K

        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()

    def reset_parameters(self):
        # normal(self.weight, 0, 0.1)
        glorot(self.weight)
        # ones(self.weight)
        if self.bias is not None:
            ones(self.bias)

    def forward(self, x, Win):
        # Win holds K-1 precomputed propagation matrices, one per extra scale;
        # scale 0 uses the raw features directly.
        for i in range(self.weight.size(0)):
            if i == 0:
                out = torch.matmul(x, self.weight[0])
            else:
                WWW = torch.matmul(torch.t(Win[i - 1]), x)
                out = out + torch.matmul(WWW, self.weight[i])

        if self.bias is not None:
            out = out + self.bias

        torch.cuda.empty_cache()  # release cached GPU blocks

        return out


    def __repr__(self):
        return '{}({}, {}, K={})'.format(self.__class__.__name__,
                                         self.in_channels, self.out_channels,
                                         self.weight.size(0))
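A hedged usage sketch; my reading of the loop above is that Win supplies K-1 propagation matrices of size n_nodes x n_nodes (they are transposed inside forward), with scale 0 being the raw features:

import torch

n, K = 6, 3
x = torch.randn(n, 4)
Win = [torch.randn(n, n) for _ in range(K - 1)]   # e.g. normalized A and A^2
conv = MGConv(in_channels=4, out_channels=8, K=K)
print(conv(x, Win).shape)                          # torch.Size([6, 8])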

Variant 3

from torch import nn
import torch



# Static GCN w/ dense adj
class GCN(nn.Module):
    def __init__(self, K:int, input_dim:int, hidden_dim:int, bias=True, activation=nn.ReLU):
        super().__init__()
        self.K = K
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.bias = bias
        self.activation = activation() if activation is not None else None
        self.init_params(n_supports=K)

    def init_params(self, n_supports:int, b_init=0):
        self.W = nn.Parameter(torch.empty(n_supports*self.input_dim, self.hidden_dim), requires_grad=True)
        nn.init.xavier_normal_(self.W)
        if self.bias:
            self.b = nn.Parameter(torch.empty(self.hidden_dim), requires_grad=True)
            nn.init.constant_(self.b, val=b_init)

    def forward(self, A:torch.Tensor, x:torch.Tensor):
        '''
        Batch-wise graph convolution operation on given list of support adj matrices
        :param A: support adj matrices - torch.Tensor (K, n_nodes, n_nodes)
        :param x: graph feature/signal - torch.Tensor (batch_size, n_nodes, input_dim)
        :return: hidden representation - torch.Tensor (batch_size, n_nodes, hidden_dim)
        '''
        assert self.K == A.shape[0]

        support_list = list()
        for k in range(self.K):
            support = torch.einsum('ij,bjp->bip', A[k, :, :], x)
            support_list.append(support)
        support_cat = torch.cat(support_list, dim=-1)

        output = torch.einsum('bip,pq->biq', support_cat, self.W)
        if self.bias:
            output += self.b
        output = self.activation(output) if self.activation is not None else output
        return output

    def __repr__(self):
        return self.__class__.__name__ + f'({self.K} * input {self.input_dim} -> hidden {self.hidden_dim})'
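A minimal smoke test (sizes invented): stack K support matrices, e.g. [I, A_hat, A_hat^2], and push a batch through.

import torch

n, K, B = 5, 3, 2
A_hat = torch.softmax(torch.randn(n, n), dim=1)               # stand-in normalized adjacency
supports = torch.stack([torch.eye(n), A_hat, A_hat @ A_hat])  # (K, n, n)
gcn = GCN(K=K, input_dim=4, hidden_dim=8)
out = gcn(supports, torch.randn(B, n, 4))
print(out.shape)                                              # torch.Size([2, 5, 8])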

Variant 4

《A Spatial-Temporal Attention Multi-Graph Convolution Network for Ride-Hailing Demand Prediction Based on Periodicity with Offset》
First, define the GCN:

from torch import nn
import torch


# Static GCN w/ dense adj


class GCN(nn.Module):
    def __init__(self, K: int, input_dim: int, hidden_dim: int, bias=True, activation=nn.ReLU):
        super().__init__()
        self.K = K
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.bias = bias
        self.activation = activation() if activation is not None else None
        self.W = nn.Parameter(torch.empty(self.K * self.input_dim, self.hidden_dim), requires_grad=True)
        self.b = nn.Parameter(torch.empty(self.hidden_dim), requires_grad=True)
        self.reset_parameters()  # torch.empty is uninitialized memory; initialize right away

    def reset_parameters(self):
        nn.init.xavier_normal_(self.W)
        # nn.init.kaiming_normal_(self.W, nonlinearity='relu')
        if self.bias:
            nn.init.uniform_(self.b)

    def forward(self, A: torch.Tensor, x: torch.Tensor):
        """
        Batch-wise graph convolution operation on given list of support adj matrices
        :param A: support adj matrices - torch.Tensor (K, n_nodes, n_nodes)
        :param x: graph feature/signal - torch.Tensor (batch_size, n_nodes, input_dim)
        :return: hidden representation - torch.Tensor (batch_size, n_nodes, hidden_dim)
        """
        assert self.K == A.shape[0]

        support_list = list()
        for k in range(self.K):
            support = torch.einsum('ij,bjp->bip', A[k, :, :], x)
            support_list.append(support)
        support_cat = torch.cat(support_list, dim=-1)

        output = torch.einsum('bip,pq->biq', support_cat, self.W)
        if self.bias:
            output += self.b
        output = self.activation(output) if self.activation is not None else output
        return output

    def __repr__(self):
        return self.__class__.__name__ + f'({self.K} * input {self.input_dim} -> hidden {self.hidden_dim})'

Then define the multi-graph convolution:

import torch
import torch.nn as nn
from layers.GCN import GCN

class MultiGraphConv(nn.Module):
    """
    Multi-graph convolution: one GCN per graph, aggregated across graphs.
    """
    def __init__(self, K, input_dim: int, out_dim: int, agg='sum'):
        """
        :param K: number of supports per graph: 1 for a plain GCN, 3 for ChebConv
        :param input_dim: input feature dimension
        :param out_dim: output feature dimension
        :param agg: how the per-graph outputs are aggregated; defaults to 'sum'
        """
        super().__init__()
        self.agg = agg
        self.gcn1 = GCN(K, input_dim, out_dim, bias=True, activation=nn.ReLU)
        self.gcn2 = GCN(K, input_dim, out_dim, bias=True, activation=nn.ReLU)
        self.gcn3 = GCN(K, input_dim, out_dim, bias=True, activation=nn.ReLU)

    def reset_parameters(self):
        self.gcn1.reset_parameters()
        self.gcn2.reset_parameters()
        self.gcn3.reset_parameters()

    def forward(self, x, A):
        """
        :param A: support adj matrices - torch.Tensor (3, K, n_nodes, n_nodes)
        :param x: graph feature/signal - torch.Tensor (batch_size, n_nodes, input_dim)
        :return: aggregated representation - torch.Tensor (batch_size, n_nodes, out_dim)
        """
        out1 = self.gcn1(A[0], x)
        out2 = self.gcn2(A[1], x)
        out3 = self.gcn3(A[2], x)
        if self.agg == 'sum':
            return out1 + out2 + out3
        elif self.agg == 'max':
            out = torch.stack([out1, out2, out3], dim=0)  # (3, batch, n_nodes, out_dim)
            return torch.max(out, dim=0).values           # element-wise max over the graphs
        else:
            raise ValueError(f'ERROR: aggregation {self.agg} is not defined.')
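A hedged usage sketch (the three graphs are random stand-ins for three differently-built adjacencies; each slice A[g] has the (K, n_nodes, n_nodes) shape the GCN above expects, and the GCN defined above is assumed to be importable as layers.GCN):

import torch

n, K, B = 5, 1, 2
A = torch.rand(3, K, n, n)                 # three single-support graphs
mgc = MultiGraphConv(K=K, input_dim=4, out_dim=8, agg='sum')
out = mgc(torch.randn(B, n, 4), A)
print(out.shape)                           # torch.Size([2, 5, 8])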

Variant 5

Code link: https://github.com/chenjiehu/MGCN_ODA

import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
import torch.nn as nn
from torch.nn import functional as F
import torch
import torch.nn.init as init
import numpy as np
from torch.nn.parameter import Parameter

class GraphConvolution(nn.Module):
    def __init__(self, input_dim, output_dim, use_bias=True):
        """图卷积:L*X*\theta
        Args:
        ----------
            input_dim: int
                节点输入特征的维度
            output_dim: int
                输出特征维度
            use_bias : bool, optional
                是否使用偏置
        """
        super(GraphConvolution, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.use_bias = use_bias
        self.weight = nn.Parameter(torch.Tensor(input_dim, output_dim))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(output_dim))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        init.kaiming_uniform_(self.weight)
        if self.use_bias:
            init.zeros_(self.bias)

    def forward(self, adjacency, input_feature):
        """邻接矩阵是稀疏矩阵,因此在计算时使用稀疏矩阵乘法
        Args:
        -------
            adjacency: torch.sparse.FloatTensor
                邻接矩阵
            input_feature: torch.Tensor
                输入特征
        """
        support = torch.mm(input_feature, self.weight)
        output = torch.mm(adjacency, support)
        if self.use_bias:
            output += self.bias
        return output

MGCN

# Improvement 4: multi-graph GCN with A + A^2, final version
class MultiGCN(nn.Module):
    def __init__(self, input_dim, N_way):
        super().__init__()
        self.bn1 = nn.BatchNorm1d(input_dim)
        self.bn2 = nn.BatchNorm1d(1000)

        self.gcn = GraphConvolution(input_dim, 1000)

        # Mixing coefficients for I, A_hat, A_hat^2 (the first is frozen)
        self.aifa1 = nn.Parameter(torch.Tensor(1), requires_grad=False)
        self.aifa2 = nn.Parameter(torch.Tensor(1), requires_grad=True)
        self.aifa3 = nn.Parameter(torch.Tensor(1), requires_grad=True)

        self.weight = Parameter(torch.FloatTensor(input_dim, 1000))
        self.aifa1.data.fill_(0)
        self.aifa2.data.fill_(0)
        self.aifa3.data.fill_(0)


        self.test_N_way = N_way
        self.reset_parameters_kaiming()
    def forward(self, features):
        A = self.MultiAdjacencyCompute(features)
        x = self.gcn(A, features)
        x = F.relu(self.bn2(x))
        x = F.dropout(x, 0.6, training=self.training)
        return x

    def MultiAdjacencyCompute(self, features):
        N = features.size(0)
        # Pairwise Euclidean distances -> Gaussian-kernel affinities
        temp = torch.norm(features.repeat(N, 1) - features.repeat(1, N).view(N * N, -1), dim=1)
        adjacency_e = torch.exp(-temp.pow(2) / 9).view(N, N)
        _, position = torch.topk(adjacency_e, round(N / self.test_N_way), dim=1, sorted=False, largest=True)
        adjacency0 = torch.zeros(N, N).cuda()
        for num in range(N):        # keep only the top-k entries in each row
            adjacency0[num, position[num, :]] = 1
            adjacency0[num, num] = 0
        adjacency_e = torch.mul(adjacency0, adjacency_e)

        adjacency = torch.eye(N).cuda() + adjacency_e

        # Degree-based symmetric normalization
        d = torch.sum(adjacency, dim=1)
        d = d + 1
        d = torch.sqrt(d)
        D = torch.diag(d)
        inv_D = torch.inverse(D)
        adjacencyn = torch.mm(torch.mm(inv_D, adjacency), inv_D)

        # Learned softmax mixture over three hops: I, A_hat, A_hat^2
        aifa = F.softmax(torch.cat([self.aifa1, self.aifa2, self.aifa3], dim=0), dim=0)

        adjacency = aifa[0] * torch.eye(N).cuda() + aifa[1] * adjacencyn + aifa[2] * torch.mm(adjacencyn, adjacencyn)

        return adjacency

    def reset_parameters_kaiming(self):
        nn.init.kaiming_normal_(self.weight.data, a=0, mode='fan_in')
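A hedged sketch of how this variant might be driven (the few-shot sizes are invented; a GPU is required because the module hard-codes .cuda()):

import torch

model = MultiGCN(input_dim=64, N_way=5).cuda()
feats = torch.randn(30, 64).cuda()   # 30 samples in a 5-way episode
out = model(feats)                   # the adjacency is rebuilt from features each call
print(out.shape)                     # torch.Size([30, 1000])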