GNN Learning Summary

Summary

This post records my main takeaways.

GCN Code

import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from torch_geometric.nn import GCNConv

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()

        self.conv1 = GCNConv(165, 128)
        # conv2 and conv3 are defined but never used in forward()
        self.conv2 = GCNConv(128, 128)
        self.conv3 = GCNConv(64, 64)
        self.conv4 = GCNConv(128, 1)

    def forward(self, x, edge_index, adj=None):
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, p=0.1, training=self.training)
        x = self.conv4(x, edge_index)

        return torch.sigmoid(x)  # F.sigmoid is deprecated in recent PyTorch versions
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)
model.double()
# data_train (a torch_geometric Data object with 165-dim double node features and
# binary labels) and train_idx are assumed to have been built earlier
data_train = data_train.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-5)
# note: the scheduler is created but never stepped in the loop below
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
criterion = torch.nn.BCELoss()
x, edge_index = data_train.x, data_train.edge_index

for epoch in range(70):
    model.train()  # switch back to training mode (model.eval() is called at the end of each epoch)
    optimizer.zero_grad()
    out = model(x, edge_index)
    out = out.reshape((data_train.x.shape[0]))
    loss = criterion(out[train_idx], data_train.y[train_idx])
    auc = roc_auc_score(data_train.y.detach().cpu().numpy()[train_idx],
                        out.detach().cpu().numpy()[train_idx])
    loss.backward()
    optimizer.step()

    if epoch % 5 == 0:
        print("epoch: {} - loss: {} - roc: {}".format(epoch, loss.item(), auc))
    model.eval()
    _, pred = model(x, edge_index).max(dim=1)
    '''
    if epoch % 5 == 0:
        plot_graph(pred, G, epoch)
        plt.show()
    '''
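
The loop above only tracks loss and AUC on the training indices. A minimal sketch of held-out evaluation, assuming a test_idx index set (not defined in the original snippet) was split off in the same way as train_idx:

model.eval()
with torch.no_grad():
    out = model(x, edge_index).reshape((data_train.x.shape[0]))
test_auc = roc_auc_score(data_train.y.cpu().numpy()[test_idx],
                         out.cpu().numpy()[test_idx])
print("test roc: {}".format(test_auc))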

GAT Code

import ssl

import torch
import torch.nn.functional as F
from torch_geometric.nn import MessagePassing
from torch_geometric.datasets import Planetoid

class GAL(MessagePassing):
    def __init__(self, in_features, out_features):
        super(GAL, self).__init__(aggr='add')
        # attention vector a, shape [2 * out_features, 1]
        self.a = torch.nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
        torch.nn.init.xavier_uniform_(self.a.data, gain=1.414)  # Xavier initialization
        # LeakyReLU applied to the raw attention scores
        self.leakyrelu = torch.nn.LeakyReLU()
        self.linear = torch.nn.Linear(in_features, out_features)
        
    def forward(self, x, edge_index):
        x = self.linear(x)
        N = x.size()[0]
        row, col = edge_index
        # concatenate the features of the two endpoints of every edge: [E, 2 * out_features]
        a_input = torch.cat([x[row], x[col]], dim=1)
        print('a_input.size', a_input.size())
        # [E, 2 * out_features] x [2 * out_features, 1] => [E] raw (unnormalized) attention scores
        temp = torch.mm(a_input, self.a).squeeze()
        print('temp.size', temp.size())
        e = self.leakyrelu(temp)
        print('e', e)
        print('e.size', e.size())
        # e_all[i] accumulates sum_k exp(e_ik) over all edges whose target is node i,
        # i.e. the softmax denominator for node i
        e = torch.exp(e)
        e_all = torch.zeros(N)
        count = 0
        for i in col:
            e_all[i] += e[count]
            count = count + 1
        print('e_all', e_all)

        # softmax over each node's incident edges: alpha_ij = exp(e_ij) / sum_k exp(e_ik)
        e = e / e_all[col]
        print('attention', e)
        print('attention.size', e.size())

        return self.propagate(edge_index, x=x, norm=e)

    def message(self, x_j, norm):
        print('x_j:', x_j)
        print('x_j.size', x_j.size())
        print('norm', norm)
        print('norm.size', norm.size())
        print('norm.view.size', norm.view(-1, 1).size())
        return norm.view(-1, 1) * x_j
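
The per-node softmax above is written out with an explicit Python loop so every intermediate tensor can be printed. PyG also ships a scatter-based softmax utility that performs the same grouped normalization in one call; a small self-contained sketch, assuming torch_geometric.utils.softmax is available in your PyG version:

import torch
from torch_geometric.utils import softmax

# toy example: 4 edges, the first three point to node 0 and the last one to node 1
e = torch.tensor([1.0, 2.0, 3.0, 0.5])   # raw attention scores, one per edge
col = torch.tensor([0, 0, 0, 1])          # target node of each edge
alpha = softmax(e, col)                   # softmax within each target node's group of edges
print(alpha)                              # the first three entries sum to 1, the last one is 1.0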

# work around SSL certificate errors when downloading the Cora dataset
ssl._create_default_https_context = ssl._create_unverified_context
dataset = Planetoid(root='Cora', name='Cora')
x = dataset[0].x
edge_index = dataset[0].edge_index

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # 16 output channels here do not match Cora's 7 classes; this only demonstrates the layer
        self.gal = GAL(dataset.num_node_features, 16)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.dropout(x, training=self.training)
        x = self.gal(x, edge_index)
        print('x_gal',x.size())
        return F.log_softmax(x, dim=1)

model = Net()
data = dataset[0]
out = model(data)  # one forward pass up front, just to show the printed tensor sizes
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
model.train()
for epoch in range(1):  # a single epoch, just to demonstrate one training step
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
model.eval()
_, pred = model(data).max(dim=1)
correct = int(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
acc = correct / int(data.test_mask.sum())
print('Accuracy: {:.4f}'.format(acc))
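
For comparison, PyG provides a built-in multi-head attention layer, GATConv. A minimal sketch of a two-layer GAT on the same Cora data (the hidden size of 8 channels per head, 8 heads, and the 0.6 dropout rate are my own choices, not from the original code):

import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv

class GATNet(torch.nn.Module):
    def __init__(self, num_features, num_classes):
        super(GATNet, self).__init__()
        # 8 heads of 8 channels each are concatenated into a 64-dim hidden representation
        self.gat1 = GATConv(num_features, 8, heads=8, dropout=0.6)
        # the second layer maps down to num_classes with a single head
        self.gat2 = GATConv(8 * 8, num_classes, heads=1, dropout=0.6)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.dropout(x, p=0.6, training=self.training)
        x = F.elu(self.gat1(x, edge_index))
        x = F.dropout(x, p=0.6, training=self.training)
        x = self.gat2(x, edge_index)
        return F.log_softmax(x, dim=1)

gat_model = GATNet(dataset.num_node_features, dataset.num_classes)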

This has also been very helpful for my own research work!
