# DGL implementations of homogeneous / heterogeneous graph convolution models

# === Part 1: Homogeneous graph convolution ===

import argparse
import pickle
from time import time

import numpy as np
import pandas as pd

import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F


# 模型
class TwoLayerModel(nn.Module):
    """Two-layer GCN encoder followed by an MLP classifier.

    Integer node features are looked up in a learned embedding table
    (``feat_mapping``) to produce dense inputs for the graph convolutions;
    the normalized node embedding is then scored by a small MLP ending in
    a sigmoid, and trained with summed binary cross-entropy.
    """

    def __init__(self, feat_vocab_size=2, in_dim=256, hid_dim=128):
        super().__init__()
        # Bug fix: the original forward() indexed `self.feat_mapping`,
        # but it was never defined in __init__ (AttributeError at runtime).
        # NOTE(review): assumes raw features are small integer ids in
        # [0, feat_vocab_size) — TODO confirm against the `uf` tables
        # loaded in loadData().
        self.feat_mapping = nn.Parameter(torch.randn(feat_vocab_size, in_dim))

        # GCN layers.
        self.conv1 = dgl.nn.GraphConv(in_dim, hid_dim)
        self.conv2 = dgl.nn.GraphConv(hid_dim, hid_dim)
        # GAT alternative: with num_heads=3 the output dim becomes
        # output_dim * 3, so downstream dimensions must change accordingly.
        # self.conv1 = dgl.nn.pytorch.conv.GATConv(256, 128, num_heads=3)
        # self.conv2 = dgl.nn.pytorch.conv.GATConv(3 * 128, 128, num_heads=3)

        # MLP head: Linear layers with ReLU between them and a final Sigmoid.
        layer_size = [hid_dim, 64, 32, 1]
        self.predict = nn.Sequential()
        for j in range(len(layer_size) - 1):
            self.predict.add_module("Linear_layer_%d" % j, nn.Linear(layer_size[j], layer_size[j + 1]))
            if j == len(layer_size) - 2:
                self.predict.add_module("Sigmoid_layer_%d" % j, nn.Sigmoid())
            else:
                self.predict.add_module("Relu_layer_%d" % j, nn.ReLU())

        self.lossfunc = nn.BCELoss(reduction='sum')
        # Guard against division by zero when L2-normalizing embeddings.
        # Registered as a buffer so it follows the module across devices.
        self.register_buffer("epsilon", torch.FloatTensor([1e-12]))

    def forward(self, blocks, x, label):
        """Return the summed BCE loss for the sampled mini-batch.

        blocks: two DGL message-flow blocks (one per GCN layer).
        x:      LongTensor of raw integer node features for the src nodes.
        label:  0/1 labels for the output (dst) nodes.
        """
        # NOTE(review): if x has more than one feature column this lookup
        # yields a 3-D tensor that conv1 cannot consume — verify x's shape.
        x = self.feat_mapping[x]
        x = F.relu(self.conv1(blocks[0], x))
        x = F.relu(self.conv2(blocks[1], x))

        label = label.reshape(-1, 1).float()
        # L2-normalize each node embedding before scoring.
        prob = self.predict(x / (torch.max(torch.norm(x, dim=1, keepdim=True), self.epsilon)))
        loss = self.lossfunc(prob, label)

        return loss

# 数据
# Data loading
def loadData(path):
    """Load the train/test user graphs and label lookup tables.

    Reads two edge CSVs and two feature CSVs from `path`, builds a
    bidirected, self-looped DGL graph per split, and loads the
    positive/negative node-id splits from a pickle file.

    Returns (graphs, train_pos, train_neg, test_pos, test_neg,
    train_label_map, test_label_map) where the label maps are dense
    0/1 float tensors indexed by node id.
    """
    uu_fn = ["uu_tr.csv", "uu_te.csv"]  # user-user relation edges
    uf_fn = ["uf_tr.csv", "uf_te.csv"]  # user feature tables
    bg_l = []
    for uu_name, uf_name in zip(uu_fn, uf_fn):
        uu = pd.read_csv(path + uu_name, header=0)
        uf = pd.read_csv(path + uf_name, header=0)
        # Bug fix: the original used the undefined alias `th`; this file
        # imports the library as `torch`.
        g = dgl.graph((torch.tensor(uu['uid'].values), torch.tensor(uu['fid'].values)),
                      num_nodes=uf.shape[0])
        bg = dgl.to_bidirected(g)
        bg = dgl.add_self_loop(bg)
        # First column of uf is presumably the user id and is dropped —
        # TODO confirm against the CSV schema.
        bg.ndata['feat'] = torch.LongTensor(uf.iloc[:, 1:].values)
        bg_l.append(bg)

    with open(path + "u_train_test.pickle", "rb") as fp:
        # NOTE: pickle is only safe on trusted, locally produced files.
        X_train_p, X_train_n, X_test_p, X_test_n = pickle.load(fp)
    # Dense 0/1 lookup tables: label_map[node_id] == 1 for positives.
    train_label_map = torch.zeros(max(X_train_p + X_train_n) + 1)
    train_label_map[torch.LongTensor(X_train_p)] = 1
    test_label_map = torch.zeros(max(X_test_p + X_test_n) + 1)
    test_label_map[torch.LongTensor(X_test_p)] = 1

    return bg_l, X_train_p, X_train_n, X_test_p, X_test_n, train_label_map, test_label_map


# 训练
# Training entry point (homogeneous model)
if __name__ == "__main__":
    # Bug fix: `args` was referenced but never defined — expose the data
    # directory suffix as a CLI flag (original spelling kept so existing
    # data directories still resolve).
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataPercesent", default="100",
                        help="suffix of the ../data_<value>/ directory")
    args = parser.parse_args()

    model = TwoLayerModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.001)

    bg_l, X_train_p, X_train_n, X_test_p, X_test_n, train_label_map, test_label_map = \
        loadData("../data_" + args.dataPercesent + "/")
    sampler = dgl.dataloading.MultiLayerFullNeighborSampler(2)
    dataloader = dgl.dataloading.NodeDataLoader(
        dgl.add_self_loop(bg_l[0]), X_train_p + X_train_n, sampler,
        batch_size=1024 * 8, shuffle=True, drop_last=False)
    # Test loader: a single batch covering every test node, fixed order.
    dataloader2 = dgl.dataloading.NodeDataLoader(
        dgl.add_self_loop(bg_l[1]), X_test_p + X_test_n, sampler,
        batch_size=len(X_test_p + X_test_n), shuffle=False, drop_last=False)

    for epoch in range(200):
        t0 = time()
        model.train()
        train_loss = 0.
        for input_nodes, output_nodes, blocks in dataloader:
            optimizer.zero_grad()
            input_features = blocks[0].srcdata['feat']
            loss = model(blocks, input_features,
                         train_label_map[torch.LongTensor(output_nodes)])
            # Bug fix: the original called `loss_.backward()` /
            # `loss_.item()` on an undefined name.
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        t1 = time()
        model.eval()
        with torch.no_grad():
            for input_nodes, output_nodes, blocks in dataloader2:
                input_features = blocks[0].srcdata['feat']
                test_loss = model(blocks, input_features,
                                  test_label_map[torch.LongTensor(output_nodes)])

        t2 = time()
        # Bug fix: `log` was undefined — report progress via print.
        print('epoch[%d],TrainLoss[%.2f],TestLoss[%.2f],time[%.1f + %.1f]'
              % (epoch, train_loss, test_loss.item(), t1 - t0, t2 - t1))

# === Part 2: Heterogeneous graph convolution ===

from time import time
import torch
import torch.nn as nn
import random
from torch.optim import lr_scheduler
import dgl
import dgl.function as fn
import numpy as np
import pandas as pd


# 模型定义
# Model definition
class HeteroGNN(nn.Module):
    """Heterogeneous GNN over a user/group/ip graph.

    Raw node features are mapped into a shared 64-d space, aggregated by a
    HeteroRGCNLayer into a 128-d per-user representation ('h'), and scored
    by a sigmoid MLP trained with binary cross-entropy.
    """

    def __init__(self, G):
        super(HeteroGNN, self).__init__()
        self.G = G
        self.hgl = HeteroRGCNLayer()
        # NOTE(review): the original also assigned nn.Parameter tensors into
        # G.nodes['group'/'ip'].data['emb'] here; those were never registered
        # with the module (so never optimized) and were overwritten in
        # forward() — removed as dead code.

        # Per-node-type feature encoders into a shared 64-d space.
        self.uf_mapping = nn.Linear(88, 64)
        self.gf_mapping = nn.Linear(33, 64)
        self.pf_mapping = nn.Linear(690, 64)

        # Edge-scoring heads, only used by the (commented-out) top-k sampler.
        self.weight = nn.ModuleDict({'user': nn.Sequential(nn.Linear(64, 1), nn.Sigmoid()),
                                     'group': nn.Sequential(nn.Linear(64, 1), nn.Sigmoid())})

        # MLP head: 128-d user representation -> sigmoid probability.
        layer_size = [128, 64, 32, 1]
        self.predict = nn.Sequential()
        for j in range(len(layer_size) - 1):
            self.predict.add_module("Linear_layer_%d" % j, nn.Linear(layer_size[j], layer_size[j + 1]))
            if j == len(layer_size) - 2:
                self.predict.add_module("Sigmoid_layer_%d" % j, nn.Sigmoid())
            else:
                self.predict.add_module("Relu_layer_%d" % j, nn.ReLU())

        self.lossfunc = nn.BCELoss()

    def forward(self, up, un, flag):
        """Return mean BCE loss over positive user ids `up` and negatives `un`.

        `flag` is accepted for interface compatibility but is unused.
        """
        # Encode each node type's attribute features into 'emb'.
        self.G.nodes['user'].data['emb'] = self.uf_mapping(self.G.nodes['user'].data['feat'])
        self.G.nodes['group'].data['emb'] = self.gf_mapping(self.G.nodes['group'].data['feat'])
        self.G.nodes['ip'].data['emb'] = self.pf_mapping(self.G.nodes['ip'].data['feat'])

        # Neighbor sampling (disabled): score edges and keep top-k per node.
        # self.G.nodes['user'].data['s'] = self.weight['user'](self.G.nodes['user'].data['emb'])
        # self.G.nodes['group'].data['s'] = self.weight['group'](self.G.nodes['group'].data['emb'])
        # for etype in self.G.etypes:
        #     self.G.apply_edges(message_func, etype=etype)
        # SG = dgl.sampling.select_topk(self.G, {'have': 6, 'belong': 6, 'trust': 6}, 's', edge_dir='in', ascending=True)
        SG = self.G

        self.hgl(SG)
        user = torch.LongTensor(np.concatenate([np.array(up), np.array(un)], 0))
        label = torch.LongTensor(np.concatenate([np.ones(len(up)), np.zeros(len(un))], 0)).reshape(-1, 1).float()
        # Bug fix: the original called undefined `self.predict1`/`self.predict2`;
        # the defined MLP `self.predict` (128 -> 1) consumes the 128-d 'h'
        # written by HeteroRGCNLayer.
        prob = self.predict(self.G.nodes['user'].data['h'][user])
        loss = self.lossfunc(prob, label)

        return loss


class HeteroRGCNLayer(nn.Module):
    """One round of max-pooling message passing over a user/group/ip graph.

    Writes a 128-d per-user representation into G.nodes['user'].data['h'],
    built from four 32-d pieces: the user's own projection ('Wh'), a
    two-hop user signal relayed through groups and ips ('uh'), and the
    direct group ('gh') and ip ('ph') neighborhoods.
    """

    def __init__(self):
        super(HeteroRGCNLayer, self).__init__()

        # Per-node-type 64 -> 32 projections applied before aggregation.
        self.weight = nn.ModuleDict({'user': nn.Sequential(nn.Linear(64, 32)),
                                     'group': nn.Sequential(nn.Linear(64, 32)),
                                     'ip': nn.Sequential(nn.Linear(64, 32)),})
                                     
    def forward(self, G):
        """Run three aggregation passes on G (mutates G's ndata in place).

        Expects 'emb' (64-d) on every node type; requires edge types
        'belong'/'belong2'/'have'/'have2' — TODO confirm the graph built by
        loadData() actually carries 'belong2'/'have2' and 'ip' nodes.
        """
        # Pass 1: aggregate users onto groups ('guh') ...
        funcs = {}
        srctype, etype, dsttype = 'user', 'belong', 'group'
        G.nodes[srctype].data['Wh'] = self.weight[srctype](G.nodes[srctype].data['emb'])
        funcs[etype] = (fn.copy_u('Wh', 'm'), fn.max('m', 'guh'))
        # ... and onto ips ('puh'); both copy the user 'Wh' set above.
        srctype, etype, dsttype = 'user', 'belong2', 'ip'
        funcs[etype] = (fn.copy_u('Wh', 'm'), fn.max('m', 'puh'))
        G.multi_update_all(funcs, 'max')

        # Pass 2: aggregate groups onto users ('gh') ...
        funcs = {}
        srctype, etype, dsttype = 'group', 'have', 'user'
        G.nodes[srctype].data['Wh'] = self.weight[srctype](G.nodes[srctype].data['emb'])
        funcs[etype] = (fn.copy_u('Wh', 'm'), fn.max('m', 'gh'))
        # ... and ips onto users ('ph').
        srctype, etype, dsttype = 'ip', 'have2', 'user'
        G.nodes[srctype].data['Wh'] = self.weight[srctype](G.nodes[srctype].data['emb'])
        funcs[etype] = (fn.copy_u('Wh', 'm'), fn.max('m', 'ph'))
        G.multi_update_all(funcs, 'max')

        # Pass 3: relay the pooled user signals back to users ('uh') via the
        # same reverse edges, using the 'guh'/'puh' produced in pass 1.
        funcs = {}
        # funcs['trust'] = (fn.copy_u('Wh', 'm'), fn.max('m', 'uh'))
        funcs['have'] = (fn.copy_u('guh', 'm'), fn.max('m', 'uh'))
        funcs['have2'] = (fn.copy_u('puh', 'm'), fn.max('m', 'uh'))
        G.multi_update_all(funcs, 'max')

        # Final user representation: 4 x 32 = 128 dims.
        G.nodes['user'].data['h'] = torch.cat([G.nodes['user'].data['Wh'], G.nodes['user'].data['uh'], G.nodes['user'].data['gh'],  G.nodes['user'].data['ph']], 1)


# 加载数据
# Load data
def loadData():
    """Build the user/group heterograph and return the train/test id splits.

    NOTE(review): this shadows the earlier loadData(path) defined above in
    the same file; only one of the two script sections should be active.
    """
    ug = pd.read_csv("ug.csv")
    uf = pd.read_csv("user_feat.csv", sep=",", header=None)
    gf = pd.read_csv("group_feat.csv", sep=",", header=None)
    # Bug fix: the original used the undefined alias `th`; this file
    # imports the library as `torch`.
    user_group = (torch.tensor(ug['u'].values), torch.tensor(ug['g'].values))
    group_user = (torch.tensor(ug['g'].values), torch.tensor(ug['u'].values))

    with open("data/u_train_test.pickle", "rb") as fp:
        # NOTE: pickle is only safe on trusted, locally produced files.
        up_train, up_test, un_train, un_test = pickle.load(fp)

    # Bidirectional user<->group relations as two directed edge types.
    hg = dgl.heterograph({('user', 'belong', 'group'): user_group,
                          ('group', 'have', 'user'): group_user})
    hg.nodes['user'].data['feat'] = torch.tensor(uf.values, dtype=torch.float)
    hg.nodes['group'].data['feat'] = torch.tensor(gf.values, dtype=torch.float)

    return hg, up_train, up_test, un_train, un_test


# 训练
# Training entry point (heterogeneous model)
# Bug fix: the original mixed tabs (first four statements) and spaces,
# which is an IndentationError in Python 3 — re-indented consistently.
if __name__ == "__main__":
    G, up_train, up_test, un_train, un_test = loadData()

    model = HeteroGNN(G)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005)

    for epoch in range(200):
        t0 = time()
        model.train()
        optimizer.zero_grad()
        # Re-sample negatives each epoch at a 5:1 negative:positive ratio,
        # capped at the available pool so random.sample cannot raise.
        neg = random.sample(un_train, k=min(5 * len(up_train), len(un_train)))
        # Bug fix: model.forward returns a single loss tensor, but the
        # original unpacked three values (train_loss, tauc, tf1).
        train_loss = model(up_train, neg, 0)
        train_loss.backward()
        optimizer.step()

        t1 = time()
        model.eval()
        with torch.no_grad():
            test_loss = model(up_test, un_test, 0)

        t2 = time()
        print('epoch[%d],TrainLoss[%.2f],TestLoss[%.2f],time[%.1f + %.1f]'
              % (epoch, train_loss.item(), test_loss.item(), t1 - t0, t2 - t1))
# (CSDN page chrome — like/favorite buttons and payment-widget text scraped
#  along with the article — removed; it was not part of the code.)