1. RUN_GNN.py
1.1 class RUN_GNN(torch.nn.Module):
1.1.1 def __init__(self, params, loader):
Compared with the inductive version, the following was removed:
# Enable uniform parameters only when uniform_parm is present in params and its value is greater than 0.
if "uniform_parm" in params and params.uniform_parm > 0:
    self.uniform_parm = True
else:
    self.uniform_parm = False
######
Added:
self.params = params
1.1.2 def forward(self, subs, rels, mode='train'):
Mostly a CUDA/device-placement concern and can be ignored. self.params = params was also added so that device = self.params.device is available (a consolidated sketch of this device handling is given at the end of this subsection).
def forward(self, subs, rels, mode='train'):
    ###### The parts newly added in RUN-GNN are marked below; the remaining CUDA-related differences can be ignored
    n = len(subs)
    if isinstance(subs, torch.Tensor):
        q_sub = subs.to(device=self.params.device)
        q_rel = rels.to(device=self.params.device)
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    elif torch.cuda.is_available():
        q_sub = torch.LongTensor(subs).to(device=self.params.device)
        q_rel = torch.LongTensor(rels).cuda()
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    else:
        q_sub = torch.LongTensor(subs)
        q_rel = torch.LongTensor(rels)
        h0 = torch.zeros((n, self.hidden_dim))
        nodes = torch.cat([torch.arange(n).unsqueeze(1), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim)
    scores_all = []
    for i in range(self.n_layer):
        nodes, edges, old_nodes_new_idx = self.loader.get_neighbors(nodes.data.cpu().numpy(), mode=mode)
        hidden, h_n_qr = self.gnn_layers[i](q_sub, q_rel, hidden, edges, nodes.size(0), old_nodes_new_idx)  ######## RUN-GNN additionally returns h_n_qr
However, in the inductive version the layer call inside this loop looks like this instead:
        if i < (self.n_layer - 1):
            # the first n_layer-1 steps use the basic (shared) GNN layer
            hidden, h_n_qr = self.gnn_layers[0](q_sub, q_rel, hidden, edges, nodes.size(0), old_nodes_new_idx)
        else:
            # the last step uses its own special GNN layer
            hidden, h_n_qr = self.gnn_layers[i](q_sub, q_rel, hidden, edges, nodes.size(0), old_nodes_new_idx)
The second-to-last statement is also just a CUDA/device issue:
    if torch.cuda.is_available():
        scores_all = torch.zeros((n, self.loader.n_ent)).cuda()
    else:
        scores_all = torch.zeros((n, self.loader.n_ent))
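The same device-placement boilerplate recurs in every forward variant in this note. For reference only, here is a minimal consolidated sketch (a hypothetical helper, not present in the repository); it assumes params.device holds a torch.device and that subs/rels arrive as Python lists, NumPy arrays, or tensors:

import torch

def init_query_state(subs, rels, hidden_dim, device):
    # Hypothetical helper: folds the if/elif/else device branches above into one place.
    n = len(subs)
    q_sub = subs.to(device) if isinstance(subs, torch.Tensor) else torch.LongTensor(subs).to(device)
    q_rel = rels.to(device) if isinstance(rels, torch.Tensor) else torch.LongTensor(rels).to(device)
    h0 = torch.zeros(n, hidden_dim, device=device)      # initial query-conditioned state
    hidden = torch.zeros(n, hidden_dim, device=device)  # node hidden states
    # each row of nodes is (query index, subject entity id)
    nodes = torch.cat([torch.arange(n, device=device).unsqueeze(1), q_sub.unsqueeze(1)], 1)
    return q_sub, q_rel, h0, hidden, nodes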
1.2 class G_GAT_Layer(torch.nn.Module):
No changes.
2. RUN_GNN_raw.py
All of the following is compared against RUN_GNN.py.
2.1 class RUN_GNN_raw(torch.nn.Module):
2.1.1 def __init__(self, params, loader):
Removed:
# self.params = params
2.1.2 def forward(self, subs, rels, mode='train'):
Changed:
def forward(self, subs, rels, mode='train'):
    n = len(subs)
    ########## difference between RUN_GNN and RUN_GNN_raw
    if torch.cuda.is_available():
        q_sub = torch.LongTensor(subs).cuda()
        q_rel = torch.LongTensor(rels).cuda()
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    else:
        q_sub = torch.LongTensor(subs)
        q_rel = torch.LongTensor(rels)
        h0 = torch.zeros((n, self.hidden_dim))
        nodes = torch.cat([torch.arange(n).unsqueeze(1), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim)
2.2 class G_GAT_Layer(torch.nn.Module):
3. w_addition.py
3.1 class w_addition(torch.nn.Module):
Removed:
# self.params = params
Changed:
def forward(self, subs, rels, mode='train'):
    n = len(subs)
    if torch.cuda.is_available():
        q_sub = torch.LongTensor(subs).cuda()
        q_rel = torch.LongTensor(rels).cuda()
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    else:
        q_sub = torch.LongTensor(subs)
        q_rel = torch.LongTensor(rels)
        h0 = torch.zeros((n, self.hidden_dim))
        nodes = torch.cat([torch.arange(n).unsqueeze(1), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim)
3.2 class SES_v20p4(torch.nn.Module):
self.n_layer becomes self.n_layer + 1:
self.gnn_layers = []
for i in range(self.n_layer + 1):
    self.gnn_layers.append(G_GAT_Layer(self.hidden_dim, self.hidden_dim, self.attn_dim, self.n_rel, act=act))
self.gnn_layers = nn.ModuleList(self.gnn_layers)
self.n_extra_layer = params.n_extra_layer was removed, and self.n_extra_layer is replaced by self.n_layer:
self.extra_gnn_layers = []
for i in range(self.n_layer):
    self.extra_gnn_layers.append(G_GAT_Layer(self.hidden_dim, self.hidden_dim, self.attn_dim, self.n_rel, act=act))
self.extra_gnn_layers = nn.ModuleList(self.extra_gnn_layers)
Removed:
self.params = params
Changed:
def forward(self, subs, rels, mode='train'):
    n = len(subs)
    if torch.cuda.is_available():
        q_sub = torch.LongTensor(subs).cuda()
        q_rel = torch.LongTensor(rels).cuda()
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    else:
        q_sub = torch.LongTensor(subs)
        q_rel = torch.LongTensor(rels)
        h0 = torch.zeros((n, self.hidden_dim))
        nodes = torch.cat([torch.arange(n).unsqueeze(1), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim)
for i in range(self.n_extra_layer): becomes
for i in range(4):
    hidden = hidden[old_nodes_new_idx]
    hidden, h_n_qr = self.extra_gnn_layers[i](q_sub, q_rel, hidden, edges, nodes.size(0), old_nodes_new_idx)
    hidden = self.dropout(hidden)
    hidden = self.gate(hidden, h_n_qr, h0)
    h0 = hidden
3.3 class G_GAT_Layer(torch.nn.Module):
unique_relation_identity = self.gate(h_r, h_qr, h_s) becomes
unique_relation_identity = h_r + h_s
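For comparison, a minimal standalone sketch of the fusion variants (tensor names follow the snippets above; the learned gate call is shown only as a comment because its implementation lives in the original G_GAT_Layer, which is not reproduced here). Both ablations drop the dependence on the query relation h_qr at this step:

import torch

hidden_dim = 64
h_r = torch.randn(128, hidden_dim)   # edge relation embeddings
h_s = torch.randn(128, hidden_dim)   # source-entity hidden states
h_qr = torch.randn(128, hidden_dim)  # query-relation embeddings (used only by the gated variant)

# RUN_GNN.py (original): learned, query-conditioned fusion
# unique_relation_identity = self.gate(h_r, h_qr, h_s)

# w_addition.py: plain element-wise sum, no learned parameters
unique_relation_identity_add = h_r + h_s

# w_times.py (section 5 below): element-wise (Hadamard) product instead
unique_relation_identity_mul = h_r * h_s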
4. w_sigmoid.py
4.1 class w_sigmoid(torch.nn.Module):
def forward(self, subs, rels, mode='train'):
    n = len(subs)
    ####### changes relative to RUN_GNN
    if isinstance(subs, torch.Tensor):
        q_sub = subs.to(device=self.params.device)
        q_rel = rels.to(device=self.params.device)
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    elif torch.cuda.is_available():
        q_sub = torch.LongTensor(subs).to(device=self.params.device)
        q_rel = torch.LongTensor(rels).cuda()
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    else:
        q_sub = torch.LongTensor(subs)
        q_rel = torch.LongTensor(rels)
        h0 = torch.zeros((n, self.hidden_dim))
        nodes = torch.cat([torch.arange(n).unsqueeze(1), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim)
4.2 class SES_v20p4(torch.nn.Module):
self.n_layer becomes self.n_layer + 1:
self.gnn_layers = []
for i in range(self.n_layer + 1):
    self.gnn_layers.append(G_GAT_Layer(self.hidden_dim, self.hidden_dim, self.attn_dim, self.n_rel, act=act))
self.gnn_layers = nn.ModuleList(self.gnn_layers)
self.n_extra_layer = params.n_extra_layer was removed, and self.n_extra_layer is replaced by self.n_layer:
self.extra_gnn_layers = []
for i in range(self.n_layer):
    self.extra_gnn_layers.append(G_GAT_Layer(self.hidden_dim, self.hidden_dim, self.attn_dim, self.n_rel, act=act))
self.extra_gnn_layers = nn.ModuleList(self.extra_gnn_layers)
Removed:
self.params = params
Changed:
def forward(self, subs, rels, mode='train'):
    n = len(subs)
    if torch.cuda.is_available():
        q_sub = torch.LongTensor(subs).cuda()
        q_rel = torch.LongTensor(rels).cuda()
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    else:
        q_sub = torch.LongTensor(subs)
        q_rel = torch.LongTensor(rels)
        h0 = torch.zeros((n, self.hidden_dim))
        nodes = torch.cat([torch.arange(n).unsqueeze(1), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim)
for i in range(self.n_extra_layer): becomes
for i in range(4):
    hidden = hidden[old_nodes_new_idx]
    hidden, h_n_qr = self.extra_gnn_layers[i](q_sub, q_rel, hidden, edges, nodes.size(0), old_nodes_new_idx)
    hidden = self.dropout(hidden)
    hidden = self.gate(hidden, h_n_qr, h0)
    h0 = hidden
4.3 class G_GAT_Layer(torch.nn.Module):
unique_relation_identity = self.gate(h_r, h_qr, h_s)
unique_message = unique_relation_identity
unique_attend_weight = self.w_alpha(self.relu(self.Ws_attn(unique_message) + self.Wqr_attn(h_qr)))
unique_exp_attend = torch.exp(unique_attend_weight)
exp_attend = unique_exp_attend[inverse_indices]
unique_message = unique_exp_attend * unique_message
message = unique_message[inverse_indices]
sum_exp_attend = scatter(exp_attend, dim=0, index=obj, dim_size=n_node, reduce="sum")
no_attend_message_agg = scatter(message, index=obj, dim=0, dim_size=n_node, reduce='sum')
message_agg = no_attend_message_agg / sum_exp_attend
becomes:
unique_message = self.gate(h_r, h_qr, h_s)
unique_attend_weight = self.w_alpha(self.relu(self.Ws_attn(unique_message) + self.Wqr_attn(h_qr)))
######## changed
unique_sig_attend = torch.sigmoid(unique_attend_weight)
unique_message = unique_sig_attend * unique_message
message = unique_message[inverse_indices]
message_agg = scatter(message, index=obj, dim=0, dim_size=n_node, reduce='sum')
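The substantive change is how edge messages are weighted before aggregation: the original layer exponentiates the attention logits and normalizes them over the edges arriving at each node (softmax-style attention), while w_sigmoid gates every edge independently with a sigmoid and skips the per-node normalization. A minimal sketch, assuming torch_scatter is installed and leaving out the unique/inverse_indices deduplication used above:

import torch
from torch_scatter import scatter

n_node = 5
obj = torch.tensor([0, 0, 1, 2, 3, 4])  # destination node of each of the 6 edge messages
message = torch.randn(6, 8)             # per-edge messages
attend_weight = torch.randn(6, 1)       # raw attention logits per edge

# RUN_GNN.py: exp + per-node normalization (softmax over the incoming edges of each node)
exp_attend = torch.exp(attend_weight)
sum_exp_attend = scatter(exp_attend, index=obj, dim=0, dim_size=n_node, reduce='sum')
agg_softmax = scatter(exp_attend * message, index=obj, dim=0, dim_size=n_node, reduce='sum') / sum_exp_attend

# w_sigmoid.py: independent sigmoid gate per edge, no normalization
sig_attend = torch.sigmoid(attend_weight)
agg_sigmoid = scatter(sig_attend * message, index=obj, dim=0, dim_size=n_node, reduce='sum')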
5. w_times.py
5.1 class w_times(torch.nn.Module):
Removed:
self.params = params
Changed:
def forward(self, subs, rels, mode='train'):
    n = len(subs)
    if torch.cuda.is_available():
        q_sub = torch.LongTensor(subs).cuda()
        q_rel = torch.LongTensor(rels).cuda()
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    else:
        q_sub = torch.LongTensor(subs)
        q_rel = torch.LongTensor(rels)
        h0 = torch.zeros((n, self.hidden_dim))
        nodes = torch.cat([torch.arange(n).unsqueeze(1), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim)
5.2 class G_GAT_Layer(torch.nn.Module):
unique_relation_identity = self.gate(h_r, h_qr, h_s) becomes
unique_relation_identity = h_r * h_s
6. wo_buffer.py
6.1 class wo_buffer(torch.nn.Module):
Not added:
# self.n_extra_layer = params.n_extra_layer
# self.extra_gnn_layers = []
# for i in range(self.n_extra_layer):
#     self.extra_gnn_layers.append(
#         G_GAT_Layer(self.hidden_dim, self.hidden_dim, self.attn_dim, self.n_rel, act=act))
# self.extra_gnn_layers = nn.ModuleList(self.extra_gnn_layers)
Changed:
def forward(self, subs, rels, mode='train'):
    n = len(subs)
    if isinstance(subs, torch.Tensor):
        q_sub = subs.to(device=self.params.device)
        q_rel = rels.to(device=self.params.device)
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    elif torch.cuda.is_available():
        q_sub = torch.LongTensor(subs).to(device=self.params.device)
        q_rel = torch.LongTensor(rels).to(device=self.params.device)
        h0 = torch.zeros((n, self.hidden_dim)).cuda()
        nodes = torch.cat([torch.arange(n).unsqueeze(1).cuda(), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim).cuda()
    else:
        q_sub = torch.LongTensor(subs)
        q_rel = torch.LongTensor(rels)
        h0 = torch.zeros((n, self.hidden_dim))
        nodes = torch.cat([torch.arange(n).unsqueeze(1), q_sub.unsqueeze(1)], 1)
        hidden = torch.zeros(n, self.hidden_dim)
Not added:
# for i in range(self.n_extra_layer):
#     hidden = hidden[old_nodes_new_idx]
#     hidden, h_n_qr = self.extra_gnn_layers[i](q_sub, q_rel, hidden, edges, nodes.size(0), old_nodes_new_idx)
#     hidden = self.dropout(hidden)
#     hidden = self.gate(hidden, h_n_qr, h0)
#     h0 = hidden
6.2 class G_GAT_Layer(torch.nn.Module):
Unchanged.