import torch


class AttentionModule(torch.nn.Module):
    """
    SimGNN attention module that pools node embeddings into a single
    graph-level representation.
    """
    def __init__(self, args, num):
        """
        :param args: Arguments object.
        :param num: Counter used to label the attention dumps written to disk.
        """
        super(AttentionModule, self).__init__()
        self.args = args
        self.num = num
        self.setup_weights()
        self.init_parameters()
    def setup_weights(self):
        """
        Defining weights.
        """
        self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.args.filters_3,
                                                             self.args.filters_3))
    def init_parameters(self):
        """
        Initializing weights.
        """
        torch.nn.init.xavier_uniform_(self.weight_matrix)
    def forward(self, embedding):
        """
        Making a forward propagation pass to create a graph-level representation.
        :param embedding: Result of the GCN.
        :return representation: A graph-level representation vector.
        """
        self.num += 1
        # embedding is n x 32 and weight_matrix is 32 x 32; averaging the
        # product over the node dimension gives a 1 x 32 global context.
        global_context = torch.mean(torch.matmul(embedding, self.weight_matrix), dim=0)
        transformed_global = torch.tanh(global_context)
        # transformed_global.view(-1, 1) is effectively a transpose:
        # embedding is n x 32, transformed_global becomes 32 x 1, so the
        # result is an n x 1 vector of per-node attention scores.
        sigmoid_scores = torch.sigmoid(torch.mm(embedding, transformed_global.view(-1, 1)))
        # Dump the per-node attention scores for later inspection.
        with open('attention.txt', 'a') as f:
            print("test " + str(int((self.num + 1) / 2)) + " begin===============", file=f)
            print("attention_for_nodes:", file=f)
            print(sigmoid_scores, file=f)
            print("end", file=f)
        # torch.t(embedding) is 32 x n and sigmoid_scores is n x 1, so the
        # weighted sum is a 32 x 1 graph-level representation.
        representation = torch.mm(torch.t(embedding), sigmoid_scores)
        return representation
As shown in the red box in the figure, the code above implements the attention mechanism: the global context is c = tanh(mean(E·W)), the per-node scores are a = sigmoid(E·c), and the graph-level representation is h = Eᵀ·a, where E is the n x 32 node embedding matrix.
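For concreteness, here is a minimal usage sketch of the module. It is an assumption, not part of the original project: the `SimpleNamespace`-based `Args` stand-in (only `filters_3` is assumed to matter here) and the random tensor standing in for real GCN output are both hypothetical.

# Minimal usage sketch (hypothetical; not the project's test harness).
from types import SimpleNamespace

import torch

args = SimpleNamespace(filters_3=32)   # assumed: only filters_3 is read by the module
attention = AttentionModule(args, num=0)

embedding = torch.rand(10, 32)         # stand-in for GCN output: 10 nodes, 32 features each
representation = attention(embedding)
print(representation.shape)            # torch.Size([32, 1]) -- one graph-level vector

Note that because the attention weights are produced by a sigmoid rather than a softmax, the scores are independent per node and do not sum to 1; the result is a weighted sum, not a weighted average, of the node embeddings.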