# Step 4: build the encoder self-attention mask
# mask shape: [batch_size, max_src_len, max_src_len]; True marks positions that involve
# padding and must be filled with a large negative value (~ -inf) before the softmax
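# Assumed setup (a minimal sketch): batch_size and src_len come from the earlier steps,
# which are not shown here; the concrete values below are illustrative placeholders only.
import torch
import torch.nn.functional as F

batch_size = 2
src_len = torch.tensor([2, 4], dtype=torch.int32)  # valid (unpadded) length of each source sequence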
# valid_encoder_pos: [batch_size, max_src_len, 1]; 1 for real tokens, 0 for padding
valid_encoder_pos = torch.unsqueeze(
    torch.cat([torch.unsqueeze(F.pad(torch.ones(L), (0, max(src_len) - L)), 0) for L in src_len]),
    2)
# Outer product of the validity vectors: entry (i, j) is 1 only if positions i and j are both real tokens
valid_encoder_pos_matrix = torch.bmm(valid_encoder_pos, valid_encoder_pos.transpose(1, 2))
invalid_encoder_pos_matrix = 1 - valid_encoder_pos_matrix
# Boolean mask: True wherever attention must be blocked
mask_encoder_self_attention = invalid_encoder_pos_matrix.to(torch.bool)
# print(invalid_encoder_pos_matrix.shape)
# print(mask_encoder_self_attention)
# print(src_len)
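# For intuition (illustrative, based on the placeholder src_len = [2, 4] above): the first
# sample has only 2 real tokens, so every row/column that touches a padded position is True
# in the mask. Expected content of mask_encoder_self_attention[0]:
# tensor([[False, False,  True,  True],
#         [False, False,  True,  True],
#         [ True,  True,  True,  True],
#         [ True,  True,  True,  True]])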
# Demo on a random score matrix: masked positions are filled with a large negative
# value so that softmax assigns them (near-)zero probability
score = torch.randn(batch_size, max(src_len), max(src_len))
masked_score = score.masked_fill(mask_encoder_self_attention, -1e9)
prob = F.softmax(masked_score, -1)
# print(src_len)
# print(score)
# print(masked_score)
# print(prob)
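# A sketch (an assumption, not part of the original code) of how this mask plugs into full
# scaled dot-product attention; Q, K, V are random stand-ins for the projected token
# embeddings, and model_dim is an illustrative choice.
model_dim = 8
max_src_len = int(max(src_len))
Q = torch.randn(batch_size, max_src_len, model_dim)
K = torch.randn(batch_size, max_src_len, model_dim)
V = torch.randn(batch_size, max_src_len, model_dim)
attn_score = torch.bmm(Q, K.transpose(1, 2)) / model_dim ** 0.5   # scale by sqrt(d_k)
attn_score = attn_score.masked_fill(mask_encoder_self_attention, -1e9)
attn_weight = F.softmax(attn_score, dim=-1)   # padded positions receive ~0 weight
context = torch.bmm(attn_weight, V)           # [batch_size, max_src_len, model_dim]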
# softmax demo: why scaling (dividing by sqrt(d_k)) matters
# alpha1 = 0.1
# alpha2 = 10
# score = torch.randn(5)
# prob1 = F.softmax(score * alpha1, -1)  # small scale -> distribution close to uniform
# prob2 = F.softmax(score * alpha2, -1)  # large scale -> distribution close to one-hot
#
#
# def softmax_func(score):
#     return F.softmax(score, dim=-1)
#
#
# # Jacobian of softmax at the two scales: the larger the scale, the closer the
# # Jacobian is to zero, i.e. the gradients vanish (hence the 1/sqrt(d_k) scaling)
# jaco_mat1 = torch.autograd.functional.jacobian(softmax_func, score * alpha1)
# jaco_mat2 = torch.autograd.functional.jacobian(softmax_func, score * alpha2)
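# To make the point of the commented demo above concrete (a runnable sketch; the values are
# random, only the qualitative comparison matters): scaling the logits up pushes softmax
# toward one-hot and shrinks its Jacobian toward zero, which is why attention scores are
# scaled down by 1/sqrt(d_k).
demo_score = torch.randn(5)
for alpha in (0.1, 10):
    jaco = torch.autograd.functional.jacobian(lambda s: F.softmax(s, dim=-1), demo_score * alpha)
    print(f"alpha={alpha}: max |Jacobian entry| = {jaco.abs().max().item():.2e}")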