The file is in my GitHub repo: https://github.com/VICKY-ZZ/DeBERTa
# Reference blog: https://yam.gift/2020/06/27/Paper/2020-06-27-DeBERTa/
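Before the code, it helps to keep the paper's disentangled attention in mind: each token has a content vector and a relative-position vector, and the attention score between two positions is the sum of a content-to-content (c2c), content-to-position (c2p) and position-to-content (p2c) term. A minimal sketch of that decomposition (my own illustration with hypothetical names, not the author's code):

import torch

def disentangled_score(q_c, k_c, q_r, k_r, i, j, rel):
    """Schematic score between query position i and key position j.
    q_c/k_c: content projections, q_r/k_r: relative-position projections,
    rel(i, j): index of the relative distance between i and j (all hypothetical)."""
    c2c = torch.dot(q_c[i], k_c[j])          # content-to-content
    c2p = torch.dot(q_c[i], k_r[rel(i, j)])  # content-to-position
    p2c = torch.dot(k_c[j], q_r[rel(j, i)])  # position-to-content
    return c2c + c2p + p2c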
import torch
from torch import nn
# StableDropout is defined in the DeBERTa codebase (deberta/ops.py in the original
# microsoft/DeBERTa repo); adjust the import path if your fork lays it out differently.
from DeBERTa.deberta.ops import StableDropout


class DisentangledSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        # Why not just use hidden_size directly??? -> when hidden_size is divisible by
        # num_attention_heads the two are equal; computing it this way just makes the
        # per-head split explicit.
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # W_{q,c}: a single content projection whose output is later split into three
        # parts -- Q, K and V
        self.in_proj = torch.nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
        self.q_bias = torch.nn.Parameter(
            torch.zeros((self.all_head_size), dtype=torch.float)
        )
        self.v_bias = torch.nn.Parameter(
            torch.zeros((self.all_head_size), dtype=torch.float)
        )
        # ---------- position part
        self.pos_att_type = ['p2c', 'c2p']
        self.max_relative_positions = config.max_relative_positions
        # What is pos_dropout for??? -> it is applied to the relative-position
        # embeddings before they enter the attention computation
        self.pos_dropout = StableDropout(config.hidden_dropout_prob)
        self.pos_proj = torch.nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = StableDropout(config.attention_probs_dropout_prob)
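    # Note (my annotation, based on the DeBERTa implementation rather than code shown
    # in this snippet): in the forward pass, in_proj(hidden_states) is split into three
    # equal chunks for Q, K and V; q_bias is then added to the query and v_bias to the
    # value, while the key stays bias-free -- which is why in_proj itself has bias=False.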
    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
        # i.e. drop x's last dim (the hidden/all_head size) and replace it with
        # (num_attention_heads, per-head size); the -1 lets the per-head size be
        # inferred, so this also works when x is the stacked Q/K/V projection
        x = x.view(*new_x_shape)