1. model_utils gMHA_graphormer
gMHA_hadamard, gMHA_gt, gMHA_graphormer and mlp_mixer are the selectable models:
mlp_mixer is the MLP-Mixer;
gMHA is the ViT-style attention;
gMHA_graphormer implements the mix_layer described in the paper.
class MultiheadAttention
This class is the transformer's multi-head attention module.
def __init__
def __init__(
    self,
    embed_dim,
    num_heads,
    kdim=None,
    vdim=None,
    dropout=0.0,
    bias=True,
    self_attention=False,
    batch_first=False,
):
kdim and vdim are the dimensions of k and v; they may be None, in which case they default to the embedding dimension.
head_dim is the dimension of each attention head (for example, embed_dim=512 with num_heads=8 gives head_dim=64):
self.head_dim = embed_dim // num_heads
The q/k/v linear layers
q, k and v are each produced by their own linear layer (q_proj, k_proj, v_proj) that projects to embed_dim; the attention output is then passed through one more linear layer, out_proj, which is the output projection.
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
# self.kdim = kdim if kdim is not None else embed_dim
Parameter initialization: def reset_parameters
def reset_parameters(self):
    if self.qkv_same_dim:
        # Empirically observed the convergence to be much better with
        # the scaled initialization
        nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
        nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
        nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
    else:
        nn.init.xavier_uniform_(self.k_proj.weight)
        nn.init.xavier_uniform_(self.v_proj.weight)
        nn.init.xavier_uniform_(self.q_proj.weight)
    nn.init.xavier_uniform_(self.out_proj.weight)
    if self.out_proj.bias is not None:
        nn.init.constant_(self.out_proj.bias, 0.0)
# Empirically, the scaled initialization was observed to converge better.
Xavier initialization (nn.init.xavier_uniform_) is used here, scaled with gain=1 / math.sqrt(2). Xavier-uniform initialization draws the weights from a uniform distribution whose bound depends on the layer's fan-in and fan-out, multiplied by the gain.
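For reference (not from the source), xavier_uniform_ draws weights from U(-a, a) with a = gain * sqrt(6 / (fan_in + fan_out)); a minimal sketch:

import math

def xavier_uniform_bound(fan_in, fan_out, gain=1.0):
    # bound of the uniform distribution used by nn.init.xavier_uniform_
    return gain * math.sqrt(6.0 / (fan_in + fan_out))

# e.g. a hypothetical 512x512 q_proj with gain = 1/sqrt(2):
# xavier_uniform_bound(512, 512, gain=1/math.sqrt(2)) ≈ 0.054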
def forward
def forward(
    self,
    query,
    key: Optional[Tensor],
    value: Optional[Tensor],
    attn_bias: Optional[Tensor],
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    before_softmax: bool = False,
    need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
Padding and key_padding_mask
In a transformer the inputs have different lengths; to feed a uniform batch to the encoder, shorter sequences are padded.
key_padding_mask: Optional[Tensor] = None,
# key_padding_mask has shape (batch, src_len)
For positions beyond a sequence's true length (the padded positions), the attention scores are filled with -inf, so that their softmax weights become effectively 0.
key_padding_mask marks which positions are padding: padded positions have mask = 1 (True).
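A minimal sketch (with made-up lengths, not from the source) of how such a mask can be built from sequence lengths:

import torch

lengths = torch.tensor([3, 5])    # true lengths of two sequences padded to src_len = 5
src_len = 5
key_padding_mask = torch.arange(src_len)[None, :] >= lengths[:, None]
# tensor([[False, False, False,  True,  True],
#         [False, False, False, False, False]])   # True marks padded positions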
Checking and enforcing the input shape: Time x Batch x Channel
When the transformer processes a batch of sequences:
# check whether the input is batched (3-D); if batch_first, the batch dimension is moved to the second position
is_batched = query.dim() == 3
if self.batch_first and is_batched:
    query, key, value = [x.transpose(1, 0)
                         for x in (query, key, value)]
For each of query, key and value, transpose(1, 0) swaps the first and second dimensions, so that the input ends up as Time x Batch x Channel (Time = sequence length).
tgt_len, bsz, embed_dim = query.size()  # tgt_len: output (query) length
src_len = tgt_len  # src_len: input (key/value) length
q, k and v are obtained from the linear layers
q = self.q_proj(query)
q = (
    q.contiguous()
    .view(tgt_len, bsz * self.num_heads, self.head_dim)
    .transpose(0, 1)
)
# q: (tgt_len, bsz, embed_dim) -> (tgt_len, bsz*num_heads, head_dim) after the view
# k, v: (src_len, bsz, embed_dim) -> (src_len, bsz*num_heads, head_dim)
# embed_dim = num_heads * head_dim
k = self.k_proj(query)
v = self.v_proj(query)
if k is not None:
    k = (
        k.contiguous()
        .view(-1, bsz * self.num_heads, self.head_dim)
        .transpose(0, 1)
    )
if v is not None:
    v = (
        v.contiguous()
        .view(-1, bsz * self.num_heads, self.head_dim)
        .transpose(0, 1)
    )
The difference between numpy's and torch's transpose
# numpy and torch use transpose differently
# numpy: arr.transpose(dim1, dim2, ..., dimn) rearranges the array into the given dimension order
# torch: tensor.transpose(dima, dimb) swaps dimensions a and b
With numpy, move the dimensions next to each other (via transpose) before calling reshape.
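A small illustration (not from the source) of the difference:

import numpy as np
import torch

a = np.zeros((2, 3, 4))
a.transpose(1, 2, 0).shape    # numpy: pass the full new dimension order -> (3, 4, 2)

t = torch.zeros(2, 3, 4)
t.transpose(0, 1).shape       # torch: swap exactly two dimensions -> torch.Size([3, 2, 4])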
q, k and v all go through the same steps: a linear layer, then swapping dim 0 and dim 1;
the final shape of each is (bsz*num_heads, len, head_dim).
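A shape-only sketch (hypothetical sizes, not from the source) of that flow for q:

import torch

tgt_len, bsz, num_heads, head_dim = 7, 2, 4, 16
q = torch.randn(tgt_len, bsz, num_heads * head_dim)    # after q_proj: (tgt_len, bsz, embed_dim)
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q.shape                                                 # torch.Size([8, 7, 16]) = (bsz*num_heads, tgt_len, head_dim)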
Computing attn_weights from q and k
bmm is batched matrix multiplication: for inputs of shape (batch_size, n, dim) and (batch_size, dim, p),
the output attn_weights has shape (batch_size, n, p).
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [
    bsz * self.num_heads, tgt_len, src_len]
# attn_weights has shape [bsz * self.num_heads, tgt_len, src_len]
# the source also has attn_mask and attn_bias; both default to None and are omitted here
Applying the padding mask:
if key_padding_mask is not None:
    # don't attend to padding symbols
    attn_weights = attn_weights.view(
        bsz, self.num_heads, tgt_len, src_len)
    # attn_weights becomes (bsz, num_heads, tgt_len, src_len)
    attn_weights = attn_weights.masked_fill(
        key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
        float("-inf"),
    )
    # key_padding_mask goes from (bsz, src_len) to (bsz, 1, 1, src_len)
    # unsqueeze inserts a size-1 dimension at the given position; unsqueeze(1).unsqueeze(2) inserts dims 1 and 2
    # masked_fill replaces the positions where the bool mask is True with the given value,
    # here float("-inf"), i.e. negative infinity
    attn_weights = attn_weights.view(
        bsz * self.num_heads, tgt_len, src_len)  # reshape attn_weights back
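A minimal sketch (hypothetical sizes, not from the source) of the unsqueeze + masked_fill broadcast:

import torch

attn = torch.zeros(2, 4, 3, 5)                              # (bsz, num_heads, tgt_len, src_len)
mask = torch.tensor([[0, 0, 0, 1, 1],
                     [0, 0, 0, 0, 0]], dtype=torch.bool)    # (bsz, src_len), True = padding
attn = attn.masked_fill(mask.unsqueeze(1).unsqueeze(2), float("-inf"))
# mask broadcasts from (2, 1, 1, 5) over heads and tgt_len; padded columns become -inf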
Applying softmax to attn_weights
attn_weights = torch.softmax(attn_weights, dim=-1)
A note on torch.softmax (adapted from a CSDN blog post on torch.softmax): torch.softmax(x, dim=d) keeps the values along every other dimension fixed and normalizes along dimension d.
In the transformer, softmax is taken along the key dimension, i.e. over K_i (i = 1, 2, ..., n), so that each query's weights over all keys sum to 1.
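A small example (not from the source) of softmax along a chosen dim:

import torch

x = torch.tensor([[1.0, 2.0, 3.0],
                  [1.0, 1.0, 1.0]])
torch.softmax(x, dim=-1)
# tensor([[0.0900, 0.2447, 0.6652],
#         [0.3333, 0.3333, 0.3333]])   # each row (each query's weights over the keys) sums to 1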
Multiplying attn_weights by v
attn_weights = torch.softmax(attn_weights, dim=-1)
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
# attn now has shape (bsz * self.num_heads, tgt_len, self.head_dim)
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
# num_heads * head_dim = embed_dim
# attn -> (tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
# self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.dropout_module is a regularization technique: during training it randomly zeroes some outputs, which reduces the model's reliance on particular neurons and improves generalization.
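A tiny illustration (not from the source) of that behaviour:

import torch

drop = torch.nn.Dropout(p=0.5)
drop.train()
drop(torch.ones(1, 6))    # e.g. tensor([[2., 0., 2., 0., 2., 2.]]); surviving values are scaled by 1/(1-p)
drop.eval()
drop(torch.ones(1, 6))    # tensor([[1., 1., 1., 1., 1., 1.]]); identity at eval time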
MultiheadAttention returns the transformer output attn and the weights attn_weights
# (the code that moves bsz onto the second dimension of attn_weights is omitted here)
# attn: (tgt_len, bsz, embed_dim)
# attn_weights: (self.num_heads, bsz, tgt_len, src_len)
return attn, attn_weights
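A hypothetical usage sketch (shapes taken from the notes above; the concrete sizes here are assumptions, not from the source):

import torch

mha = MultiheadAttention(embed_dim=64, num_heads=4, self_attention=True)
x = torch.randn(10, 2, 64)                         # (tgt_len, bsz, embed_dim) with batch_first=False
attn, attn_weights = mha(x, x, x, attn_bias=None)
# attn: (10, 2, 64); attn_weights: (num_heads, bsz, tgt_len, src_len) since need_weights defaults to True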
class GraphormerEncoderLayer(nn.Module):
def __init__
The layer's constructor arguments
def __init__(self,
             d_model,
             nhead,
             dim_feedforward=2048,
             dropout=0.1,
             activation=F.relu,
             layer_norm_eps=1e-5,
             batch_first=False,
             norm_first=True,
             device=None,
             dtype=None):
The layer's sub-modules are listed below; self_attn is the MultiheadAttention (transformer) described above, with d_model = embed_dim.
self.self_attn = MultiheadAttention(
    embed_dim=d_model, num_heads=nhead, dropout=dropout, self_attention=True, batch_first=batch_first)
self.spatial_pos_encoder = nn.Linear(1, nhead)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
self.num_head = nhead
This class implements the mixer block from the paper (figure omitted here): a token mixer followed by a channel mixer.
The channel mixer is _ff_block, made of two MLP layers: x is first expanded to dim_feedforward and then projected back to d_model.
def _ff_block(self, x):
    x = self.linear2(self.dropout(self.activation(self.linear1(x))))
    return self.dropout2(x)
The token mixer is _sa_block, i.e. MultiheadAttention followed by dropout.
def _sa_block(self, x, attn_mask, key_padding_mask, attn_bias):
    x = self.self_attn(x, x, x,
                       attn_bias=attn_bias,
                       attn_mask=attn_mask,
                       key_padding_mask=key_padding_mask,
                       need_weights=False)[0]
    return self.dropout1(x)
MultiheadAttention also takes an attn_bias
# A has shape [n_graph, n_node, n_node]
# attn_bias is added onto attn_weights
attn_bias = self.spatial_pos_encoder(
    A.unsqueeze(-1)).permute(0, 3, 1, 2)
# A.unsqueeze(-1) gives shape [n_graph, n_node, n_node, 1]
# after spatial_pos_encoder and the permute, attn_bias has shape [n_graph, n_head, n_node, n_node]
# attn_weights += attn_bias.contiguous().view(bsz * self.num_heads, tgt_len, src_len)
# .permute rearranges a tensor's dimensions
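A shape-only sketch (hypothetical sizes, not from the source) of the attn_bias computation:

import torch

n_graph, n_node, nhead = 2, 5, 8
A = torch.randn(n_graph, n_node, n_node)                  # e.g. a pairwise structural matrix
spatial_pos_encoder = torch.nn.Linear(1, nhead)
attn_bias = spatial_pos_encoder(A.unsqueeze(-1)).permute(0, 3, 1, 2)
attn_bias.shape                                           # torch.Size([2, 8, 5, 5]) = (n_graph, nhead, n_node, n_node)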
2. elements.py
class MLP
By default nlayer=2; each layer is a Linear followed by a norm, with the hidden dimension n_hid = nin.
A ReLU activation is applied after each norm (after the final layer only if with_final_activation=True).
class MLP(nn.Module):
    def __init__(self, nin, nout, nlayer=2, with_final_activation=True, with_norm=BN, bias=True):
        super().__init__()
        n_hid = nin
        self.layers = nn.ModuleList([nn.Linear(nin if i == 0 else n_hid,
                                               n_hid if i < nlayer-1 else nout,
                                               # TODO: revise later
                                               bias=True if (i == nlayer-1 and not with_final_activation and bias)
                                               or (not with_norm) else False)  # set bias=False for BN
                                     for i in range(nlayer)])
        self.norms = nn.ModuleList([nn.BatchNorm1d(n_hid if i < nlayer-1 else nout) if with_norm else Identity()
                                    for i in range(nlayer)])
        self.nlayer = nlayer
        self.with_final_activation = with_final_activation
        self.residual = (nin == nout)  # TODO: test whether need this

    def reset_parameters(self):
        for layer, norm in zip(self.layers, self.norms):
            layer.reset_parameters()
            norm.reset_parameters()

    def forward(self, x):
        previous_x = x
        for i, (layer, norm) in enumerate(zip(self.layers, self.norms)):
            x = layer(x)
            if i < self.nlayer-1 or self.with_final_activation:
                x = norm(x)
                x = F.relu(x)
        # if self.residual:
        #     x = x + previous_x
        return x
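A hypothetical usage sketch (sizes assumed, not from the source):

import torch

mlp = MLP(nin=64, nout=32, nlayer=2, with_final_activation=True)
out = mlp(torch.randn(10, 64))    # BatchNorm1d expects (N, C), so the input here is (10, 64)
out.shape                         # torch.Size([10, 32])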