Vision Transformer Notes
These are study notes taken while watching the tutorial videos by the uploader 霹雳吧啦Wz.
Patch embedding
The patch_embedding step splits the original 224*224*3 image into 14*14 patches, each 16*16 pixels in size; the 16*16 pixels of every patch are then folded into the channel dimension.
Each patch therefore ends up as a vector of 16*16*3 = 768 values.
# imports used by the code in these notes
import torch
import torch.nn as nn
from functools import partial
from collections import OrderedDict


class PatchEmbed(nn.Module):
    """
    2D Image to Patch Embedding
    """
    def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None):
        super().__init__()
        img_size = (img_size, img_size)
        patch_size = (patch_size, patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        # self.grid_size = 224 // 16 = 14, the number of patches along each side
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
        # kernel_size = 16, stride = 16
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # flatten: [B, C, H, W] -> [B, C, HW]
        # transpose: [B, C, HW] -> [B, HW, C]
        x = self.proj(x).flatten(2).transpose(1, 2)
        x = self.norm(x)
        return x
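A quick sanity check, assuming the PatchEmbed class above and a dummy input, that reproduces the numbers from the description: 224/16 = 14 patches per side, 14*14 = 196 patches, each embedded into 768 channels:

patch_embed = PatchEmbed(img_size=224, patch_size=16, in_c=3, embed_dim=768)
img = torch.randn(1, 3, 224, 224)   # a dummy batch with one image
out = patch_embed(img)
print(patch_embed.grid_size)        # (14, 14) patches per side
print(patch_embed.num_patches)      # 196
print(out.shape)                    # torch.Size([1, 196, 768])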
Attention
This implements the multi-head attention part of the encoder block.
class Attention(nn.Module):
    def __init__(self,
                 dim,                  # dimension of the input tokens
                 num_heads=8,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop_ratio=0.,
                 proj_drop_ratio=0.):
        super(Attention, self).__init__()
        self.num_heads = num_heads
        # Q, K and V are computed for the full dim and then split evenly across the heads
        head_dim = dim // num_heads
        # head_dim = 768 // 8; this is the per-head dimension of q, k and v
        # multi-head self-attention first computes Q, K, V as usual and then splits them into num_heads pieces
        # if no scale is passed in, use 1 / sqrt(head_dim)
        self.scale = qk_scale or head_dim ** -0.5
        # a single fully connected layer produces Q, K and V in one shot
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop_ratio)
        # projection of the concatenated attention output, i.e. multiplying by W_O
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop_ratio)

    def forward(self, x):
        # x: [batch_size, num_patches + 1, total_embed_dim]
        # num_patches = 14 * 14 = 196; the +1 is the class token
        # total_embed_dim = 768, the content of each patch
        B, N, C = x.shape
        # qkv(): -> [batch_size, num_patches + 1, 3 * total_embed_dim]; the linear layer triples the embedding dim
        # reshape: -> [batch_size, num_patches + 1, 3, num_heads, embed_dim_per_head]
        # permute: -> [3, batch_size, num_heads, num_patches + 1, embed_dim_per_head]
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        # each of q, k, v: [batch_size, num_heads, num_patches + 1, embed_dim_per_head]
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        # transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches + 1]
        # @: matrix multiply -> [batch_size, num_heads, num_patches + 1, num_patches + 1]
        # @ is matrix multiplication; for high-dimensional tensors the leading batch dimensions are handled automatically
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        # softmax is applied to each row of the attention matrix
        attn = self.attn_drop(attn)
        # @: multiply -> [batch_size, num_heads, num_patches + 1, embed_dim_per_head]
        # transpose: -> [batch_size, num_patches + 1, num_heads, embed_dim_per_head]
        # reshape: -> [batch_size, num_patches + 1, total_embed_dim]
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        # the reshape concatenates the outputs of all heads
        x = self.proj(x)       # linear projection with W_O
        x = self.proj_drop(x)  # followed by a second dropout
        return x
attn starts as Q multiplied by the transpose of K, scaled by 1/sqrt(d).
Softmax is then applied to every row of the attn matrix.
Why apply softmax row by row?
Each row of attn holds the attention scores of one token against all the other tokens, so normalizing each row turns those scores into weights that sum to 1.
The attention matrix then passes through a dropout layer.
Finally, the outputs of the multiple heads are concatenated back together by the reshape.
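A minimal sketch, assuming the Attention module above and random data, that checks the output shape and verifies that each row of the attention matrix sums to 1 after the softmax (the single-head computation is written out by hand; the 96 = 768/8 per-head dimension is just for illustration):

torch.manual_seed(0)
attn_layer = Attention(dim=768, num_heads=8)
x = torch.randn(2, 197, 768)                       # [batch, 196 patches + 1 cls token, embed_dim]
out = attn_layer(x)
print(out.shape)                                   # torch.Size([2, 197, 768])

# the same computation written out by hand for one head
q = torch.randn(197, 96)                           # 96 = 768 / 8 heads
k = torch.randn(197, 96)
scores = (q @ k.transpose(-2, -1)) * 96 ** -0.5    # [197, 197]
weights = scores.softmax(dim=-1)                   # row-wise softmax
print(weights.sum(dim=-1)[:3])                     # every row sums to 1.0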
MLP
This implements the MLP block inside the encoder block.
It is a simple feed-forward structure.
Note that the first Linear layer expands the number of nodes to 4 times the input size (768 -> 3072), and the second Linear layer maps the dimension back down.
class Mlp(nn.Module):
    """
    MLP as used in Vision Transformer, MLP-Mixer and related networks
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        # the output dimension defaults to the input dimension
        hidden_features = hidden_features or in_features
        # hidden_features is the output dimension of the first fully connected layer
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()  # GELU is used as the activation function
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
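A quick shape check, assuming the Mlp class above, showing the 4x expansion and the restoration of the token dimension:

mlp = Mlp(in_features=768, hidden_features=768 * 4)
tokens = torch.randn(2, 197, 768)
print(mlp.fc1(tokens).shape)   # torch.Size([2, 197, 3072]) after the 4x expansion
print(mlp(tokens).shape)       # torch.Size([2, 197, 768]) back to the original dimension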
Block
This implements the complete encoder block.
class Block(nn.Module):
    def __init__(self,
                 dim,
                 num_heads,
                 mlp_ratio=4.,           # the hidden dim of the first MLP layer is 4x the input dim
                 qkv_bias=False,
                 qk_scale=None,
                 drop_ratio=0.,          # dropout used after the attention projection W_O and inside the MLP
                 attn_drop_ratio=0.,     # dropout applied to the attention matrix after the softmax
                 drop_path_ratio=0.,     # drop path (stochastic depth) on the two residual branches of the encoder block
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm):
        super(Block, self).__init__()
        self.norm1 = norm_layer(dim)     # the first LayerNorm
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                              attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)   # dim * 4, the hidden dim of the first MLP layer
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio)

    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
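DropPath is used by Block but not defined in these notes; it comes from the accompanying reference code (an equivalent implementation also ships with the timm library). The sketch below shows the idea of stochastic depth: during training, with probability drop_prob the whole residual branch is zeroed out for a given sample, and the surviving samples are rescaled by 1/keep_prob:

def drop_path(x, drop_prob: float = 0., training: bool = False):
    # stochastic depth: with probability drop_prob, drop the whole residual branch for a sample
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # one random value per sample, broadcast over all remaining dimensions
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()            # becomes 0 or 1 per sample
    return x.div(keep_prob) * random_tensor


class DropPath(nn.Module):
    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

Block wraps each of its two residual branches with this module, or with nn.Identity when the ratio is 0.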
VisionTransformer
Building the complete ViT.
class VisionTransformer(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_c=3, num_classes=1000,
                 embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True,
                 qk_scale=None, representation_size=None, distilled=False, drop_ratio=0.,
                 attn_drop_ratio=0., drop_path_ratio=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_c (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer, i.e. how many encoder blocks are stacked
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_ratio (float): dropout rate
            attn_drop_ratio (float): attention dropout rate
            drop_path_ratio (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
        """
        super(VisionTransformer, self).__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 2 if distilled else 1  # distilled is False for ViT, so only the class token is added
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)  # partial fixes the default eps argument
        act_layer = act_layer or nn.GELU
        self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_c=in_c, embed_dim=embed_dim)  # patch embedding
        num_patches = self.patch_embed.num_patches
        # class token: a trainable parameter created with nn.Parameter, shape [1, 1, 768];
        # the leading 1 is a batch dimension so it can be concatenated with the patch tokens
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None  # None for ViT, only used by DeiT
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))  # [1, 197, 768], same shape as the tokens after concatenation
        self.pos_drop = nn.Dropout(p=drop_ratio)  # dropout applied after adding the position embedding
        dpr = [x.item() for x in torch.linspace(0, drop_path_ratio, depth)]  # stochastic depth decay rule: drop path ratio per encoder block
        # by default the drop path ratio increases linearly from block to block
        self.blocks = nn.Sequential(*[
            Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                  drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, drop_path_ratio=dpr[i],
                  norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)
        ])  # stack the encoder blocks; * unpacks the list
        self.norm = norm_layer(embed_dim)
        # LayerNorm applied after the encoder
        # Representation layer
        if representation_size and not distilled:  # False with the defaults above
            self.has_logits = True
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ("fc", nn.Linear(embed_dim, representation_size)),
                ("act", nn.Tanh())
            ]))
        else:
            self.has_logits = False
            self.pre_logits = nn.Identity()
        # Classifier head(s)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = None
        if distilled:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
        # Weight init
        nn.init.trunc_normal_(self.pos_embed, std=0.02)
        if self.dist_token is not None:
            nn.init.trunc_normal_(self.dist_token, std=0.02)
        nn.init.trunc_normal_(self.cls_token, std=0.02)
        self.apply(_init_vit_weights)
    def forward_features(self, x):
        # [B, C, H, W] -> [B, num_patches, embed_dim]
        x = self.patch_embed(x)  # [B, 196, 768]
        # [1, 1, 768] -> [B, 1, 768]
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)
        if self.dist_token is None:
            x = torch.cat((cls_token, x), dim=1)  # [B, 197, 768], concatenate the class token along the token dimension
        else:
            x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
        x = self.pos_drop(x + self.pos_embed)
        x = self.blocks(x)
        x = self.norm(x)
        # take the output corresponding to the cls token
        if self.dist_token is None:
            # pre_logits is an Identity layer here
            return self.pre_logits(x[:, 0])
        else:
            return x[:, 0], x[:, 1]

    def forward(self, x):
        x = self.forward_features(x)
        if self.head_dist is not None:  # None for ViT, so this branch is skipped
            x, x_dist = self.head(x[0]), self.head_dist(x[1])
            if self.training and not torch.jit.is_scripting():
                # during training, return both classifier predictions
                return x, x_dist
            else:
                # during inference, return the average of both classifier predictions
                return (x + x_dist) / 2
        else:
            x = self.head(x)  # the classification head (a Linear layer); ViT always takes this branch
        return x
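Two small additions to make the notes self-contained: _init_vit_weights is referenced by self.apply(...) but not shown above; the sketch below follows the usual initialization in the reference code (truncated normal for Linear, Kaiming for Conv2d, ones/zeros for LayerNorm) and should be treated as an approximation of it. The usage example afterwards builds a ViT-B/16 and runs a dummy forward pass:

def _init_vit_weights(m):
    # initialization sketch: truncated-normal Linear weights, Kaiming Conv2d weights,
    # and the standard ones/zeros for LayerNorm
    if isinstance(m, nn.Linear):
        nn.init.trunc_normal_(m.weight, std=.01)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode="fan_out")
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.LayerNorm):
        nn.init.zeros_(m.bias)
        nn.init.ones_(m.weight)


# build the ViT-Base/16 configuration described above and run a dummy image through it
model = VisionTransformer(img_size=224, patch_size=16, embed_dim=768,
                          depth=12, num_heads=12, num_classes=1000)
logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000])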