My recent experiments need some Vision Transformer code, and I was still a bit fuzzy on the concrete implementation details inside ViT, so today I re-read the code and annotated it.
The code comes from the vit-pytorch library.
GitHub: https://github.com/lucidrains/vit-pytorch
Installation:
pip install vit-pytorch
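Once installed, the model can be driven end to end in a few lines; here is a quick smoke test along the lines of the library's README (the hyperparameter values are just examples):

import torch
from vit_pytorch import ViT

v = ViT(
    image_size = 224,
    patch_size = 32,
    num_classes = 1000,
    dim = 1024,
    depth = 6,
    heads = 16,
    mlp_dim = 2048,
    dropout = 0.1,
    emb_dropout = 0.1
)

img = torch.randn(32, 3, 224, 224)  # a batch of 32 images, matching the shapes traced in the comments below
preds = v(img)                      # (32, 1000) class logits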
The annotated code:
import torch
from torch import nn
from einops import repeat
from einops.layers.torch import Rearrange

# helper (defined in vit_pytorch/vit.py): turn a single int into an (h, w) tuple
def pair(t):
    return t if isinstance(t, tuple) else (t, t)

class ViT(nn.Module):
    def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls',
                 channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
        super().__init__()
        image_height, image_width = pair(image_size)
        patch_height, patch_width = pair(patch_size)
        # pair() converts image_size and patch_size into tuples, giving the height and width of the image and of each patch
        assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
        # the image dimensions must divide evenly by the patch size, so the image splits into a whole number of patches
        num_patches = (image_height // patch_height) * (image_width // patch_width)
        # number of patches the image is split into
        patch_dim = channels * patch_height * patch_width
        # dimensionality of one flattened patch, i.e. how many pixel values a patch contains
        assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
        # pooling mode: either the special cls token or mean pooling over all tokens
        self.to_patch_embedding = nn.Sequential(
            Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
            nn.Linear(patch_dim, dim),
        )
        # Rearrange (from einops) reshapes the tensor:
        # b c (h patch_height) (w patch_width) -> b (h w) (patch_height patch_width c)
        # the raw image comes in as b c image_size image_size,
        # e.g. 32 3 224 224 is viewed as 32 3 (7 32) (7 32)
        # and rearranged to 32 (7 7) (32 32 3), i.e. 32 x 49 x 3072
        # which is exactly "cut the image into patches and flatten each one"
        # the Linear layer then projects each flattened patch to the model dimension dim,
        # so the output is 32 x 49 x dim (see the einops demo after the code)
        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
        # num_patches + 1: the extra slot encodes the position of the cls token
        # learned positional embedding; nn.Parameter makes it trainable and registers it on the module
        # it has dimension dim per position and is added directly to the token embeddings
        self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
        # special token used for classification, prepended to the head of the sequence
        self.dropout = nn.Dropout(emb_dropout)
        self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
        # the Transformer encoder (defined earlier in the same file of vit-pytorch)
        self.pool = pool
        self.to_latent = nn.Identity()
        self.mlp_head = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, num_classes)
        )
        # classification head: layer norm + linear layer
    def forward(self, img):
        x = self.to_patch_embedding(img)
        # feed in the whole image; patch extraction and projection happen in one shot
        # output shape: b x num_patches x dim
        b, n, _ = x.shape
        # b = batch size, n = num_patches
        cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
        # repeat (from einops) broadcasts along the batch dimension:
        # the single cls token is copied b times so that every sample in the batch gets one
        x = torch.cat((cls_tokens, x), dim=1)
        # prepend the cls token to the head of the patch sequence
        x += self.pos_embedding[:, :(n + 1)]
        # add the positional embedding element-wise
        x = self.dropout(x)
        x = self.transformer(x)
        # run the b x (num_patches + 1) x dim sequence through the transformer
        x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
        # pooling: mean over all tokens, or just take the cls token
        x = self.to_latent(x)
        return self.mlp_head(x)
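To make the Rearrange comment concrete, here is a standalone check of the shape arithmetic, using the same 32 x 3 x 224 x 224 example as the comments (the functional rearrange from einops behaves like the Rearrange layer):

import torch
from einops import rearrange

img = torch.randn(32, 3, 224, 224)  # b c h w
patches = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = 32, p2 = 32)
print(patches.shape)  # torch.Size([32, 49, 3072]): 49 = 7 * 7 patches, 3072 = 32 * 32 * 3 pixels per patch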
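And the same kind of sanity check for the cls token and positional embedding steps; dim = 1024 and the 49-patch count are just the example numbers from above, not anything fixed by the model:

import torch
from einops import repeat

dim = 1024                                   # example model dimension
cls_token = torch.randn(1, 1, dim)
pos_embedding = torch.randn(1, 49 + 1, dim)  # one slot per patch plus one for the cls token
x = torch.randn(32, 49, dim)                 # projected patch embeddings

cls_tokens = repeat(cls_token, '() n d -> b n d', b = 32)  # copy the token across the batch: (32, 1, dim)
x = torch.cat((cls_tokens, x), dim = 1)                    # (32, 50, dim)
x = x + pos_embedding[:, :50]                              # positions are added element-wise
print(x.shape)  # torch.Size([32, 50, 1024])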