Code:
import torch
import torch.nn as nn
class VisionTransformer(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_c=3, num_classes=1000,
                 embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True,
                 qk_scale=None, representation_size=None, distilled=False, drop_ratio=0.,
                 attn_drop_ratio=0., drop_path_ratio=0., embed_layer=nn.Conv2d, norm_layer=None,
                 act_layer=None):
        super(VisionTransformer, self).__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or nn.LayerNorm
        act_layer = act_layer or nn.GELU
        # Patch embedding: a conv whose kernel and stride both equal patch_size splits
        # the image into non-overlapping patches and projects each one to embed_dim.
        self.patch_embed = embed_layer(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
        num_patches = (img_size // patch_size) ** 2
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_ratio)
        # nn.TransformerEncoderLayer has no stochastic-depth (DropPath) support, so
        # drop_path_ratio, qkv_bias, qk_scale, attn_drop_ratio, and act_layer are
        # accepted for API compatibility but ignored here; a custom Block would be
        # needed to honor them. batch_first=True matches the [B, N, C] input built in
        # forward_features, and norm_first=True with GELU matches the pre-norm ViT design.
        self.blocks = nn.Sequential(*[
            nn.TransformerEncoderLayer(embed_dim, num_heads, int(embed_dim * mlp_ratio),
                                       drop_ratio, activation='gelu',
                                       batch_first=True, norm_first=True)
            for _ in range(depth)
        ])
        self.norm = norm_layer(embed_dim)
        if representation_size and not distilled:
            self.has_logits = True
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(nn.Linear(embed_dim, representation_size), nn.Tanh())
        else:
            self.has_logits = False
            self.pre_logits = nn.Identity()
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if distilled else None
        nn.init.trunc_normal_(self.pos_embed, std=0.02)
        if self.dist_token is not None:
            nn.init.trunc_normal_(self.dist_token, std=0.02)
        nn.init.trunc_normal_(self.cls_token, std=0.02)
        self.apply(self._init_vit_weights)
    def _init_vit_weights(self, module):
        # ViT-style init: truncated normal for Linear weights, zeros/ones for LayerNorm.
        if isinstance(module, nn.Linear):
            nn.init.trunc_normal_(module.weight, std=0.02)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.LayerNorm):
            nn.init.constant_(module.bias, 0)
            nn.init.constant_(module.weight, 1.0)
    def forward_features(self, x):
        B = x.shape[0]
        # [B, C, H, W] -> [B, embed_dim, H/P, W/P] -> [B, num_patches, embed_dim]
        x = self.patch_embed(x).flatten(2).transpose(1, 2)  # [B, 196, 768]
        cls_tokens = self.cls_token.expand(B, -1, -1)  # [1, 1, 768] -> [B, 1, 768]
        if self.dist_token is not None:
            dist_tokens = self.dist_token.expand(B, -1, -1)  # [1, 1, 768] -> [B, 1, 768]
            x = torch.cat((cls_tokens, dist_tokens, x), dim=1)  # [B, 198, 768]
        else:
            x = torch.cat((cls_tokens, x), dim=1)  # [B, 197, 768]
        x = self.pos_drop(x + self.pos_embed)
        x = self.blocks(x)
        x = self.norm(x)
        if self.dist_token is None:
            return self.pre_logits(x[:, 0])
        else:
            return x[:, 0], x[:, 1]
    def forward(self, x):
        x = self.forward_features(x)
        if self.head_dist is not None:
            # Distilled model: separate classifier heads for the cls and dist tokens.
            x, x_dist = self.head(x[0]), self.head_dist(x[1])
            if self.training:
                return x, x_dist
            else:
                # At inference, average the predictions of the two heads.
                return (x + x_dist) / 2
        else:
            x = self.head(x)
            return x
# Example usage
model = VisionTransformer()
input_tensor = torch.randn(1, 3, 224, 224)
output = model(input_tensor)
print(output.shape)  # torch.Size([1, 1000])
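
The distilled variant can be exercised the same way. The sketch below is illustrative only (the small depth and num_classes are arbitrary choices to keep it light); it shows the two behaviors of forward: a (cls, dist) logit pair in training mode, and the average of the two heads at inference.

# Sketch: distilled variant (depth and num_classes shrunk for illustration).
distilled_model = VisionTransformer(num_classes=10, depth=2, distilled=True)

distilled_model.train()
logits_cls, logits_dist = distilled_model(input_tensor)
print(logits_cls.shape, logits_dist.shape)  # torch.Size([1, 10]) torch.Size([1, 10])

distilled_model.eval()
with torch.no_grad():
    avg_logits = distilled_model(input_tensor)  # (cls + dist) / 2
print(avg_logits.shape)  # torch.Size([1, 10])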
Initializing cls_token:
During model initialization, cls_token is defined as a trainable parameter of shape (1, 1, embed_dim).
nn.Parameter(torch.zeros(1, 1, embed_dim)) creates it as an all-zero tensor and registers it as a trainable parameter of the model.
Expanding cls_token:
During the forward pass, cls_token is expanded to match the batch size, giving shape [B, 1, embed_dim]:
cls_tokens = self.cls_token.expand(B, -1, -1).
Concatenating cls_token:
The expanded cls_token is prepended to the sequence of patch embeddings:
x = torch.cat((cls_tokens, x), dim=1).
Adding positional embeddings:
The positional embedding self.pos_embed is added to the token sequence, followed by dropout: x = self.pos_drop(x + self.pos_embed). A minimal standalone sketch of these four steps follows below.
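
Here is that sketch of the four steps outside the class, with shapes printed at each stage (B=2, num_patches=196, and embed_dim=768 are arbitrary illustrative values):

import torch
import torch.nn as nn

B, num_patches, embed_dim = 2, 196, 768
patch_embeds = torch.randn(B, num_patches, embed_dim)  # stand-in for patch_embed output

# Step 1: initialize cls_token as a trainable (1, 1, embed_dim) parameter.
cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))

# Step 2: expand it to the batch size (expand creates a view, no copy).
cls_tokens = cls_token.expand(B, -1, -1)
print(cls_tokens.shape)  # torch.Size([2, 1, 768])

# Step 3: prepend it to the patch-embedding sequence.
x = torch.cat((cls_tokens, patch_embeds), dim=1)
print(x.shape)  # torch.Size([2, 197, 768])

# Step 4: add the positional embedding (dropout omitted for brevity).
pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
x = x + pos_embed
print(x.shape)  # torch.Size([2, 197, 768])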