[Note] The cls_token used for classification in Transformers: cls_token is an extra trainable 768-dimensional embedding vector that is prepended to the input sequence; this vector is used to aggregate global information about the whole image.

Code:

import torch
import torch.nn as nn

class VisionTransformer(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_c=3, num_classes=1000,
                 embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True,
                 qk_scale=None, representation_size=None, distilled=False, drop_ratio=0.,
                 attn_drop_ratio=0., drop_path_ratio=0., embed_layer=nn.Conv2d, norm_layer=None,
                 act_layer=None):
        super(VisionTransformer, self).__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or nn.LayerNorm
        act_layer = act_layer or nn.GELU

        self.patch_embed = embed_layer(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
        num_patches = (img_size // patch_size) ** 2

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_ratio)

        # nn.TransformerEncoderLayer is used here in place of the original ViT blocks; it does not
        # implement stochastic depth, so drop_path_ratio (like qkv_bias, qk_scale and attn_drop_ratio)
        # has no effect in this simplified version.
        self.blocks = nn.Sequential(*[
            nn.TransformerEncoderLayer(embed_dim, num_heads, int(embed_dim * mlp_ratio),
                                       dropout=drop_ratio, activation='gelu',
                                       batch_first=True, norm_first=True)  # pre-norm, [B, N, C] inputs
            for _ in range(depth)
        ])
        self.norm = norm_layer(embed_dim)

        if representation_size and not distilled:
            self.has_logits = True
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(nn.Linear(embed_dim, representation_size), nn.Tanh())
        else:
            self.has_logits = False
            self.pre_logits = nn.Identity()

        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if distilled else None

        nn.init.trunc_normal_(self.pos_embed, std=0.02)
        if self.dist_token is not None:
            nn.init.trunc_normal_(self.dist_token, std=0.02)
        nn.init.trunc_normal_(self.cls_token, std=0.02)
        self.apply(self._init_vit_weights)

    def _init_vit_weights(self, module):
        if isinstance(module, nn.Linear):
            nn.init.trunc_normal_(module.weight, std=0.02)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.LayerNorm):
            nn.init.constant_(module.bias, 0)
            nn.init.constant_(module.weight, 1.0)

    def forward_features(self, x):
        B = x.shape[0]
        x = self.patch_embed(x).flatten(2).transpose(1, 2)  # [B, 196, 768]

        cls_tokens = self.cls_token.expand(B, -1, -1)  # [1, 1, 768] -> [B, 1, 768]
        if self.dist_token is not None:
            dist_tokens = self.dist_token.expand(B, -1, -1)  # [1, 1, 768] -> [B, 1, 768]
            x = torch.cat((cls_tokens, dist_tokens, x), dim=1)  # [B, 198, 768]
        else:
            x = torch.cat((cls_tokens, x), dim=1)  # [B, 197, 768]

        x = self.pos_drop(x + self.pos_embed)
        x = self.blocks(x)
        x = self.norm(x)
        if self.dist_token is None:
            return self.pre_logits(x[:, 0])
        else:
            return x[:, 0], x[:, 1]

    def forward(self, x):
        x = self.forward_features(x)
        if self.head_dist is not None:
            x, x_dist = self.head(x[0]), self.head_dist(x[1])
            if self.training:
                return x, x_dist
            else:
                return (x + x_dist) / 2
        else:
            x = self.head(x)
        return x

# Example usage
model = VisionTransformer()
input_tensor = torch.randn(1, 3, 224, 224)
output = model(input_tensor)
print(output.shape)  # [1, 1000]
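
For reference, the same class also supports the distilled configuration (a cls token plus a distillation token). The following is a minimal sketch based only on the forward() above: in training mode the two heads are returned separately, in eval mode their logits are averaged.

distilled_model = VisionTransformer(distilled=True)
distilled_model.eval()
with torch.no_grad():
    out = distilled_model(torch.randn(1, 3, 224, 224))
print(out.shape)  # [1, 1000] -- average of the cls head and the distillation head

distilled_model.train()
out_cls, out_dist = distilled_model(torch.randn(1, 3, 224, 224))
print(out_cls.shape, out_dist.shape)  # [1, 1000] [1, 1000]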

Initializing cls_token:

During model construction, cls_token is defined as a trainable parameter of shape (1, 1, embed_dim).
nn.Parameter(torch.zeros(1, 1, embed_dim)) creates it as an all-zero tensor and registers it as a learnable parameter (it is then re-initialized with nn.init.trunc_normal_(self.cls_token, std=0.02)).

Expanding cls_token:

In the forward pass, cls_token is expanded to match the batch size, i.e. to shape [B, 1, embed_dim]:
cls_tokens = self.cls_token.expand(B, -1, -1).

Concatenating cls_token:

The expanded cls_token is prepended to the sequence of patch embeddings:
x = torch.cat((cls_tokens, x), dim=1).

Adding the position embeddings:

The position embedding self.pos_embed is added to the token sequence:
x = self.pos_drop(x + self.pos_embed). A standalone shape walk-through of these four steps is sketched below.
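
The shape bookkeeping of the four steps can be checked in isolation. This is a minimal sketch, assuming a 224x224 input with 16x16 patches (196 patch tokens) and embed_dim=768 as in the model above; the patch tokens are random stand-ins for the output of the patch embedding:

import torch
import torch.nn as nn

B, num_patches, embed_dim = 4, 196, 768
patch_tokens = torch.randn(B, num_patches, embed_dim)             # stand-in for the patch embedding output

cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))            # step 1: trainable (1, 1, 768) parameter
pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))

cls_tokens = cls_token.expand(B, -1, -1)                          # step 2: [1, 1, 768] -> [B, 1, 768]
x = torch.cat((cls_tokens, patch_tokens), dim=1)                  # step 3: prepend -> [B, 197, 768]
x = x + pos_embed                                                 # step 4: broadcast add of position embeddings
print(x.shape)  # torch.Size([4, 197, 768])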
