Source code: the `vit-pytorch` repository on GitHub
Network model definition:
import torch
from vit_pytorch import ViT  # pip install vit-pytorch

model = ViT(          # set breakpoint 1 here and step into __init__
    image_size = 256,
    patch_size = 32,
    num_classes = 20,
    dim = 1024,
    depth = 6,
    heads = 16,
    mlp_dim = 2048,
    dropout = 0.1,
    emb_dropout = 0.1
)
img = torch.randn(5, 3, 256, 256)  # batch size 5, i.e. five 3x256x256 input images
preds = model(img)                 # set breakpoint 2 here and step into forward
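Before stepping in, the patch arithmetic is worth doing by hand: a 256x256 image cut into 32x32 patches gives an 8x8 grid of 64 patches, each flattened to 32*32*3 = 3072 values, and prepending the class token makes a sequence of 65 tokens. A quick check of those numbers:

image_size, patch_size, channels = 256, 32, 3
num_patches = (image_size // patch_size) ** 2   # 8 * 8 = 64 patches per image
patch_dim = channels * patch_size * patch_size  # 3072 values per flattened patch
seq_len = num_patches + 1                       # 65 tokens once the cls token is prepended
print(num_patches, patch_dim, seq_len)          # 64 3072 65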
Debugging in stages: watching the data dimensions change
Debugging at breakpoint 1
Tensor sizes and dimensions are given in the comments.
class ViT(nn.Module):
    def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
        super().__init__()
        image_height, image_width = pair(image_size)  # (256, 256); pair turns a scalar into a tuple
        patch_height, patch_width = pair(patch_size)  # (32, 32)

        assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'

        num_patches = (image_height // patch_height) * (image_width // patch_width)  # 8 * 8 = 64
        patch_dim = channels * patch_height * patch_width  # 3 * 32 * 32 = 3072
        assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'

        self.to_patch_embedding = nn.Sequential(
            Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),  # 5x3x256x256 -> 5x64x3072
            nn.Linear(patch_dim, dim),  # 5x64x3072 -> 5x64x1024
        )

        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))  # 1 x 65 x 1024
        self.cls_token = nn.Parameter(torch.randn(1, 1, dim))  # 1 x 1 x 1024
        self.dropout = nn.Dropout(emb_dropout)

        self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)

        self.pool = pool
        self.to_latent = nn.Identity()

        self.mlp_head = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, num_classes)
        )
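The two stages of to_patch_embedding can be run in isolation; the following standalone sketch uses the same Rearrange and nn.Linear layers with the numbers above plugged in:

import torch
from torch import nn
from einops.layers.torch import Rearrange

to_patch_embedding = nn.Sequential(
    # cut each 256x256 image into an 8x8 grid of 32x32 patches and flatten every patch
    Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = 32, p2 = 32),
    # project each 3072-dim flattened patch down to the model dimension 1024
    nn.Linear(3 * 32 * 32, 1024),
)
x = to_patch_embedding(torch.randn(5, 3, 256, 256))
print(x.shape)  # torch.Size([5, 64, 1024])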
Debugging at breakpoint 2
Key point: the input is img = torch.randn(5, 3, 256, 256).
==ViT's forward function==
def forward(self, img):                     # img: 5 x 3 x 256 x 256
    x = self.to_patch_embedding(img)        # 5 x 64 x 1024
    b, n, _ = x.shape                       # b = 5, n = 64
    cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)  # 1 x 1 x 1024 -> 5 x 1 x 1024
    x = torch.cat((cls_tokens, x), dim=1)   # 5 x 65 x 1024
    # self.pos_embedding: 1 x 65 x 1024, broadcast over the batch
    x += self.pos_embedding[:, :(n + 1)]    # 5 x 65 x 1024
    x = self.dropout(x)
    x = self.transformer(x)                 # step into Transformer's forward (below)
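The cls-token and positional-embedding bookkeeping above is easy to replay outside the model; in this sketch random tensors stand in for the learned parameters:

import torch
from einops import repeat

b, n, dim = 5, 64, 1024
x = torch.randn(b, n, dim)                  # stand-in for the patch embeddings
cls_token = torch.randn(1, 1, dim)          # stand-in for the learnable cls parameter
pos_embedding = torch.randn(1, n + 1, dim)  # one position vector per token, incl. cls

cls_tokens = repeat(cls_token, '() n d -> b n d', b = b)  # 5 x 1 x 1024
x = torch.cat((cls_tokens, x), dim = 1)                   # 5 x 65 x 1024
x = x + pos_embedding[:, :(n + 1)]                        # broadcasts over the batch dim
print(x.shape)  # torch.Size([5, 65, 1024])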
==Transformer's forward==
def forward(self, x):        # x: 5 x 65 x 1024
    for attn, ff in self.layers:
        x = attn(x) + x      # 5 x 65 x 1024; Attention block plus residual, step into its forward (below)
==Attention's forward==
def forward(self, x):                        # x: 5 x 65 x 1024
    # self.to_qkv(x): 5 x 65 x 3072
    qkv = self.to_qkv(x).chunk(3, dim = -1)  # tuple of 3 tensors, each 5 x 65 x 1024
    # self.heads = 16, so each head gets dim_head = 1024 / 16 = 64 channels
    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)  # each 5 x 65 x 1024 -> 5 x 16 x 65 x 64
    # self.scale = 0.125; k.transpose(-1, -2): 5 x 16 x 64 x 65
    dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale  # 5 x 16 x 65 x 65
    attn = self.attend(dots)                      # softmax over the last dim: 5 x 16 x 65 x 65
    out = torch.matmul(attn, v)                   # 5 x 16 x 65 x 64
    out = rearrange(out, 'b h n d -> b n (h d)')  # heads merged back: 5 x 65 x 1024
    return self.to_out(out)                       # 5 x 65 x 1024
==Back in Transformer's forward==
        x = ff(x) + x  # 5 x 65 x 1024; feed-forward (MLP) block plus residual
    return x           # 5 x 65 x 1024
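The attention shape bookkeeping can be replayed with a freshly initialized projection. In this sketch a random nn.Linear stands in for the trained to_qkv (in the source it is nn.Linear(dim, 3 * heads * dim_head, bias = False)) and a plain softmax stands in for self.attend:

import torch
from torch import nn
from einops import rearrange

heads, dim_head, dim = 16, 64, 1024
scale = dim_head ** -0.5                    # 1 / sqrt(64) = 0.125
to_qkv = nn.Linear(dim, 3 * heads * dim_head, bias = False)

x = torch.randn(5, 65, dim)
qkv = to_qkv(x).chunk(3, dim = -1)          # 3 tensors, each 5 x 65 x 1024
q, k, v = (rearrange(t, 'b n (h d) -> b h n d', h = heads) for t in qkv)  # each 5 x 16 x 65 x 64

dots = torch.matmul(q, k.transpose(-1, -2)) * scale  # 5 x 16 x 65 x 65 attention scores
attn = dots.softmax(dim = -1)                        # each row sums to 1
out = torch.matmul(attn, v)                          # 5 x 16 x 65 x 64
out = rearrange(out, 'b h n d -> b n (h d)')         # heads merged back: 5 x 65 x 1024
print(out.shape)  # torch.Size([5, 65, 1024])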
==Back in ViT's forward==
    x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]  # 'cls' pooling takes the class token: 5 x 1024
    x = self.to_latent(x)    # nn.Identity() here: 5 x 1024
    return self.mlp_head(x)  # 5 x 20, one logit per class
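The final pooling and classification head can be checked the same way; with pool = 'cls', indexing x[:, 0] simply keeps the class token:

import torch
from torch import nn

x = torch.randn(5, 65, 1024)  # stand-in for the Transformer output
pooled = x[:, 0]              # 'cls' pooling: 5 x 1024 ('mean' would be x.mean(dim = 1))
mlp_head = nn.Sequential(nn.LayerNorm(1024), nn.Linear(1024, 20))
print(mlp_head(pooled).shape)  # torch.Size([5, 20])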