PyTorch 计算 topk —— Python torch 模块 topk() 实例源码 - 编程字典

这段代码展示了如何在PyTorch中使用`torch.topk()`来找到输入矩阵的最近邻。它包括计算余弦相似度、应用softmax以及在预测和训练模式下计算损失函数的步骤。
摘要由CSDN通过智能技术生成

def query(self, x, y, predict=False):
    """Compute the nearest neighbor(s) of the input queries in memory.

    Arguments:
        x: A matrix of queries of size (batch_size x key_dim); it is
            projected through ``self.query_proj`` and L2-normalized
            before the similarity search.
        y: A matrix of correct labels (batch_size x 1 per the original
            docstring; the broadcasting below suggests (batch_size,) —
            TODO confirm against the caller).
        predict: When True, skip the loss computation (loss is None).

    Returns:
        y_hat: The memory values of the single nearest neighbor to each
            query, (batch_size x 1).
        softmax_score: Normalized (softmax) similarity scores between
            each query and its top-k neighbors, (batch_size x top_k).
        loss: Average loss for the memory module, or None when
            ``predict`` is True.
    """
    batch_size, dims = x.size()
    query = F.normalize(self.query_proj(x), dim=1)

    # Both the query and the memory keys are L2-normalized, so this
    # matrix product is the cosine similarity against every key.
    scores = torch.matmul(query, torch.t(self.keys_var))

    # Find the k nearest neighbors of each query.
    cosine_similarity, topk_indices_var = torch.topk(scores, self.top_k, dim=1)

    # Softmax of the temperature-scaled cosine similarities.
    # dim=1 is explicit: omitting it (as the original did) is deprecated
    # and warns or errors on modern PyTorch; row-wise is the intent here.
    softmax_score = F.softmax(self.softmax_temperature * cosine_similarity, dim=1)

    # Retrieve memory values — the prediction is the value stored at the
    # single nearest key. (.detach() alone suffices; the original's
    # extra .data was redundant.)
    topk_indices = topk_indices_var.detach()
    y_hat_indices = topk_indices[:, 0]
    y_hat = self.values[y_hat_indices]

    loss = None
    if not predict:
        # Loss function: margin between the best correct and the best
        # incorrect neighbor within the top-k.
        # topk_indices: (batch_size x top_k)
        batch_size, topk_size = topk_indices.size()

        # Collect the memory values corresponding to the top-k scores.
        # NOTE(review): resize_ silently reuses storage; it is a no-op
        # reshape assuming self.values is 1-D — confirm.
        flat_topk_values = self.values[topk_indices]
        topk_values = flat_topk_values.resize_(batch_size, topk_size)

        # 1.0 wherever the retrieved value matches the true label.
        correct_mask = torch.eq(topk_values, torch.unsqueeze(y.data, dim=1)).float()
        correct_mask_var = ag.Variable(correct_mask, requires_grad=False)

        # Best-scoring correct neighbor and best-scoring incorrect one.
        pos_score, pos_idx = torch.topk(torch.mul(cosine_similarity, correct_mask_var), 1, dim=1)
        neg_score, neg_idx = torch.topk(torch.mul(cosine_similarity, 1 - correct_mask_var), 1, dim=1)

        # Zero out the positive score when no correct value appears in
        # the top-k, so it cannot contribute to the margin loss.
        mask = 1.0 - torch.eq(torch.sum(correct_mask_var, dim=1), 0.0).float()
        pos_score = torch.mul(pos_score, torch.unsqueeze(mask, dim=1))

        loss = MemoryLoss(pos_score, neg_score, self.margin)

    # Write the query back into memory.
    self.update(query, y, y_hat, y_hat_indices)

    return y_hat, softmax_score, loss

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值