Python torch.add Method Code Examples

This article details the use of Python's torch.add method through 16 code examples covering matrix addition, tensor operations, and more. It is aimed at developers learning the torch library and helps clarify concrete applications of torch.add.

This article collects and organizes typical usage examples of the torch.add method in Python. If you have been wondering what torch.add does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore other usage examples from the parent module, torch.

Below are 16 code examples of the torch.add method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
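Before diving into the examples, here is a minimal sketch of torch.add itself: it performs element-wise addition with broadcasting and accepts an optional alpha multiplier applied to the second operand. The values below are made up for illustration.

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([10.0, 20.0, 30.0])

# element-wise addition, equivalent to a + b
c = torch.add(a, b)             # tensor([11., 22., 33.])

# optional alpha multiplier: computes a + alpha * b
d = torch.add(a, b, alpha=0.5)  # tensor([ 6., 12., 18.])

# broadcasting: a Python scalar is expanded to the tensor's shape
e = torch.add(a, 1.0)           # tensor([2., 3., 4.])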

Example 1: _transform

Upvotes: 6

# Required import: import torch [as alias]
# Or: from torch import add [as alias]

import torch
from torch.autograd import Variable  # legacy autograd wrapper used below

def _transform(x, mat, maxmin):
    rot = mat[:, 0:3]    # rotation parameters
    trans = mat[:, 3:6]  # translation parameters

    # flatten the spatial dimensions: (N, C, H, W) -> (N, C, H * W)
    x = x.contiguous().view(-1, x.size()[1], x.size()[2] * x.size()[3])

    max_val, min_val = maxmin[:, 0], maxmin[:, 1]
    max_val, min_val = max_val.contiguous().view(-1, 1), min_val.contiguous().view(-1, 1)
    max_val, min_val = max_val.repeat(1, 3), min_val.repeat(1, 3)

    # _trans_rot (defined elsewhere in the project) builds the transform matrices
    trans, rot = _trans_rot(trans, rot)

    x1 = torch.matmul(rot, x)

    # append a homogeneous coordinate of 1 to min_val
    min_val1 = torch.cat((min_val, Variable(min_val.data.new(min_val.size()[0], 1).fill_(1))), dim=-1)
    min_val1 = min_val1.unsqueeze(-1)
    min_val1 = torch.matmul(trans, min_val1)

    # min-max normalize: torch.add(a, -b) computes a - b
    min_val = torch.div(torch.add(torch.matmul(rot, min_val1).squeeze(-1), -min_val),
                        torch.add(max_val, -min_val))
    min_val = min_val.mul_(255)

    # shift the rotated data by the normalized offset
    x = torch.add(x1, min_val.unsqueeze(-1))
    x = x.contiguous().view(-1, 3, 224, 224)

    return x

Developer: microsoft | Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition | Lines of code: 26
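The nested torch.add calls in _transform implement min-max normalization, (v - min) / (max - min), since torch.add(a, -b) is simply a - b. A standalone sketch of that idiom, with made-up values for illustration:

import torch

v = torch.tensor([0.2, 0.5, 0.9])
min_val = torch.tensor([0.2])
max_val = torch.tensor([0.9])

# torch.add(v, -min_val) computes v - min_val; dividing by (max - min) maps to [0, 1]
normalized = torch.div(torch.add(v, -min_val), torch.add(max_val, -min_val))
scaled = normalized.mul(255)  # rescale to [0, 255], as _transform does
print(scaled)                 # tensor([  0.0000, 109.2857, 255.0000])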

Example 2: get_loss

Upvotes: 6

# Required import: import torch [as alias]
# Or: from torch import add [as alias]

import torch
from torch.autograd import Variable  # legacy autograd wrapper used below

def get_loss(pred, y, criterion, mtr, a=0.5):
    """
    To calculate loss
    :param pred: predicted value
    :param y: actual value
    :param criterion: nn.CrossEntropyLoss
    :param mtr: beta matrix
    :param a: weight of the penalty term
    """
    mtr_t = torch.transpose(mtr, 1, 2)
    aa = torch.bmm(mtr, mtr_t)
    loss_fn = 0
    for i in range(aa.size()[0]):
        # torch.add(..., -I) computes (M @ M^T - I) for the i-th batch element
        aai = torch.add(aa[i, ], Variable(torch.neg(torch.eye(mtr.size()[1]))))
        # trace of the element-wise square accumulates the squared diagonal entries
        loss_fn += torch.trace(torch.mul(aai, aai).data)
    loss_fn /= aa.size()[0]
    # total loss = cross-entropy + a * penalty
    loss = torch.add(criterion(pred, y), Variable(torch.FloatTensor([loss_fn * a])))
    return loss

Developer: BarnesLab | Project: Patient2Vec | Lines of code: 19
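In get_loss, torch.add(aa[i, ], -I) forms (M @ M^T - I) for one batch element, and the trace of its element-wise square sums the squared diagonal entries, pushing each row of the beta matrix toward unit norm. A standalone sketch of the idiom, with hypothetical shapes; note that summing all entries instead of taking the trace would give the full squared Frobenius norm:

import torch

m = torch.randn(5, 8)  # hypothetical beta matrix for one sample
aa = torch.mm(m, m.t())

# torch.add(aa, torch.neg(torch.eye(5))) is simply aa - I
diff = torch.add(aa, torch.neg(torch.eye(5)))

diag_penalty = torch.trace(torch.mul(diff, diff))  # squared diagonal entries, as in get_loss
frob_penalty = torch.mul(diff, diff).sum()         # full squared Frobenius norm of (M M^T - I)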

Example 3: forward

Upvotes: 6

# Required import: import torch [as alias]
# Or: from torch import add [as alias]

def forward(self, x):
    if not self.equalInOut:
        x = self.relu1(self.bn1(x))
    else:
        out = self.relu1(self.bn1(x))
    if self.equalInOut:
        out = self.relu2(self.bn2(self.conv1(out)))
    else:
        out = self.relu2(self.bn2(self.conv1(x)))
    if self.droprate > 0:
        out = F.dropout(out, p=self.droprate, training=self.training)
    out = self.conv2(out)
    # The snippet was truncated here; assumed ending, following the standard
    # WideResNet basic block: torch.add merges the shortcut and residual paths.
    if not self.equalInOut:
        return torch.add(self.convShortcut(x), out)
    else:
        return torch.add(x, out)
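In this block, torch.add plays the role of the residual (skip) connection: the output of the convolutional branch is added element-wise to the shortcut path. A minimal sketch, with made-up shapes:

import torch

out = torch.randn(2, 16, 32, 32)       # output of the convolutional branch
shortcut = torch.randn(2, 16, 32, 32)  # identity (or 1x1-conv) shortcut path

# a residual connection is just an element-wise torch.add of the two paths
y = torch.add(shortcut, out)
assert torch.equal(y, shortcut + out)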

Example 4: forward (contrastive memory-bank scoring and update)

def forward(self, l, ab, y, idx=None):
    K = int(self.params[0].item())
    T = self.params[1].item()
    Z_l = self.params[2].item()
    Z_ab = self.params[3].item()
    momentum = self.params[4].item()
    batchSize = l.size(0)
    outputSize = self.memory_l.size(0)  # the number of samples in the memory bank
    inputSize = self.memory_l.size(1)   # the feature dimensionality

    # score computation
    if idx is None:
        # use AliasMethod to draw 4096 negative-sample indices for every sample in the batch
        idx = self.multinomial.draw(batchSize * (self.K + 1)).view(batchSize, -1)
        # sample positives and negatives
        idx.select(1, 0).copy_(y.data)

    # sample
    weight_l = torch.index_select(self.memory_l, 0, idx.view(-1)).detach()
    weight_l = weight_l.view(batchSize, K + 1, inputSize)
    out_ab = torch.bmm(weight_l, ab.view(batchSize, inputSize, 1))
    # sample
    weight_ab = torch.index_select(self.memory_ab, 0, idx.view(-1)).detach()
    weight_ab = weight_ab.view(batchSize, K + 1, inputSize)
    out_l = torch.bmm(weight_ab, l.view(batchSize, inputSize, 1))

    if self.use_softmax:
        out_ab = torch.div(out_ab, T)
        out_l = torch.div(out_l, T)
        out_l = out_l.contiguous()
        out_ab = out_ab.contiguous()
    else:
        out_ab = torch.exp(torch.div(out_ab, T))
        out_l = torch.exp(torch.div(out_l, T))
        # set Z_0 if it hasn't been set yet;
        # Z_0 is used as a constant approximation of Z, to scale the probs
        if Z_l < 0:
            self.params[2] = out_l.mean() * outputSize
            Z_l = self.params[2].clone().detach().item()
            print("normalization constant Z_l is set to {:.1f}".format(Z_l))
        if Z_ab < 0:
            self.params[3] = out_ab.mean() * outputSize
            Z_ab = self.params[3].clone().detach().item()
            print("normalization constant Z_ab is set to {:.1f}".format(Z_ab))
        # compute out_l, out_ab
        out_l = torch.div(out_l, Z_l).contiguous()
        out_ab = torch.div(out_ab, Z_ab).contiguous()

    # update memory
    with torch.no_grad():
        l_pos = torch.index_select(self.memory_l, 0, y.view(-1))
        l_pos.mul_(momentum)
        l_pos.add_(torch.mul(l, 1 - momentum))
        l_norm = l_pos.pow(2).sum(1, keepdim=True).pow(0.5)
        updated_l = l_pos.div(l_norm)
        self.memory_l.index_copy_(0, y, updated_l)

        ab_pos = torch.index_select(self.memory_ab, 0, y.view(-1))
        ab_pos.mul_(momentum)
        ab_pos.add_(torch.mul(ab, 1 - momentum))
        ab_norm = ab_pos.pow(2).sum(1, keepdim=True).pow(0.5)
        updated_ab = ab_pos.div(ab_norm)
        self.memory_ab.index_copy_(0, y, updated_ab)

    return out_l, out_ab
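The memory-bank update above relies on the in-place variant torch.add_: each stored feature is blended with its new value as pos = momentum * pos + (1 - momentum) * feat, then L2-normalized. A self-contained sketch of that update, with made-up sizes and momentum:

import torch

memory = torch.randn(10, 128)  # hypothetical memory bank of 10 feature vectors
feat = torch.randn(3, 128)     # new features for the samples at indices y
y = torch.tensor([0, 4, 7])
momentum = 0.5

with torch.no_grad():
    pos = torch.index_select(memory, 0, y)   # copies the selected rows
    pos.mul_(momentum)
    pos.add_(torch.mul(feat, 1 - momentum))  # in-place torch.add_: pos += (1 - momentum) * feat
    norm = pos.pow(2).sum(1, keepdim=True).pow(0.5)
    memory.index_copy_(0, y, pos.div(norm))  # write the L2-normalized vectors back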