output = torch.Tensor.scatter_(dim, index, src)

scatter_ writes the values of src into the calling tensor in place, at the positions given by index, and returns the modified tensor.

dim = 0: operate along the vertical direction (down the columns);

dim = 1: operate along the horizontal direction (across the rows);

import torch

a = torch.rand(2, 5)
print(a)
# tensor([[0.0548, 0.1293, 0.1842, 0.6538, 0.7267],
#         [0.6978, 0.7296, 0.7779, 0.7206, 0.0884]])

b = torch.zeros(3, 5).scatter_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]), a)
print(b)
# tensor([[0.0548, 0.7296, 0.7779, 0.6538, 0.7267],
#         [0.0000, 0.1293, 0.0000, 0.7206, 0.0000],
#         [0.6978, 0.0000, 0.1842, 0.0000, 0.0884]])

As the code shows, with dim=0 scatter_ takes data from a along the vertical direction and places each value into the same column of b, at the row given by the corresponding entry of index. For example, the element in the first row of the first column of a, 0.0548, is placed in column 1 of b at row index=0, i.e. b[0,0]=0.0548; the element in the second row of the first column of a, 0.6978, is placed in column 1 of b at row index=2, i.e. b[2,0]=0.6978. The remaining elements of a are placed in the same way.
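In other words, for dim=0 the placement rule is b[index[i][j], j] = a[i][j]. A minimal sketch that reproduces scatter_ with explicit loops (tensor names follow the example above):

import torch

a = torch.rand(2, 5)
index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])

# Reference implementation of scatter_ along dim=0:
# b[index[i][j], j] = a[i][j]
b = torch.zeros(3, 5)
for i in range(index.size(0)):
    for j in range(index.size(1)):
        b[index[i, j], j] = a[i, j]

# Agrees with the built-in in-place version
assert torch.equal(b, torch.zeros(3, 5).scatter_(0, index, a))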


A practical use of scatter_ along dim=1 appears in label smoothing: the target distribution is filled with a small uniform value, and scatter_ then writes the confidence mass into each sample's true-class column. The accompanying MLP is the model these targets are trained against:

import torch
import torch.nn as nn


class MLP(nn.Module):
    def __init__(
        self,
        input_size: int,
        output_size: int,
        n_hidden: int,
        classes: int,
        dropout: float,
        normalize_before: bool = True,
    ):
        super(MLP, self).__init__()
        self.input_size = input_size
        self.dropout = dropout
        self.n_hidden = n_hidden
        self.classes = classes
        self.output_size = output_size
        self.normalize_before = normalize_before
        self.model = nn.Sequential(
            nn.Linear(self.input_size, n_hidden),
            nn.Dropout(self.dropout),
            nn.ReLU(),
            nn.Linear(n_hidden, self.output_size),
            nn.Dropout(self.dropout),
            nn.ReLU(),
        )
        self.after_norm = nn.LayerNorm(self.input_size, eps=1e-5)
        self.fc = nn.Sequential(
            nn.Dropout(self.dropout),
            nn.Linear(self.input_size, self.classes),
        )
        self.output_layer = nn.Linear(self.output_size, self.classes)

    def forward(self, x):
        # x: (batch_size, length, input_size)
        if self.normalize_before:
            x = self.after_norm(x)
        output = self.model(x)
        # Average over the length dimension
        return output.mean(dim=1)


class LabelSmoothingLoss(nn.Module):
    def __init__(self, size: int, smoothing: float):
        super(LabelSmoothingLoss, self).__init__()
        self.size = size
        self.criterion = nn.KLDivLoss(reduction="none")
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        batch_size = x.size(0)
        if self.smoothing is None:
            return nn.CrossEntropyLoss()(x, target.view(-1))
        # Fill the target distribution with smoothing / (size - 1), then use
        # scatter_ (dim=1) to place the confidence at each true-class index.
        true_dist = torch.zeros_like(x)
        true_dist.fill_(self.smoothing / (self.size - 1))
        true_dist.scatter_(1, target.view(-1).unsqueeze(1), self.confidence)
        kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)
        return kl.sum() / batch_size
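A quick sanity check of the scatter_ step in isolation, assuming smoothing=0.1 and size=5 (values chosen for illustration):

import torch

smoothing, size = 0.1, 5
target = torch.tensor([2, 0])  # true classes for a batch of 2
true_dist = torch.full((2, size), smoothing / (size - 1))
true_dist.scatter_(1, target.unsqueeze(1), 1.0 - smoothing)
print(true_dist)
# tensor([[0.0250, 0.0250, 0.9000, 0.0250, 0.0250],
#         [0.9000, 0.0250, 0.0250, 0.0250, 0.0250]])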