PyTorch 的 nn.Sigmoid 并非只支持绝对值小于 1 的数——下面的报错真正原因是输入张量的 dtype 为整型(Long),而 torch.sigmoid 只对浮点类型实现

激活函数sigmoid

input = torch.randn(2,3)
print(input)
tensor([[-0.2863,  0.1606, -0.6738],
        [ 0.3379, -0.5613,  0.3672]])
X = torch.tensor([[1,2,3],[2,3,4]])
Y = mod(X)
print(Y)
---------------------------------------------------------------------------

RuntimeError                              Traceback (most recent call last)

<ipython-input-39-dac9d1ea9cc0> in <module>()
      1 X = torch.tensor([[1,2,3],[2,3,4]])
----> 2 Y = mod(X)
      3 print(Y)


E:\soft2\annaconda\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    548             result = self._slow_forward(*input, **kwargs)
    549         else:
--> 550             result = self.forward(*input, **kwargs)
    551         for hook in self._forward_hooks.values():
    552             hook_result = hook(self, input, result)


E:\soft2\annaconda\lib\site-packages\torch\nn\modules\activation.py in forward(self, input)
    269 
    270     def forward(self, input):
--> 271         return torch.sigmoid(input)
    272 
    273 


RuntimeError: "sigmoid_cpu" not implemented for 'Long'
X = torch.tensor([[0.1,0.2,0.3],[0.2,0.3,0.4]])
print(X)
mod = nn.Sigmoid()
Y = mod(X)
y = mod(input)
print(Y)
print(y)
tensor([[0.1000, 0.2000, 0.3000],
        [0.2000, 0.3000, 0.4000]])
tensor([[0.5250, 0.5498, 0.5744],
        [0.5498, 0.5744, 0.5987]])
tensor([[0.4289, 0.5401, 0.3377],
        [0.5837, 0.3633, 0.5908]])
  • 以上例子的正确结论是:nn.Sigmoid 要求输入为浮点张量;前面报错 "sigmoid_cpu" not implemented for 'Long' 是因为 torch.tensor([[1,2,3],[2,3,4]]) 推断为整型(Long)。把输入写成浮点数(如 1.0)即可正常计算,与数值是否大于 1 无关。
class DropBlock_Ske(nn.Module):
    """Attention-guided DropBlock over the joint (V) dimension of a skeleton
    tensor of shape (n, c, t, v).

    Two small MLPs compute a per-joint attention from channel/time-pooled
    statistics; joints are then stochastically dropped with a probability
    proportional to their attention-weighted activation magnitude, and each
    dropped seed is spread to graph neighbors via the adjacency ``A``.

    Fixes vs. the original:
      * the MLPs hard-coded 25 in/out features even though ``num_point`` is a
        constructor argument — they are now sized by ``num_point`` (identical
        behavior for the default 25-joint skeletons);
      * guarded the final rescale against a zero mask sum (previously a
        division by zero yielding NaN when every joint was dropped).
    """

    def __init__(self, num_point, block_size=7):
        super(DropBlock_Ske, self).__init__()
        self.keep_prob = 0.0
        self.block_size = block_size
        self.num_point = num_point
        # Attention MLP fed with the channel/time-MEAN per joint.
        # Sized by num_point (was hard-coded to 25).
        self.fc_1 = nn.Sequential(
            nn.Linear(in_features=num_point, out_features=num_point, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=num_point, out_features=num_point, bias=True),
        )
        # Attention MLP fed with the channel/time-MAX per joint.
        self.fc_2 = nn.Sequential(
            nn.Linear(in_features=num_point, out_features=num_point, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=num_point, out_features=num_point, bias=True),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, input, keep_prob, A):
        """Drop attention-selected joints and rescale the survivors.

        Args:
            input: float tensor of shape (n, c, t, v).
            keep_prob: probability of keeping activations; 1 disables dropping.
            A: (v, v) adjacency-like matrix used to spread each dropped seed
               joint to its neighbors — presumably the skeleton graph; verify
               against the caller.

        Returns:
            Tensor of the same shape as ``input``.
        """
        self.keep_prob = keep_prob
        # Identity in eval mode or when nothing should be dropped.
        if not self.training or self.keep_prob == 1:
            return input
        n, c, t, v = input.size()

        # Per-joint pooled statistics; detached so the drop decision does not
        # backpropagate through the pooling.
        input_attention_mean = torch.mean(
            torch.mean(input, dim=2), dim=1).detach()  # (n, v)
        input_attention_max = torch.max(input, dim=2)[0].detach()
        input_attention_max = torch.max(input_attention_max, dim=1)[0]  # (n, v)

        avg_out = self.fc_1(input_attention_mean)
        max_out = self.fc_2(input_attention_max)
        out = avg_out + max_out
        input_attention_out = self.sigmoid(out).view(n, 1, 1, self.num_point)
        input_a = input * input_attention_out

        # Normalized per-joint magnitude of the attended input drives the
        # per-joint drop probability (clamped into [0, 1] for bernoulli).
        input_abs = torch.mean(torch.mean(
            torch.abs(input_a), dim=2), dim=1).detach()
        input_abs = input_abs / torch.sum(input_abs) * input_abs.numel()
        gamma = 0.024
        M_seed = torch.bernoulli(torch.clamp(
            input_abs * gamma, min=0, max=1.0)).to(
            device=input.device, dtype=input.dtype)
        # Spread each dropped seed to graph neighbors, then binarize.
        M = torch.matmul(M_seed, A)
        M[M > 0.001] = 1.0
        M[M < 0.5] = 0.0
        mask = (1 - M).view(n, 1, 1, self.num_point)

        mask_sum = mask.sum()
        # Guard: if every joint was dropped the original rescale divided by
        # zero (NaN output); returning the input unchanged is the safe no-op.
        if mask_sum == 0:
            return input
        # Rescale survivors so the expected activation magnitude is preserved.
        return input * mask * mask.numel() / mask_sum
07-14
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值