torch.round()


torch.round(input, *, decimals=0, out=None)

What it does

Rounds elements of input to the nearest integer.
Note ⚠️:

  • When a value is exactly halfway between two integers (e.g. round(2.5) is 2), this function rounds half to even to break the tie (zero is treated as even).
  • The decimals argument can be positive or negative: it is the number of decimal places to round to (default: 0). If negative, it specifies the number of digits to the left of the decimal point.

Examples

# Default decimals=0
>>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
tensor([ 5.,  -2.,  9., -8.])

>>> # Values equidistant from two integers are rounded towards
>>> #   the nearest even value (zero is treated as even)
>>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
tensor([-0., 0., 2., 2.])
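
>>> # For comparison, Python's built-in round uses the same
>>> #   round-half-to-even rule
>>> round(0.5), round(1.5), round(2.5)
(0, 2, 2)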

>>> # A positive decimals argument rounds to that decimal place
>>> torch.round(torch.tensor([0.1234567]), decimals=3)
tensor([0.1230])

>>> # A negative decimals argument rounds to the left of the decimal
>>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
tensor([1000.])
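
In versions of PyTorch that support the decimals argument, rounding to d places behaves like scaling by 10**d, rounding to the nearest integer, and scaling back down, up to floating-point precision. A minimal sketch of that equivalence:

>>> x = torch.tensor([0.1234567, 1200.1234567])
>>> d = 3
>>> torch.allclose(torch.round(x, decimals=d), torch.round(x * 10**d) / 10**d)
True

The snippet below (from the original post) puts rounding to work in a simple quantization-aware-training setup: UpdateRange keeps a running min/max of the tensors it sees, Round wraps a non-differentiable integer conversion in a custom autograd Function whose backward pass is the identity (a straight-through estimator), and Quantizer picks the largest power-of-two scale at which the observed range still fits the target bit width.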
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 19:13:21 2021
@author: LXM
"""
import torch
import torch.nn as nn
from torch.autograd import Function


class UpdateRange(nn.Module):
    """Tracks the running min/max of every tensor passed through it."""

    def __init__(self, device):
        super(UpdateRange, self).__init__()
        self.device = device
        self.flag = 0  # 0 until the first batch has been observed
        self.fmin = torch.zeros(1, dtype=torch.float32, device=self.device)
        self.fmax = torch.zeros(1, dtype=torch.float32, device=self.device)

    def Update(self, fmin, fmax):
        if self.flag == 0:
            # First observation: take the batch statistics as-is.
            self.flag = 1
            new_fmin = fmin
            new_fmax = fmax
        else:
            # Afterwards: only ever widen the running range.
            new_fmin = torch.min(fmin, self.fmin)
            new_fmax = torch.max(fmax, self.fmax)
        self.fmin.copy_(new_fmin)
        self.fmax.copy_(new_fmax)

    @torch.no_grad()
    def forward(self, input):
        self.Update(torch.min(input), torch.max(input))


class Round(Function):
    """Integer conversion with a straight-through estimator (STE) backward."""

    @staticmethod
    def forward(ctx, input):
        # output = torch.round(input)  # round to nearest (half to even)
        # output = torch.floor(input)  # round down
        output = input.int().float()   # truncate toward zero
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # STE: pretend the rounding step was the identity function.
        return grad_output.clone()


class Quantizer(nn.Module):
    """Symmetric fake-quantizer with a power-of-two scale."""

    def __init__(self, bits, device):
        super(Quantizer, self).__init__()
        self.bits = bits
        self.scale = 1
        self.UpdateRange = UpdateRange(device)
        # Symmetric signed integer range, e.g. [-127, 127] for bits=8.
        self.qmin = torch.tensor((-((1 << (bits - 1)) - 1)), device=device)
        self.qmax = torch.tensor((+((1 << (bits - 1)) - 1)), device=device)

    def round(self, input):
        return Round.apply(input)

    def Quantization(self):
        # Pick the largest power-of-two scale such that the observed
        # float range, once scaled up, still fits the integer range.
        quant_range = float(1 << (self.bits - 1))
        float_range = torch.max(torch.abs(self.UpdateRange.fmin),
                                torch.abs(self.UpdateRange.fmax))
        scale = 1
        for i in range(32):
            if torch.round(float_range * (1 << i)) < quant_range:
                scale = 1 << i
            else:
                break
        self.scale = scale

    def forward(self, input):
        if self.training:
            self.UpdateRange(input)
            self.Quantization()
        # Fake quantization: scale up, round, clamp, scale back down.
        output = torch.clamp(self.round(input * self.scale),
                             self.qmin, self.qmax) / self.scale
        return output
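
A minimal usage sketch (the bit width, device, and input below are illustrative, not from the original post):

# Quantize activations to 8 bits on CPU (illustrative values).
quantizer = Quantizer(bits=8, device=torch.device("cpu"))
quantizer.train()    # training mode: observe the range, refresh the scale
x = torch.randn(4, 4)
y = quantizer(x)     # fake-quantized tensor, same shape and dtype as x
quantizer.eval()     # eval mode: reuse the frozen scale and range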