STATIC QUANTIZATION WITH EAGER MODE IN PYTORCH

本文介绍了如何在PyTorch中使用Eager Mode进行静态量化,这是一种优化深度学习模型的方法,旨在减少内存消耗和提高推理速度。通过链接提供的详细教程,读者可以学习到量化的基本概念和实施步骤。
摘要由CSDN通过智能技术生成

关于pytorch的量化,可以看https://blog.csdn.net/zlgahu/article/details/104662203/

import numpy as np
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import os
import time
import sys
import torch.quantization
import requests
import warnings

#用于定义需要量化的参数
from torch.quantization import QuantStub,DeQuantStub

#确保通道数能被divisor整除
def _make_divisible(v,divisor,min_value=None):
	if min_value is None:
		min_value = divisor
	new_v = max(min_value,int(v + divisor / 2) // divisor * divisor)
	#防止损失过多通道
	if new_v < 0.9 * v:
		new_v += divisor
	return new_v
#定义几个网络的组合层
class ConvBNReLU(nn.Sequential):
	"""Conv2d -> BatchNorm2d -> ReLU block with 'same'-style padding.

	ReLU is deliberately non-inplace (inplace ops interfere with the
	module fusion used during static quantization).
	"""
	def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
		# 'same' padding for odd kernel sizes.
		pad = (kernel_size - 1) // 2
		conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad,
		                 groups=groups, bias=False)
		bn = nn.BatchNorm2d(out_planes, momentum=0.1)
		relu = nn.ReLU(inplace=False)
		super().__init__(conv, bn, relu)
		
#定义残差块
class InvertedResidual(nn.Module):
	"""MobileNetV2 inverted-residual block: expand -> depthwise -> project.

	A residual (skip) connection is used only when stride == 1 and the
	input/output channel counts match. The elementwise add goes through
	FloatFunctional so the stateless addition becomes an observable module
	that static quantization can calibrate and convert.

	Args:
		inp: number of input channels.
		oup: number of output channels.
		stride: depthwise-conv stride; must be 1 or 2.
		expand_ratio: channel expansion factor for the hidden layer.
	"""
	def __init__(self, inp, oup, stride, expand_ratio):
		super().__init__()
		self.stride = stride
		assert stride in [1, 2]

		hidden_dim = int(round(inp * expand_ratio))
		self.use_res_connect = self.stride == 1 and inp == oup

		layers = []
		if expand_ratio != 1:
			# Pointwise expansion to hidden_dim channels.
			layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))

		layers.extend([
			# Depthwise convolution (groups == channels).
			ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
			# Linear pointwise projection (no activation).
			# BUG FIX: original passed the undefined name `bias` positionally
			# (which would land in the `dilation` slot); it must be bias=False.
			nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
			nn.BatchNorm2d(oup, momentum=0.1),
		])

		self.conv = nn.Sequential(*layers)
		# BUG FIX: original misspelled FloatFunctional as "FloadFunctional".
		# FloatFunctional wraps the stateless add so quantization can
		# attach observers to it.
		self.skip_add = nn.quantized.FloatFunctional()

	def forward(self, x):
		"""Apply the block; add the input back when the skip path is active."""
		if self.use_res_connect:
			return self.skip_add.add(x, self.conv(x))
		else:
			return self.conv(x)

#定义网络结构
class MobileNetV2(nn.Module):
	def __init__(self,num_classes=1000,width_mult=1.0,inverted_residual_setting=None,round_nearest=8):
		super().__init__()
		block = InvertedResidual
		input_channel = 32
		last_channel = 1280
		
		#定义网络结构参数
		if inverted_residual_setting is None:
			inverted_residual_setting = [
					[1, 16, 1, 1],
                	[6, 24, 2, 2],
                	[6, 32, 3, 2],
                	[6, 64, 4, 2],
                	[6, 96, 3, 1],
                	[6, 160, 3, 2],
                	[6, 320, 1, 1],
			]

		if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值