# PPQ fine-tuning usage demo

from typing import Iterable
import torch
import torchvision
from ppq import (QuantizationSettingFactory,TargetPlatform,graphwise_error_analyse)
from ppq.api import QuantizationSettingFactory,quantize_torch_model
from ppq.api.interface import ENABLE_CUDA_KERNEL
from ppq.executor.torch import TorchExecutor
# PPQ currently provides two different algorithms for fine-tuning a network.
# These algorithms retrain the network weights using data from the calibration dataset.
# 1. A fine-tuned network is not guaranteed to keep its intermediate results aligned
#    with the original network; keep this in mind when running error analysis.
# 2. Wrapping the training in a `with ENABLE_CUDA_KERNEL():` clause will speed it up
#    significantly.
# 3. Cached training data is stored on the GPU, which may overflow GPU memory; you can
#    change a setting to move the cache device to CPU (see `collecting_device` below).

BATCHSIZE = 32                           # samples per calibration batch
INPUT_SHAPE = [BATCHSIZE,3,224,224]      # NCHW input shape fed to the model
DEVICE = 'cuda'                          # device used for calibration and fine-tuning
PLATFORM = TargetPlatform.PPL_CUDA_INT8  # target inference platform (INT8, PPL CUDA backend)
def load_calibration_dataset(num_batches: int = 32, shape=None) -> Iterable:
	"""Build a synthetic calibration dataset of random tensors.

	Args:
		num_batches: number of random batches to generate (default 32,
			matching the original hard-coded value).
		shape: shape of each batch tensor; defaults to the module-level
			``INPUT_SHAPE`` when omitted.

	Returns:
		A list of ``num_batches`` random float tensors of the given shape.
	"""
	batch_shape = INPUT_SHAPE if shape is None else shape
	return [torch.rand(size=batch_shape) for _ in range(num_batches)]
# Pre-build the calibration batches once; they are reused by both quantization runs below.
CALIBRATION = load_calibration_dataset()
def collate_fn(batch: torch.Tensor, device=None) -> torch.Tensor:
	"""Move a calibration batch onto the target device.

	Args:
		batch: an input tensor taken from the calibration dataloader.
		device: target device; defaults to the module-level ``DEVICE``.
			Passing e.g. ``'cpu'`` allows reuse of this helper off-GPU.

	Returns:
		The batch tensor moved to the requested device.
	"""
	return batch.to(DEVICE if device is None else device)
# Load a pretrained MobileNetV2 as the model to be quantized and fine-tuned.
model = torchvision.models.mobilenet.mobilenet_v2(pretrained=True)
model = model.to(DEVICE)
# PPQ provides LSQ-based network fine-tuning; this is the recommended approach.
# Use a Quant Setting to invoke the fine-tuning pass and tune its parameters.
QSetting = QuantizationSettingFactory.default_setting()
# Enable network fine-tuning (LSQ optimization).
QSetting.lsq_optimization = True
# Block size used when partitioning the graph into trainable chunks.
QSetting.lsq_optimization_setting.block_size = 4
# Initial learning rate; defaults to an Adam optimizer with a multistep
# scheduler using 0.1 decay.
QSetting.lsq_optimization_setting.lr = 1e-5
# Regularization strength (0 disables it).
QSetting.lsq_optimization_setting.gamma = 0
# Whether the quantization scale is trainable.
QSetting.lsq_optimization_setting.is_scale_trainable = True
# Device that holds each block's cached inputs/outputs during training.
# If you hit a CUDA OOM error, switch this to 'cpu'.
QSetting.lsq_optimization_setting.collecting_device = 'cuda'
with ENABLE_CUDA_KERNEL():
	quantized = quantize_torch_model(
		model=model,calib_dataloader=CALIBRATION,
		calib_steps=32,input_shape=INPUT_SHAPE,
		setting=QSetting,collate_fn=collate_fn,platform=PLATFORM,
		onnx_export_file='Output/model.onnx',device=DEVICE,verbose=0)
	# Report per-layer quantization error; note the fine-tuned weights may not
	# align with the original network's intermediate results (see note at top).
	graphwise_error_analyse(
		graph=quantized,
		running_device=DEVICE,
		dataloader=CALIBRATION,
		collate_fn=collate_fn)
# Baseline run: quantize a fresh copy of the model with the default settings
# (no LSQ fine-tuning) so its error analysis can be compared against the
# fine-tuned run above.
model= torchvision.models.mobilenet.mobilenet_v2(pretrained=True)
model = model.to(DEVICE)
QSetting = QuantizationSettingFactory.default_setting()
with ENABLE_CUDA_KERNEL():
	quantized = quantize_torch_model(
		model=model,calib_dataloader=CALIBRATION,
		calib_steps=32,input_shape=INPUT_SHAPE,
		setting=QSetting,collate_fn=collate_fn,platform=PLATFORM,
		onnx_export_file='Output/model.onnx',device=DEVICE,verbose=0)
	graphwise_error_analyse(
		graph=quantized,
		running_device=DEVICE,
		dataloader=CALIBRATION,
		collate_fn=collate_fn)
