from typing import Iterable
import torch
import torchvision
from ppq import TargetPlatform, graphwise_error_analyse
from ppq.api import QuantizationSettingFactory, quantize_torch_model
from ppq.api.interface import ENABLE_CUDA_KERNEL
from ppq.executor.torch import TorchExecutor
# PPQ currently ships two different algorithms for finetuning a network.
# Both retrain the network weights using data from the calibration dataset.
# 1. A finetuned network no longer guarantees that its intermediate results
#    align with the original model; keep this in mind when running error analysis.
# 2. Wrapping the training in a `with ENABLE_CUDA_KERNEL():` block speeds it up significantly.
# 3. Training caches intermediate data on the GPU, which may overflow GPU memory;
#    you can change the collecting_device parameter to cache on the CPU instead.
BATCHSIZE = 32
INPUT_SHAPE = [BATCHSIZE, 3, 224, 224]
DEVICE = 'cuda'
PLATFORM = TargetPlatform.PPL_CUDA_INT8

def load_calibration_dataset() -> Iterable:
    return [torch.rand(size=INPUT_SHAPE) for _ in range(32)]
CALIBRATION = load_calibration_dataset()

def collate_fn(batch: torch.Tensor) -> torch.Tensor:
    return batch.to(DEVICE)
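# In practice, calibration data should be real samples rather than random
# tensors. A minimal sketch, assuming a local image folder 'data/calib'
# (hypothetical path) and standard ImageNet preprocessing:
#
# from torch.utils.data import DataLoader
# from torchvision import datasets, transforms
# preprocess = transforms.Compose([
#     transforms.Resize(256),
#     transforms.CenterCrop(224),
#     transforms.ToTensor(),
#     transforms.Normalize(mean=[0.485, 0.456, 0.406],
#                          std=[0.229, 0.224, 0.225])])
# calib_set = datasets.ImageFolder('data/calib', transform=preprocess)
# CALIBRATION = DataLoader(calib_set, batch_size=BATCHSIZE, shuffle=False)
#
# Note: ImageFolder batches are (images, labels) tuples, so collate_fn would
# then need to return batch[0].to(DEVICE) instead of batch.to(DEVICE).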
model = torchvision.models.mobilenet.mobilenet_v2(pretrained=True)
model = model.to(DEVICE)
# PPQ provides LSQ-based network finetuning, which is the recommended approach.
# Use a QuantizationSetting to enable the finetuning pass and tune its parameters.
QSetting = QuantizationSettingFactory.default_setting()
# Enable the LSQ finetuning pass.
QSetting.lsq_optimization = True
# Block size used when the graph is partitioned into trainable blocks.
QSetting.lsq_optimization_setting.block_size = 4
# Initial learning rate; by default PPQ uses an Adam optimizer together with
# a multistep scheduler that decays the rate by a factor of 0.1.
QSetting.lsq_optimization_setting.lr = 1e-5
# Regularization strength.
QSetting.lsq_optimization_setting.gamma = 0
# Whether the quantization scale itself is trainable.
QSetting.lsq_optimization_setting.is_scale_trainable = True
# Device on which block inputs and outputs are cached during training;
# switch it to 'cpu' if you run into CUDA OOM errors.
QSetting.lsq_optimization_setting.collecting_device = 'cuda'
with ENABLE_CUDA_KERNEL():
    quantized = quantize_torch_model(
        model=model, calib_dataloader=CALIBRATION,
        calib_steps=32, input_shape=INPUT_SHAPE,
        setting=QSetting, collate_fn=collate_fn, platform=PLATFORM,
        onnx_export_file='Output/model.onnx', device=DEVICE, verbose=0)

    graphwise_error_analyse(
        graph=quantized,
        running_device=DEVICE,
        dataloader=CALIBRATION,
        collate_fn=collate_fn)
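# (Not part of the original demo) The quantized graph can also be executed
# directly with PPQ's TorchExecutor (imported above) to inspect its outputs;
# a minimal sketch using one calibration batch:
executor = TorchExecutor(graph=quantized, device=DEVICE)
sample_outputs = executor.forward(inputs=collate_fn(CALIBRATION[0]))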
# For comparison, quantize the same model again with the default setting
# (no LSQ finetuning) and run the same error analysis.
model = torchvision.models.mobilenet.mobilenet_v2(pretrained=True)
model = model.to(DEVICE)
QSetting = QuantizationSettingFactory.default_setting()

with ENABLE_CUDA_KERNEL():
    quantized = quantize_torch_model(
        model=model, calib_dataloader=CALIBRATION,
        calib_steps=32, input_shape=INPUT_SHAPE,
        setting=QSetting, collate_fn=collate_fn, platform=PLATFORM,
        onnx_export_file='Output/model.onnx', device=DEVICE, verbose=0)

    graphwise_error_analyse(
        graph=quantized,
        running_device=DEVICE,
        dataloader=CALIBRATION,
        collate_fn=collate_fn)
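# (Not part of the original demo) A quantized graph is usually exported for
# deployment with export_ppq_graph; a minimal sketch, assuming the 'Output'
# directory already exists:
from ppq.api import export_ppq_graph
export_ppq_graph(
    graph=quantized, platform=PLATFORM,
    graph_save_to='Output/quantized.onnx',
    config_save_to='Output/quantized.json')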
Demo: using finetuning in PPQ
This example shows how to quantize and finetune a pretrained MobileNetV2 model with the PPQ library on a CUDA device. LSQ optimization is enabled and its learning rate, block size and other parameters are adjusted, followed by a graphwise error analysis. If a CUDA out-of-memory error occurs, the cache device can be switched to the CPU.