```python
# Copyright (c) OpenMMLab. All rights reserved.
# NNI pruning integration with OpenMMLab (mmyolo).
import argparse
import copy
import logging
import os
import os.path as osp

import torch
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.runner import Runner
from torch.optim import Adam
from mmyolo.registry import RUNNERS
from mmyolo.utils import is_metainfo_lower
import nni
from nni.compression import TorchEvaluator
from nni.compression.pruning import SlimPruner, L1NormPruner
from nni.compression.utils import auto_set_denpendency_group_ids
from nni.compression.speedup import ModelSpeedup
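
# Workflow: build an mmengine Runner from the mmyolo config, prune a deep
# copy of its model with NNI's L1NormPruner, make the pruning permanent via
# ModelSpeedup, then launch training.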


def parse_args():
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument(
        '--config',
        default='/home/lxt/open-mmlab/O-TRADE/algorithms/mmyolo/configs/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py',
        help='train config file path')
    parser.add_argument(
        '--work-dir',
        default='workspace/mmrazor/yolov5m',
        help='the dir to save logs and models')
    parser.add_argument(
        '--amp',
        action='store_true',
        default=False,
        help='enable automatic-mixed-precision training')
    parser.add_argument(
        '--resume',
        nargs='?',
        type=str,
        const='auto',
        help='If a checkpoint path is specified, resume from it; otherwise, '
        'try to auto-resume from the latest checkpoint in the work '
        'directory.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args


def main():
    args = parse_args()

    # load config
    cfg = Config.fromfile(args.config)
    # replace the ${key} with the value of cfg.key
    # cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])

    # enable automatic-mixed-precision training
    if args.amp is True:
        optim_wrapper = cfg.optim_wrapper.type
        if optim_wrapper == 'AmpOptimWrapper':
            print_log(
                'AMP training is already enabled in your config.',
                logger='current',
                level=logging.WARNING)
        else:
            assert optim_wrapper == 'OptimWrapper', (
                '`--amp` is only supported when the optimizer wrapper type '
                f'is `OptimWrapper` but got {optim_wrapper}.')
            cfg.optim_wrapper.type = 'AmpOptimWrapper'
            cfg.optim_wrapper.loss_scale = 'dynamic'

    # resume is determined in this priority: resume from > auto_resume
    if args.resume == 'auto':
        cfg.resume = True
        cfg.load_from = None
    elif args.resume is not None:
        cfg.resume = True
        cfg.load_from = args.resume

    # determine whether the custom metainfo fields are all lowercase
    is_metainfo_lower(cfg)

    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)
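
    # NNI pruning config: target 50% channel sparsity on every Conv2d layer,
    # but keep the detection-head prediction convs, the neck output layers
    # and the loss modules at their original width.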
    config_list = [{
        'op_types': ['Conv2d'],
        'sparse_ratio': 0.5,
        'exclude_op_names': [
            'bbox_head.head_module.convs_pred.0',
            'bbox_head.head_module.convs_pred.1',
            'bbox_head.head_module.convs_pred.2',
            'neck.out_layers.0',
            'neck.out_layers.1',
            'neck.out_layers.2',
            'bbox_head.loss_cls',
            'bbox_head.loss_bbox',
            'bbox_head.loss_obj',
        ],
    }]
    # runner.train()

    # prune a deep copy so the runner keeps an intact reference to the model
    model = copy.deepcopy(runner.model)
    dummy_input_shape = cfg['img_scale']
    dummy_input = torch.rand(1, 3, dummy_input_shape[0],
                             dummy_input_shape[1]).to('cuda')
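    # trace the model with the dummy input so that layers whose outputs are
    # coupled (e.g. through residual adds or concats) land in the same
    # dependency group and are pruned with matching channel indices; the
    # misspelled `denpendency` is NNI's own API name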
    config_list = auto_set_denpendency_group_ids(model, config_list,
                                                 dummy_input)
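
    # The evaluator below is only consumed by training-aware pruners such as
    # the commented-out SlimPruner; L1NormPruner works on weight magnitudes
    # alone. Note that TorchEvaluator expects a training function plus a
    # training-step callable that returns the loss, so wiring in
    # `runner.train` and `runner.train_loop` as done here is a rough
    # placeholder rather than a conformant adapter.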
    optimize_params = [
        param for param in model.parameters() if param.requires_grad
    ]
    optimizer = nni.trace(Adam)(optimize_params, lr=0.001)
    training_step = runner.train_loop
    evaluator = TorchEvaluator(runner.train, optimizer, training_step)
    # pruner = SlimPruner(model, config_list, evaluator, training_steps=100)
    pruner = L1NormPruner(model, config_list)
    _, masks = pruner.compress()
    pruner.unwrap_model()
    model = ModelSpeedup(model, dummy_input, masks).speedup_model()
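    # ModelSpeedup replays the masks through the traced graph and physically
    # removes the zeroed channels, so `model` is now a genuinely smaller
    # network rather than a masked one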
    # print('Pruned model parameter number: ',
    #       sum(param.numel() for param in model.parameters()))
    # print('Pruned model without finetuning acc: ',
    #       evaluate(model, test_loader), '%')
    #
    # optimizer = prepare_optimizer(model)
    # train(model, optimizer, training_step, lr_scheduler=None,
    #       max_steps=None, max_epochs=10)
    # _, test_loader = prepare_dataloader()
    # print('Pruned model after 10 epochs finetuning acc: ',
    #       evaluate(model, test_loader), '%')
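
    # NOTE: `runner.model` still holds the original dense network; the pruned
    # copy in `model` is never attached back to the runner, so the call below
    # trains the unpruned model. Re-attaching the pruned copy (e.g. something
    # like `runner.model = model` before `runner.train()`) is one possible
    # fix, though how mmengine wraps the model at train time may need care.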
    # start training
    runner.train()


if __name__ == '__main__':
    main()
```