最近在做骨骼行为识别,在使用mmaction2框架时,无意中发现了pyskl这个框架,于是这几天就一直在研究它。因为代码是分布式的,移植到Windows中途遇到很多问题,代码一运行就报千奇百怪的错;也看了很多博客,但回答得不是很全。不过功夫不负有心人,最终还是跑起来了。
这里还要多谢几位志同道合的博主:
下面我写一下步骤和修改的代码,希望大家多多交流,如果有做相同方向的同学,我希望可以私信我。
1、准备制作数据集(avi和mp4都可)
我只是简单的跑了一下,所以就只用了总共40个视频,4种类别(climb:0,falldown:1,handup:2,walk:3),要把类别顺序先确定好。分成训练集和测试集,我是按8:2来分的.
类似这种“视频名_类别”的命名方式:每种类别在'_'之前可以随意命名,在'_'之后要按照类别顺序(数字)命名,方便后面代码处理。
2、下面这段代码放到主路径下
(代码整合到一起了),后面步骤要用到这段代码。
import os
import decord
import json
from mmcv import load, dump
from pyskl.smp import mwlines
def writeJson(path_train, jsonpath):
    """Build a JSON annotation list for every video in a directory.

    Videos are expected to be named ``<anything>_<label>.<ext>`` where
    ``<label>`` is the integer class index. For each video an entry with
    ``vid_name`` / ``label`` / ``start_frame`` / ``end_frame`` is written.

    Args:
        path_train (str): directory that contains the video files.
        jsonpath (str): path of the output ``.json`` file.
    """
    output_list = []
    for video_name in os.listdir(path_train):
        # splitext (instead of a hard-coded ``.replace('.mp4', '')``)
        # makes .avi files work as well as .mp4, as promised in step 1.
        stem, _ext = os.path.splitext(video_name)
        entry = {}
        entry['vid_name'] = stem
        # Class index is the part after the last '_' in the stem.
        entry['label'] = int(stem.split('_')[-1])
        entry['start_frame'] = 0
        # decord gives the frame count without decoding every frame.
        vid = decord.VideoReader(os.path.join(path_train, video_name))
        entry['end_frame'] = len(vid)
        output_list.append(entry)
    with open(jsonpath, 'w') as outfile:
        json.dump(output_list, outfile)
def writeList(dirpath, name):
    """Write a ``<video path> <label>`` list file covering both splits.

    Scans ``<dirpath>/train`` and ``<dirpath>/test`` and writes lines like
    ``./data/Weizmann/train/walk1_3.mp4 3`` into ``<dirpath>/<name>``.

    Args:
        dirpath (str): dataset root containing ``train/`` and ``test/``.
        name (str): file name of the output list (e.g. ``Weizmann.list``).
    """

    def _split_lines(folder):
        # One "<path> <label>" line per video in *folder*; splitext keeps
        # this working for .avi files as well as .mp4.
        tmpl = os.path.join(folder, '{}').replace("\\", "/")
        lines = []
        for fname in os.listdir(folder):
            label = os.path.splitext(fname)[0].split('_')[-1]
            lines.append((tmpl + ' {}').format(fname, label))
        return lines

    path_train = os.path.join(dirpath, 'train').replace("\\", "/")
    path_test = os.path.join(dirpath, 'test').replace("\\", "/")
    # Train lines first, then test lines, matching the original ordering.
    lines = _split_lines(path_train) + _split_lines(path_test)
    mwlines(lines, os.path.join(dirpath, name).replace("\\", "/"))
def traintest(dirpath, pklname, newpklname):
    """Merge the train/test split with pose annotations into one pickle.

    Reads ``train.json`` / ``test.json`` and the annotation pickle from
    *dirpath* and writes ``<dirpath>/<newpklname>`` holding a dict with
    keys ``split`` (``xsub_train`` / ``xsub_val`` vid_name lists) and
    ``annotations``.

    Args:
        dirpath (str): dataset root (e.g. ``./data/Weizmann``).
        pklname (str): name of the input annotation pickle.
        newpklname (str): name of the merged output pickle.
    """
    # Build explicit paths instead of os.chdir so the caller's working
    # directory is left untouched (the chdir used to leak to the caller).
    train = load(os.path.join(dirpath, 'train.json'))
    test = load(os.path.join(dirpath, 'test.json'))
    annotations = load(os.path.join(dirpath, pklname))
    split = dict()
    split['xsub_train'] = [x['vid_name'] for x in train]
    split['xsub_val'] = [x['vid_name'] for x in test]
    dump(dict(split=split, annotations=annotations),
         os.path.join(dirpath, newpklname))
if __name__ == '__main__':
    # Typical workflow (run the steps one at a time):
    #   1. writeJson(...) on train/ and on test/ -> train.json / test.json
    #   2. writeList('./data/Weizmann', 'Weizmann.list')
    #   3. traintest('./data/Weizmann', 'train.pkl', 'My_xsub_stgn++.pkl')
    writeJson('./data/Weizmann/train', 'train.json')
运行两次writeJson函数在Weizmann下生成train.json和test.json,格式如下
[
{"vid_name": "climb32_0",
"label": 0,
"start_frame": 0,
"end_frame": 298}
]
3、运行writeList函数
在Weizmann下生成Weizmann.list文件,如下所示
./data/Weizmann/train/walk1_3.mp4 3
./data/Weizmann/train/walk2_3.mp4 3
./data/Weizmann/train/walk3_3.mp4 3
./data/Weizmann/train/walk4_3.mp4 3
./data/Weizmann/train/walk5_3.mp4 3
./data/Weizmann/train/walk6_3.mp4 3
./data/Weizmann/train/walk7_3.mp4 3
./data/Weizmann/train/walk8_3.mp4 3
./data/Weizmann/test/climb32_0.mp4 0
./data/Weizmann/test/climb34_0.mp4 0
./data/Weizmann/test/falldown66_1.mp4 1
./data/Weizmann/test/falldown75_1.mp4 1
./data/Weizmann/test/handsup21_2.mp4 2
./data/Weizmann/test/handsup22_2.mp4 2
./data/Weizmann/test/walk26_3.mp4 3
./data/Weizmann/test/walk27_3.mp4 3
4、生成pkl文件:2d骨架
分布式改为单GPU,这里先修改tools/data/custom_2d_skeleton.py文件,
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
# import pdb
import pyskl
from mmdet.apis import inference_detector, init_detector
from mmpose.apis import inference_top_down_pose_model, init_pose_model
import decord
import mmcv
import numpy as np
# import torch.distributed as dist
from tqdm import tqdm
import mmdet
# import mmpose
# from pyskl.smp import mrlines
import cv2
from pyskl.smp import mrlines
def extract_frame(video_path):
    """Decode a video into a list of HxWx3 uint8 numpy frames."""
    reader = decord.VideoReader(video_path)
    frames = []
    for img in reader:
        frames.append(img.asnumpy())
    return frames
def detection_inference(model, frames):
    """Run human detection on every frame.

    Args:
        model: an initialized mmdet detector.
        frames (list): decoded frames (numpy arrays).

    Returns:
        list: one mmdet detection result per frame.
    """
    # .cuda() is a no-op if the model is already on the GPU.
    model = model.cuda()
    # Comprehension instead of the manual append loop (same results).
    return [inference_detector(model, frame) for frame in frames]
def pose_inference(model, frames, det_results):
    """Run top-down 2D pose estimation given per-frame person boxes.

    Args:
        model: an initialized mmpose top-down model.
        frames (list): decoded frames.
        det_results (list): per-frame arrays of person boxes (xyxy+score).

    Returns:
        np.ndarray: keypoints of shape (num_person, total_frames, 17, 3),
        where the last axis is (x, y, score); COCO layout (17 joints).
    """
    model = model.cuda()
    assert len(frames) == len(det_results)
    total_frames = len(frames)
    # default=0 guards against an empty video / no detections at all;
    # the original max([...]) raised ValueError on an empty sequence.
    num_person = max((len(x) for x in det_results), default=0)
    kp = np.zeros((num_person, total_frames, 17, 3), dtype=np.float32)
    for i, (f, d) in enumerate(zip(frames, det_results)):
        # Align input format expected by mmpose: a list of bbox dicts.
        d = [dict(bbox=x) for x in list(d)]
        pose = inference_top_down_pose_model(model, f, d, format='xyxy')[0]
        for j, item in enumerate(pose):
            kp[j, i] = item['keypoints']
    return kp
# Resolve the pyskl install location so the demo config files shipped with
# the package can be used as defaults.
pyskl_root = osp.dirname(pyskl.__path__[0])
# Default human-detection model: person-only Faster R-CNN (mmdet).
default_det_config = f'{pyskl_root}/demo/faster_rcnn_r50_fpn_1x_coco-person.py'
default_det_ckpt = (
    'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/'
    'faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth')
# Default 2D pose model: HRNet-W32 trained on COCO (mmpose).
default_pose_config = f'{pyskl_root}/demo/hrnet_w32_coco_256x192.py'
default_pose_ckpt = (
    'https://download.openmmlab.com/mmpose/top_down/hrnet/'
    'hrnet_w32_coco_256x192-c78dce93_20200708.pth')
def parse_args():
    """Build and evaluate the CLI parser for 2D skeleton extraction."""
    parser = argparse.ArgumentParser(
        description='Generate 2D pose annotations for a custom video dataset')
    # Human-detection model (mmdet): config + checkpoint.
    parser.add_argument(
        '--det-config',
        default=default_det_config,
        help='human detection config file path (from mmdet)')
    parser.add_argument(
        '--det-ckpt',
        default=default_det_ckpt,
        help='human detection checkpoint file/url')
    # Pose model (mmpose): config + checkpoint.
    parser.add_argument('--pose-config', type=str, default=default_pose_config)
    parser.add_argument('--pose-ckpt', type=str, default=default_pose_ckpt)
    # Keep only detection boxes scoring above this threshold.
    parser.add_argument('--det-score-thr', type=float, default=0.7)
    # Keep only detection boxes with a large enough area.
    parser.add_argument('--det-area-thr', type=float, default=1300)
    # Each line of --video-list is either "xxx.mp4" (inference only) or
    # "xxx.mp4 label" (usable for training & testing); all lines must
    # share the same format.
    parser.add_argument('--video-list', type=str, help='the list of source videos')
    # Output path; must end with '.pkl'.
    parser.add_argument('--out', type=str, help='output pickle name')
    parser.add_argument('--tmpdir', type=str, default='tmp')
    # Leftover from the distributed version; unused in single-GPU mode.
    parser.add_argument('--local_rank', type=int, default=1)
    return parser.parse_args()
def main():
    """Single-GPU pipeline: read the video list, detect persons, estimate
    2D poses, and dump one annotation pickle usable by pyskl."""
    args = parse_args()
    assert args.out.endswith('.pkl')

    lines = mrlines(args.video_list)
    lines = [x.split() for x in lines]
    # Each line is "path" (inference only) or "path label" (train/test).
    assert len(lines[0]) in [1, 2]
    if len(lines[0]) == 1:
        annos = [dict(frame_dir=osp.basename(x[0]).split('.')[0], filename=x[0]) for x in lines]
    else:
        annos = [dict(frame_dir=osp.basename(x[0]).split('.')[0], filename=x[0], label=int(x[1])) for x in lines]

    # Non-distributed: one worker processes everything.
    rank = 0
    world_size = 1
    my_part = annos

    # BUGFIX: the tmpdir was only created in the (commented-out) distributed
    # branch, so the mmcv.dump below crashed whenever tmp/ did not exist.
    os.makedirs(args.tmpdir, exist_ok=True)

    det_model = init_detector(args.det_config, args.det_ckpt, 'cuda')
    assert det_model.CLASSES[0] == 'person', 'A detector trained on COCO is required'
    pose_model = init_pose_model(args.pose_config, args.pose_ckpt, 'cuda')

    for anno in tqdm(my_part):
        frames = extract_frame(anno['filename'])
        det_results = detection_inference(det_model, frames)
        # Keep only class 0 ('person') of the COCO detector.
        det_results = [x[0] for x in det_results]
        for i, res in enumerate(det_results):
            # Filter boxes with small scores.
            res = res[res[:, 4] >= args.det_score_thr]
            # Filter boxes with small areas (columns are x1,y1,x2,y2,score).
            box_areas = (res[:, 3] - res[:, 1]) * (res[:, 2] - res[:, 0])
            assert np.all(box_areas >= 0)
            res = res[box_areas >= args.det_area_thr]
            det_results[i] = res

        pose_results = pose_inference(pose_model, frames, det_results)
        shape = frames[0].shape[:2]
        anno['img_shape'] = anno['original_shape'] = shape
        anno['total_frames'] = len(frames)
        anno['num_person_raw'] = pose_results.shape[0]
        # float16 keeps the resulting pickle small.
        anno['keypoint'] = pose_results[..., :2].astype(np.float16)
        anno['keypoint_score'] = pose_results[..., 2].astype(np.float16)
        anno.pop('filename')

    mmcv.dump(my_part, osp.join(args.tmpdir, f'part_{rank}.pkl'))

    # With world_size == 1 this just re-reads part_0.pkl and writes args.out,
    # but the merge logic is kept so the distributed flow can be restored.
    if rank == 0:
        parts = [mmcv.load(osp.join(args.tmpdir, f'part_{i}.pkl')) for i in range(world_size)]
        rem = len(annos) % world_size
        if rem:
            for i in range(rem, world_size):
                parts[i].append(None)
        ordered_results = []
        for res in zip(*parts):
            ordered_results.extend(list(res))
        ordered_results = ordered_results[:len(annos)]
        mmcv.dump(ordered_results, args.out)
if __name__ == '__main__':
    # All configuration comes from the command line -- see parse_args().
    main()
分布式改为单GPU,这里先修改pyskl/apis/train.py文件,
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import os
import os.path as osp
import time
import torch
# import torch.distributed as dist
from mmcv.engine import single_gpu_test # 替换为单机测试函数
from mmcv.parallel import MMDataParallel
from mmcv.runner import EpochBasedRunner, OptimizerHook, build_optimizer, get_dist_info
from mmdet.core import EvalHook
from ..core import DistEvalHook # 可能需要调整为非分布式评估hook
from ..datasets import build_dataloader, build_dataset
from ..utils import cache_checkpoint, get_root_logger
def init_random_seed(seed=None, device='cuda'):
    """Initialize the random seed.

    If *seed* is given it is returned unchanged; otherwise a fresh seed is
    drawn. In single-machine mode no cross-process synchronization of the
    seed is needed (*device* is kept only for API compatibility with the
    distributed version).

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): Unused here; retained for interface compatibility.

    Returns:
        int: Seed to be used.
    """
    if seed is None:
        # Draw one seed locally -- no distributed broadcast required.
        seed = np.random.randint(2**31)
    return seed
def train_model(model,
                dataset,
                cfg,
                validate=False,
                test=dict(test_best=False, test_last=False),
                timestamp=None,
                meta=None):
    """Train model entry function (single-GPU variant of pyskl's loop).

    Args:
        model (nn.Module): The model to be trained.
        dataset (:obj:`Dataset`): Train dataset.
        cfg (dict): The config dict for training.
        validate (bool): Whether to do evaluation. Default: False.
        test (dict): The testing option, with two keys: test_last & test_best.
            The value is True or False, indicating whether to test the
            corresponding checkpoint.
            Default: dict(test_best=False, test_last=False).
        timestamp (str | None): Local time for runner. Default: None.
        meta (dict | None): Meta dict to record some important information.
            Default: None
    """
    logger = get_root_logger(log_level=cfg.get('log_level', 'INFO'))
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    dataloader_setting = dict(
        videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
        workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
        persistent_workers=cfg.data.get('persistent_workers', False),
        seed=cfg.seed)
    dataloader_setting.update(cfg.data.get('train_dataloader', {}))
    data_loaders = [
        build_dataloader(ds, **dataloader_setting) for ds in dataset
    ]
    # put model on gpus -- MMDataParallel wraps a single-GPU model; the
    # MMDistributedDataParallel path has been removed
    model = MMDataParallel(model.cuda())
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    Runner = EpochBasedRunner
    runner = Runner(
        model,
        optimizer=optimizer,
        work_dir=cfg.work_dir,
        logger=logger,
        meta=meta)
    runner.timestamp = timestamp
    # A plain dict means the default OptimizerHook; a dict with a 'type'
    # key is passed through as an already-specified hook config.
    if 'type' not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    # Register training hooks (DistSamplerSeedHook is no longer used in
    # single-GPU mode)
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))
    eval_hook = None
    if validate:
        eval_cfg = cfg.get('evaluation', {})
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        dataloader_setting['shuffle'] = False
        val_dataloader = build_dataloader(val_dataset, **dataloader_setting)
        eval_hook = EvalHook(val_dataloader, **eval_cfg)  # non-distributed eval hook
        runner.register_hook(eval_hook)
    if cfg.get('resume_from', None):
        runner.resume(cfg.resume_from)
    elif cfg.get('load_from', None):
        cfg.load_from = cache_checkpoint(cfg.load_from)
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
    # Distributed barriers and the post-training wait were removed here
    # time.sleep(2)
    if test['test_last'] or test['test_best']:
        best_ckpt_path = None
        if test['test_best']:
            assert eval_hook is not None
            best_ckpt_path = None
            # Checkpoints saved by EvalHook's save_best are named 'best_*.pth'
            ckpt_paths = [x for x in os.listdir(cfg.work_dir) if 'best' in x]
            ckpt_paths = [x for x in ckpt_paths if x.endswith('.pth')]
            if len(ckpt_paths) == 0:
                logger.info('Warning: test_best set, but no ckpt found')
                test['test_best'] = False
                if not test['test_last']:
                    return
            elif len(ckpt_paths) > 1:
                # Several 'best' checkpoints: keep the one from the latest epoch
                epoch_ids = [
                    int(x.split('epoch_')[-1][:-4]) for x in ckpt_paths
                ]
                best_ckpt_path = ckpt_paths[np.argmax(epoch_ids)]
            else:
                best_ckpt_path = ckpt_paths[0]
            if best_ckpt_path:
                best_ckpt_path = osp.join(cfg.work_dir, best_ckpt_path)
        test_dataset = build_dataset(cfg.data.test, dict(test_mode=True))
        dataloader_setting['shuffle'] = False
        test_dataloader = build_dataloader(test_dataset, **dataloader_setting)
        names, ckpts = [], []
        if test['test_last']:
            names.append('last')
            ckpts.append(None)  # None -> evaluate the weights left in memory
        if test['test_best']:
            names.append('best')
            ckpts.append(best_ckpt_path)
        for name, ckpt in zip(names, ckpts):
            if ckpt is not None:
                runner.load_checkpoint(ckpt)
            # single_gpu_test replaces multi_gpu_test for the
            # non-distributed setting
            outputs = single_gpu_test(runner.model, test_dataloader)
            if name == 'last' or name == 'best':
                out = osp.join(cfg.work_dir, f'{name}_pred.pkl')
                test_dataset.dump_results(outputs, out)
            eval_cfg = cfg.get('evaluation', {})
            # Strip scheduling-only keys so only metric kwargs reach evaluate()
            for key in [
                    'interval', 'tmpdir', 'start',
                    'save_best', 'rule', 'by_epoch', 'broadcast_bn_buffers'
            ]:
                eval_cfg.pop(key, None)
            eval_res = test_dataset.evaluate(outputs, **eval_cfg)
            logger.info(f'Testing results of the {name} checkpoint')
            for metric_name, val in eval_res.items():
                logger.info(f'{metric_name}: {val:.04f}')
在控制台输入:
python tools/data/custom_2d_skeleton.py --video-list ./data/Weizmann/Weizmann.list --out ./data/Weizmann/train.pkl
这一步不要忘了
运行第二步给的一段代码traintest函数,生成训练要用的My_xsub_stgn++.pkl文件
5、训练
选定要使用的模型,我选择了stgcn++,使用了configs/stgcn++/stgcn++_ntu120_xsub_hrnet/j.py,改了一下
# Model definition. Set num_classes to the number of action categories in
# your own dataset -- 4 here (climb:0, falldown:1, handup:2, walk:3).
model = dict(
    type='RecognizerGCN',
    backbone=dict(
        type='STGCN',
        gcn_adaptive='init',
        gcn_with_res=True,
        tcn_type='mstcn',
        graph_cfg=dict(layout='coco', mode='spatial')),
    cls_head=dict(type='GCNHead', num_classes=4, in_channels=256))
dataset_type = 'PoseDataset'
# ann_file: path of the pickle produced by the traintest() helper above.
ann_file = 'data/Weizmann/My_xsub_stgn++.pkl'
# num_person caps how many skeletons per clip are fed to the GCN.
# NOTE(review): presumably the number of persons in the video -- 1 works
# for single-actor clips; confirm against FormatGCNInput's documentation.
train_pipeline = [
    dict(type='PreNormalize2D'),
    dict(type='GenSkeFeat', dataset='coco', feats=['j']),
    dict(type='UniformSample', clip_len=100),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=1),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize2D'),
    dict(type='GenSkeFeat', dataset='coco', feats=['j']),
    dict(type='UniformSample', clip_len=100, num_clips=1, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=1),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize2D'),
    dict(type='GenSkeFeat', dataset='coco', feats=['j']),
    dict(type='UniformSample', clip_len=100, num_clips=10, test_mode=True),
    dict(type='PoseDecode'),
    dict(type='FormatGCNInput', num_person=1),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
# split='xsub_train' / split='xsub_val' must match the keys that
# traintest() wrote into My_xsub_stgn++.pkl.
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=1,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(type=dataset_type, ann_file=ann_file, pipeline=train_pipeline, split='xsub_train')),
    val=dict(type=dataset_type, ann_file=ann_file, pipeline=val_pipeline, split='xsub_val'),
    test=dict(type=dataset_type, ann_file=ann_file, pipeline=test_pipeline, split='xsub_val'))
# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# total_epochs controls how long training runs
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
# runtime settings
log_level = 'INFO'
# work_dir: where checkpoints and logs are saved ('leibie4' = 4 classes)
work_dir = './work_dirs/stgcn++/stgcn++_ntu120_xsub_hrnet/j_leibie4_1'
命令行输入
python tools/train.py configs/stgcn++/stgcn++_ntu120_xsub_hrnet/j.py --validate --test-last --test-best
会在work_dirs下生成模型权重。
6、建立数据集标签文件
需要在 tools/data/label_map文件夹下建立数据集标签文件mydata.txt,从小到大排列,这样得到的输出视频画面中的标签才不会错。
climb
falldown
handup
walk
7、使用自己训练好的权重模型测试视频
python demo/demo_skeleton.py demo/put.mp4 demo/out.mp4 --config ./configs/stgcn++/stgcn++_ntu120_xsub_hrnet/j.py --checkpoint ./work_dirs/stgcn++/stgcn++_ntu120_xsub_hrnet/j_leibie4_1/best_top1_acc_epoch_11.pth --label-map ./tools/data/label_map/mydata.txt
之后输出的视频out.mp4就在demo文件夹里面了。