Training your own dataset with pyskl

Training pyskl on your own dataset really comes with a lot of problems, but we got it working in the end, which felt great. A lot of the credit goes to our group's teamwork, and also to the generous people sharing their knowledge online. I'm writing this hoping it helps anyone working on the same thing, and offers some inspiration to those learning. Note that this post is only a supplement to the existing pyskl articles.

Note: the post I followed most closely is this one:

Pyskl自定义数据集_墨末..的博客-CSDN博客

1. Prepare the dataset, following this post:

使用pyskl的stgcn++训练自己的数据集_大脸猫105的博客-CSDN博客

The dataset is split into two parts: train, used for training, and test, used for testing. First create a new folder named Weizmann under pyskl-main/tools/data/. Then create two subfolders inside Weizmann, one named test and the other named train, like this:

The two folder paths are:
E:\下载\pyskl-main\tools\data\Weizmann\test
E:\下载\pyskl-main\tools\data\Weizmann\train

Once the folders exist, put the videos inside. I used a 7:2 train:test split: each class has nine videos, seven for training and two for testing, and there are nine such classes in total. You can arrange this however you like (a splitting sketch is given below), but the training set must be larger than the test set; there are probably guidelines on the exact ratio that you can look up.
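If you don't want to split the videos by hand, here is a minimal sketch (my own helper, not part of pyskl) that copies a flat folder of videos named like moshe_0.avi (label after the last underscore) into train/ and test/ subfolders at roughly 7:2 per class. The source folder path in the example is an assumption; adjust to yours.

import os
import random
import shutil
from collections import defaultdict

# Hypothetical helper (not part of pyskl): copy a flat folder of videos
# named like 'moshe_0.avi' into train/ and test/ subfolders, taking
# `test_per_class` videos per class for testing and the rest for training.
def split_dataset(src_dir, dst_dir, test_per_class=2, seed=0):
    random.seed(seed)
    by_label = defaultdict(list)
    for name in os.listdir(src_dir):
        if not name.endswith('.avi'):
            continue
        label = name.rsplit('_', 1)[1].replace('.avi', '')
        by_label[label].append(name)
    for split in ('train', 'test'):
        os.makedirs(os.path.join(dst_dir, split), exist_ok=True)
    for label, names in by_label.items():
        random.shuffle(names)
        for i, name in enumerate(names):
            split = 'test' if i < test_per_class else 'train'
            shutil.copy(os.path.join(src_dir, name),
                        os.path.join(dst_dir, split, name))

# example (source folder path is an assumption):
# split_dataset('./raw_videos', './tools/data/Weizmann')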

2. Class labels. The label is encoded in the video filename, e.g. moshe_0.avi; don't rename the files.

Then refer to Pyskl自定义数据集_墨末..的博客-CSDN博客.
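For reference, this is how the scripts in the next step derive the integer label from such a filename:

name = 'moshe_0.avi'
label = int(name.split('_')[1].replace('.avi', ''))  # -> 0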

3. Generate test.json, train.json, and the list file (run in PyCharm)

Put all the code in one Python file, set up a main entry point, and call whichever function you need.

The code used is below (this is all the code this project uses). The file can sit under tools/, so that the relative ./data/Weizmann paths resolve correctly:

import os
import decord
import json
from mmcv import load, dump

from pyskl.smp import mwlines


def writeJson(path_train, jsonpath):
    """Scan a folder of videos and write one annotation dict per video to a json file."""
    output_list = []
    trainfile_list = os.listdir(path_train)
    for train_name in trainfile_list:
        traindit = {}
        sp = train_name.split('_')
        traindit['vid_name'] = train_name.replace('.avi', '')
        # the integer label is the part after the '_' (e.g. moshe_0.avi -> 0)
        traindit['label'] = int(sp[1].replace('.avi', ''))
        traindit['start_frame'] = 0

        # read the video to get its frame count
        video_path = os.path.join(path_train, train_name)
        vid = decord.VideoReader(video_path)
        traindit['end_frame'] = len(vid)
        output_list.append(traindit.copy())
    with open(jsonpath, 'w') as outfile:
        json.dump(output_list, outfile)


def writeList(dirpath, name):
    path_train = os.path.join(dirpath, 'train')
    path_test = os.path.join(dirpath, 'test')
    trainfile_list = os.listdir(path_train)
    testfile_list = os.listdir(path_test)

    train = []
    for train_name in trainfile_list:
        traindit = {}
        sp = train_name.split('_')

        traindit['vid_name'] = train_name
        traindit['label'] = sp[1].replace('.avi', '')
        train.append(traindit)
    test = []
    for test_name in testfile_list:
        testdit = {}
        sp = test_name.split('_')
        testdit['vid_name'] = test_name
        testdit['label'] = sp[1].replace('.avi', '')
        test.append(testdit)

    tmpl1 = os.path.join(path_train, '{}')
    lines1 = [(tmpl1 + ' {}').format(x['vid_name'], x['label']) for x in train]

    tmpl2 = os.path.join(path_test, '{}')
    lines2 = [(tmpl2 + ' {}').format(x['vid_name'], x['label']) for x in test]
    lines = lines1 + lines2
    mwlines(lines, os.path.join(dirpath, name))


def traintest(dirpath, pklname, newpklname):
    """Combine the skeleton annotations (pklname) with the train/test split
    from train.json/test.json into the final annotation file (newpklname)."""
    os.chdir(dirpath)
    train = load('train.json')
    test = load('test.json')
    annotations = load(pklname)
    split = dict()
    split['xsub_train'] = [x['vid_name'] for x in train]
    split['xsub_val'] = [x['vid_name'] for x in test]
    dump(dict(split=split, annotations=annotations), newpklname)


if __name__ == '__main__':

    dirpath = './data/Weizmann'
    pklname = 'train.pkl'
    newpklname = 'Wei_xsub_stgn++.pkl'
    # Step 3a: generate the two json files (uncomment to run)
    # writeJson('./data/Weizmann/train', './data/Weizmann/train.json')
    # writeJson('./data/Weizmann/test', './data/Weizmann/test.json')
    # Step 3b: generate the list file (uncomment to run)
    # writeList(dirpath, 'Weizmann.list')
    # Step 5: merge the json splits with train.pkl into the final annotation file
    traintest(dirpath, pklname, newpklname)
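For reference (derived from the code above), one entry in train.json and one line in Weizmann.list look like this; the end_frame value is just an example, since it is read from the video:

# one entry in train.json (end_frame comes from the video's frame count)
{"vid_name": "moshe_0", "label": 0, "start_frame": 0, "end_frame": 84}

# one line in Weizmann.list: "<video path> <label>"
./data/Weizmann/train/moshe_0.avi 0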

4. Generate the train.pkl file. First, modify the custom_2d_skeleton.py file as follows (edit in PyCharm):

# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
# import pdb
import pyskl
from mmdet.apis import inference_detector, init_detector
from mmpose.apis import inference_top_down_pose_model, init_pose_model
import decord
import mmcv
import numpy as np
# import torch.distributed as dist
from tqdm import tqdm
import mmdet
# import mmpose
# from pyskl.smp import mrlines
import cv2

from pyskl.smp import mrlines


def extract_frame(video_path):
    vid = decord.VideoReader(video_path)
    return [x.asnumpy() for x in vid]


def detection_inference(model, frames):
    model = model.cuda()
    results = []
    for frame in frames:
        result = inference_detector(model, frame)
        results.append(result)
    return results


def pose_inference(model, frames, det_results):
    model = model.cuda()
    assert len(frames) == len(det_results)
    total_frames = len(frames)
    num_person = max([len(x) for x in det_results])
    kp = np.zeros((num_person, total_frames, 17, 3), dtype=np.float32)

    for i, (f, d) in enumerate(zip(frames, det_results)):
        # Align input format
        d = [dict(bbox=x) for x in list(d)]
        pose = inference_top_down_pose_model(model, f, d, format='xyxy')[0]
        for j, item in enumerate(pose):
            kp[j, i] = item['keypoints']
    return kp

pyskl_root = osp.dirname(pyskl.__path__[0])
default_det_config = f'{pyskl_root}/demo/faster_rcnn_r50_fpn_1x_coco-person.py'
default_det_ckpt = (
    'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/'
    'faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth')
default_pose_config = f'{pyskl_root}/demo/hrnet_w32_coco_256x192.py'
default_pose_ckpt = (
    'https://download.openmmlab.com/mmpose/top_down/hrnet/'
    'hrnet_w32_coco_256x192-c78dce93_20200708.pth')


def parse_args():
    parser = argparse.ArgumentParser(
        description='Generate 2D pose annotations for a custom video dataset')
    # * Both mmdet and mmpose should be installed from source
    # parser.add_argument('--mmdet-root', type=str, default=default_mmdet_root)
    # parser.add_argument('--mmpose-root', type=str, default=default_mmpose_root)

    # parser.add_argument('--det-config', type=str, default='../refe/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py')
    # parser.add_argument('--det-ckpt', type=str,
    #                     default='../refe/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth')
    parser.add_argument(
        '--det-config',
        # default='../refe/faster_rcnn_r50_fpn_2x_coco.py',
        default=default_det_config,
        help='human detection config file path (from mmdet)')

    parser.add_argument(
        '--det-ckpt',
        default=default_det_ckpt,
        help='human detection checkpoint file/url')

    parser.add_argument('--pose-config', type=str, default=default_pose_config)
    parser.add_argument('--pose-ckpt', type=str, default=default_pose_ckpt)
    # * Only det boxes with score larger than det_score_thr will be kept
    parser.add_argument('--det-score-thr', type=float, default=0.7)
    # * Only det boxes with large enough sizes will be kept,
    parser.add_argument('--det-area-thr', type=float, default=1300)
    # * Accepted formats for each line in video_list are:
    # * 1. "xxx.mp4" ('label' is missing, the dataset can be used for inference, but not training)
    # * 2. "xxx.mp4 label" ('label' is an integer (category index),
    # * the result can be used for both training & testing)
    # * All lines should take the same format.
    parser.add_argument('--video-list', type=str, help='the list of source videos')
    # * out should ends with '.pkl'
    parser.add_argument('--out', type=str, help='output pickle name')
    parser.add_argument('--tmpdir', type=str, default='tmp')
    parser.add_argument('--local_rank', type=int, default=1)
    # pdb.set_trace()

    # if 'RANK' not in os.environ:
    #     os.environ['RANK'] = str(args.local_rank)
    #     os.environ['WORLD_SIZE'] = str(1)
    # os.environ['MASTER_ADDR'] = 'localhost'
    # os.environ['MASTER_PORT'] = '12345'

    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    assert args.out.endswith('.pkl')

    lines = mrlines(args.video_list)
    lines = [x.split() for x in lines]

    assert len(lines[0]) in [1, 2]
    if len(lines[0]) == 1:
        annos = [dict(frame_dir=osp.basename(x[0]).split('.')[0], filename=x[0]) for x in lines]
    else:
        annos = [dict(frame_dir=osp.basename(x[0]).split('.')[0], filename=x[0], label=int(x[1])) for x in lines]

    rank = 0  # added: run single-process instead of distributed
    world_size = 1  # added

    # init_dist('pytorch', backend='nccl')
    # rank, world_size = get_dist_info()
    #
    # if rank == 0:
    #     os.makedirs(args.tmpdir, exist_ok=True)
    # dist.barrier()
    my_part = annos
    # my_part = annos[rank::world_size]
    print("from det_model")
    det_model = init_detector(args.det_config, args.det_ckpt, 'cuda')
    assert det_model.CLASSES[0] == 'person', 'A detector trained on COCO is required'
    print("from pose_model")
    pose_model = init_pose_model(args.pose_config, args.pose_ckpt, 'cuda')
    n = 0
    for anno in tqdm(my_part):
        frames = extract_frame(anno['filename'])
        print("anno['filename", anno['filename'])
        det_results = detection_inference(det_model, frames)
        # * Get detection results for human
        det_results = [x[0] for x in det_results]
        for i, res in enumerate(det_results):
            # * filter boxes with small scores
            res = res[res[:, 4] >= args.det_score_thr]
            # * filter boxes with small areas
            box_areas = (res[:, 3] - res[:, 1]) * (res[:, 2] - res[:, 0])
            assert np.all(box_areas >= 0)
            res = res[box_areas >= args.det_area_thr]
            det_results[i] = res

        pose_results = pose_inference(pose_model, frames, det_results)
        shape = frames[0].shape[:2]
        anno['img_shape'] = anno['original_shape'] = shape
        anno['total_frames'] = len(frames)
        anno['num_person_raw'] = pose_results.shape[0]
        anno['keypoint'] = pose_results[..., :2].astype(np.float16)
        anno['keypoint_score'] = pose_results[..., 2].astype(np.float16)
        anno.pop('filename')

    os.makedirs(args.tmpdir, exist_ok=True)  # the tmp dir must exist before dumping
    mmcv.dump(my_part, osp.join(args.tmpdir, f'part_{rank}.pkl'))
    # dist.barrier()

    if rank == 0:
        parts = [mmcv.load(osp.join(args.tmpdir, f'part_{i}.pkl')) for i in range(world_size)]
        rem = len(annos) % world_size
        if rem:
            for i in range(rem, world_size):
                parts[i].append(None)

        ordered_results = []
        for res in zip(*parts):
            ordered_results.extend(list(res))
        ordered_results = ordered_results[:len(annos)]
        mmcv.dump(ordered_results, args.out)


if __name__ == '__main__':
    # default_mmdet_root = osp.dirname(mmcv.__path__[0])
    # default_mmpose_root = osp.dirname(mmcv.__path__[0])
    main()

Then, in the conda prompt, run:

python D:/pyskl-main/tools/data/custom_2d_skeleton.py --video-list  ./tools/data/Weizmann/Weizmann.list --out  ./tools/data/Weizmann/train.pkl

Make sure tools is prefixed with ./ here, otherwise no pkl file is produced.
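To sanity-check the generated train.pkl, a quick sketch (run from the pyskl-main root, matching the command above):

from mmcv import load

# quick sanity check on the generated skeleton annotations
annos = load('./tools/data/Weizmann/train.pkl')
print(len(annos))                  # number of videos in the list file
print(sorted(annos[0].keys()))     # frame_dir, img_shape, keypoint, keypoint_score, ...
print(annos[0]['keypoint'].shape)  # (num_person, total_frames, 17, 2)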

5. Generate the final pkl file; just follow 墨末's tutorial here (run in PyCharm).
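Concretely, with the script from step 3 this boils down to one call to traintest(); name the output to match the ann_file your config expects (the PoseC3D config below uses Wei_xsub_posec3d.pkl):

# train.pkl is the step-4 output, already sitting in tools/data/Weizmann;
# run this from the same working directory as the step-3 script
traintest('./data/Weizmann', 'train.pkl', 'Wei_xsub_posec3d.pkl')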

6. Modify joint.py (PoseC3D) or j.py (ST-GCN++); after editing, run in PyCharm

If you're using ST-GCN++, follow 墨末's post; if PoseC3D, refer to 基于pyskl的poseC3D训练自己的数据集_骑走的小木马的博客-CSDN博客.

You need to change the code in joint.py, editing the source directly:

model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='C3D',
        in_channels=17,
        base_channels=32,
        num_stages=3,
        temporal_downsample=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=256,
        num_classes=10,  # changed: number of classes in our dataset
        dropout=0.5),
    test_cfg=dict(average_clips='prob'))

dataset_type = 'PoseDataset'
ann_file = r'D:\pyskl-main\tools\data\Weizmann\Wei_xsub_posec3d.pkl'
left_kp = [1, 3, 5, 7, 9, 11, 13, 15]
right_kp = [2, 4, 6, 8, 10, 12, 14, 16]
train_pipeline = [
    dict(type='UniformSampleFrames', clip_len=48),
    dict(type='PoseDecode'),
    dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
    dict(type='Resize', scale=(-1, 64)),
    dict(type='RandomResizedCrop', area_range=(0.56, 1.0)),
    dict(type='Resize', scale=(56, 56), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5, left_kp=left_kp, right_kp=right_kp),
    dict(type='GeneratePoseTarget', with_kp=True, with_limb=False),
    dict(type='FormatShape', input_format='NCTHW_Heatmap'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='UniformSampleFrames', clip_len=48, num_clips=1),
    dict(type='PoseDecode'),
    dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
    dict(type='Resize', scale=(64, 64), keep_ratio=False),
    dict(type='GeneratePoseTarget', with_kp=True, with_limb=False),
    dict(type='FormatShape', input_format='NCTHW_Heatmap'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='UniformSampleFrames', clip_len=48, num_clips=10),
    dict(type='PoseDecode'),
    dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
    dict(type='Resize', scale=(64, 64), keep_ratio=False),
    dict(type='GeneratePoseTarget', with_kp=True, with_limb=False, double=True, left_kp=left_kp, right_kp=right_kp),
    dict(type='FormatShape', input_format='NCTHW_Heatmap'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='RepeatDataset',
        times=10,
        dataset=dict(type=dataset_type, ann_file=ann_file, split='xsub_train', pipeline=train_pipeline)),  # changed: split='xsub_train'
    val=dict(type=dataset_type, ann_file=ann_file, split='xsub_val', pipeline=val_pipeline),  # changed: split='xsub_val'
    test=dict(type=dataset_type, ann_file=ann_file, split='xsub_val', pipeline=test_pipeline))  # changed: split='xsub_val'
# optimizer
optimizer = dict(type='SGD', lr=0.4, momentum=0.9, weight_decay=0.0003)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', by_epoch=False, min_lr=0)
total_epochs = 24
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy', 'mean_class_accuracy'], topk=(1, 5))
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
log_level = 'INFO'
work_dir = './work_dirs/posec3d/c3d_light_gym/joint'
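Optionally, a quick sketch to confirm the config and the annotation file agree (adjust the path to yours):

from mmcv import load

# sketch: make sure the final pkl matches the config
data = load(r'D:\pyskl-main\tools\data\Weizmann\Wei_xsub_posec3d.pkl')
print(data['split'].keys())     # expect dict_keys(['xsub_train', 'xsub_val'])
labels = sorted({a['label'] for a in data['annotations']})
print(labels)                   # expect [0, 1, ..., 9] to match num_classes=10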

7. Train the model

posec3d

bash tools/dist_train.sh configs/posec3d/c3d_light_gym/joint.py 1

Among the saved pth checkpoints, just pick the one with the lowest loss; mine was epoch_23.pth.

You'll see plenty of warnings while it runs; don't worry, just wait and check whether output like the following appears at the end:

2023-08-10 14:38:43,157 - pyskl - INFO - Epoch [19][20/22]      lr: 4.353e-02, eta: 0:02:00, time: 1.140, data_time: 0.796, memory: 3433, top1_acc: 0.9031, top5_acc: 1.0000, loss_cls: 0.2292, loss: 0.2292, grad_norm: 0.3940
2023-08-10 14:38:44,144 - pyskl - INFO - Saving checkpoint at 19 epochs
2023-08-10 14:39:07,127 - pyskl - INFO - Epoch [20][20/22]      lr: 2.861e-02, eta: 0:01:36, time: 1.140, data_time: 0.795, memory: 3433, top1_acc: 0.9234, top5_acc: 1.0000, loss_cls: 0.2308, loss: 0.2308, grad_norm: 0.3883
2023-08-10 14:39:08,125 - pyskl - INFO - Saving checkpoint at 20 epochs
2023-08-10 14:39:31,233 - pyskl - INFO - Epoch [21][20/22]      lr: 1.662e-02, eta: 0:01:12, time: 1.146, data_time: 0.794, memory: 3433, top1_acc: 0.9297, top5_acc: 1.0000, loss_cls: 0.2041, loss: 0.2041, grad_norm: 0.3407
2023-08-10 14:39:32,225 - pyskl - INFO - Saving checkpoint at 21 epochs
2023-08-10 14:39:55,137 - pyskl - INFO - Epoch [22][20/22]      lr: 7.770e-03, eta: 0:00:49, time: 1.138, data_time: 0.791, memory: 3433, top1_acc: 0.9344, top5_acc: 1.0000, loss_cls: 0.1913, loss: 0.1913, grad_norm: 0.3205
2023-08-10 14:39:56,140 - pyskl - INFO - Saving checkpoint at 22 epochs
2023-08-10 14:40:19,149 - pyskl - INFO - Epoch [23][20/22]      lr: 2.209e-03, eta: 0:00:25, time: 1.140, data_time: 0.794, memory: 3433, top1_acc: 0.9406, top5_acc: 1.0000, loss_cls: 0.1776, loss: 0.1776, grad_norm: 0.2968
2023-08-10 14:40:20,143 - pyskl - INFO - Saving checkpoint at 23 epochs
2023-08-10 14:40:43,282 - pyskl - INFO - Epoch [24][20/22]      lr: 3.186e-05, eta: 0:00:02, time: 1.148, data_time: 0.810, memory: 3433, top1_acc: 0.9313, top5_acc: 1.0000, loss_cls: 0.1793, loss: 0.1793, grad_norm: 0.2924
2023-08-10 14:40:44,266 - pyskl - INFO - Saving checkpoint at 24 epochs

If it appears, you're all set.

If you're training ST-GCN++, you can follow 墨末's training commands instead; that workflow saves a best pth file.
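If you'd rather pick the lowest-loss checkpoint programmatically, here is a minimal sketch that scans the training text log for lines like the ones shown above (the log filename is an example; use the .log file in your work_dir):

import re

# sketch: find the epoch with the lowest reported loss in a pyskl text log
best_loss, best_epoch = float('inf'), None
with open('work_dirs/posec3d/c3d_light_gym/joint/20230810_143843.log') as f:
    for line in f:
        m = re.search(r'Epoch \[(\d+)\].*loss: ([\d.]+)', line)
        if m and float(m.group(2)) < best_loss:
            best_loss, best_epoch = float(m.group(2)), int(m.group(1))
print(f'lowest loss {best_loss:.4f} at epoch {best_epoch} -> epoch_{best_epoch}.pth')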

8. Create the label file

Create a shiyan.txt under tools/data/label_map

with the following content:

{'wave2_0':'0'
'bend_1':'1'
'jack_2':'2'
'jump_3':'3'
'pjump_4':'4'
'run_5':'5'
'side_6':'6'
'skip__7':'7'
'walk_8':'8'
'wave1_9':'9'}

Note: list the entries vertically, exactly as shown above, or you'll run into problems; the demo script appears to read the label map file line by line.

9. Testing

For testing, run (in conda):

python demo/demo_skeleton.py D:/pyskl-main/tools/data/Weizmann/test/moshe_0.avi demo/moshe_0_demo.mp4 --config configs/posec3d/c3d_light_gym/joint.py --checkpoint work_dirs/posec3d/c3d_light_gym/joint/epoch_23.pth --label-map tools/data/label_map/shiyan.txt

The first path here must be a full absolute path, otherwise you'll get a "list index" error.

After that, the output lands in the demo folder; check whether it's there. If it is, great. Feel free to ask about any problems; I'll answer what I can. Good luck!
