Training a custom COCO dataset with PaddleDetection

Step 1: Annotate the dataset with labelme

There are plenty of tutorials on building datasets with labelme, so I won't repeat them here.

Step 2: Convert the labelme dataset to COCO format

Converting a detection (bounding-box) dataset to COCO format

PaddleDetection already ships a conversion script, tools/x2coco.py. Run the following in a terminal:

python tools/x2coco.py --dataset_type labelme --json_input_dir <path to the json files to convert> --image_input_dir <path to the source images> --output_dir <path where the converted output is saved> --train_proportion <train split ratio> --val_proportion <validation split ratio> --test_proportion <infer split ratio>
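For example, a hypothetical invocation (the paths and split ratios below are placeholders for illustration only; substitute your own):

python tools/x2coco.py --dataset_type labelme --json_input_dir ./labelme_json --image_input_dir ./images --output_dir ./coco_out --train_proportion 0.8 --val_proportion 0.2 --test_proportion 0.0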

Converting a keypoint dataset to COCO format

The code used here is adapted from someone else's work.
First rename the images to the 12-digit numeric format, e.g. 000000000001.jpg. The code is as follows:

import os
from tqdm import tqdm

# Rename every image in the source folder to the 12-digit COCO-style name,
# e.g. 1.jpg -> dataset_new/000000000001.jpg
src_dir = '<folder containing the images>'
for root, _, files in os.walk(src_dir):
    for s in tqdm(files):
        os.renames(os.path.join(src_dir, s), 'dataset_new/%012d.jpg' % int(s.split('.')[0]))

Then run the conversion code. Of course, before doing so the json files also need to be renamed to the matching 000000000001.json format; a minimal renaming sketch and then the conversion code follow.
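A minimal sketch of the json renaming, mirroring the image renaming above (the source and target folder names here are placeholders, not part of the original code):

import os
from tqdm import tqdm

# Hypothetical helper: rename labelme json files to the 12-digit format,
# e.g. 1.json -> label_new/000000000001.json
src_dir = '<folder containing the json files>'
for root, _, files in os.walk(src_dir):
    for s in tqdm(files):
        os.renames(os.path.join(src_dir, s), 'label_new/%012d.json' % int(s.split('.')[0]))

With both the images and the json files renamed, the conversion code itself is: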

import numpy as np
import json
import glob
import codecs
import os

class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)


class tococo(object):
    def __init__(self, jsonfile, save_path, a):
        self.images = []
        self.categories = [
            {
                "supercategory": "person",
                "id": 1,
                "name": "person",
                "keypoints":
                    [
                        "nose",
                        "left_ear",
                        "right_ear",
                        "left_shoulder",
                        "right_shoulder",
                        "left_elbow",
                        "right_elbow",
                        "left_wrist",
                        "right_wrist"
                    ],
                "skeleton": [
                    [6, 8],
                    [4, 6],
                    [2, 4],
                    [1, 2],
                    [3, 1],
                    [5, 3],
                    [7, 5],
                    [9, 7],
                ]
            }
        ]
        self.annotations = []
        self.jsonfile = os.listdir(jsonfile)
        self.save_path = save_path  # path where the merged COCO json will be saved
        self.class_id = a  # class id; we only have one category: person
        self.coco = {}
        self.path = jsonfile

    def labelme_to_coco(self):
        for num, json_file in enumerate(self.jsonfile):
            json_file = os.path.join(self.path, json_file)
            data = codecs.open(json_file, 'r')
            data = json.load(data)
            self.images.append(self.get_images(json_file[-17:-4] + 'jpg', data["imageHeight"], data["imageWidth"]))
            shapes = data["shapes"]
            annotation = {}  # one annotation holds all samples of a single image
            num_keypoints = 0
            keypoints = [0] * 3 * 9  # 9 is the number of keypoints we annotated
            flag = 0
            for shape in shapes:
                if shape['shape_type'] == 'rectangle' or shape["label"] == '90' or shape["label"] == '99':
                    bbox = []
                    temp = shape["points"]
                    try:
                        x_min = min(temp[0][0], temp[1][0])
                    except IndexError as e:
                        print('class: {}, image: {}'.format(self.class_id, int(json_file[-17:-5])))

                    x_max = max(temp[0][0], temp[1][0])
                    y_min = min(temp[0][1], temp[1][1])
                    y_max = max(temp[0][1], temp[1][1])
                    bbox.append(x_min)
                    bbox.append(y_min)
                    w = x_max - x_min + 1
                    h = y_max - y_min + 1
                    bbox.append(w)
                    bbox.append(h)
                    annotation['bbox'] = bbox
                    flag = flag + 1
                else:
                    idx = int(shape['label'])

                    try:
                        keypoints[(idx - 1) * 3 + 0] = shape['points'][0][0]
                        keypoints[(idx - 1) * 3 + 1] = shape['points'][0][1]
                        keypoints[(idx - 1) * 3 + 2] = 2
                        num_keypoints = num_keypoints + 1
                    except IndexError as e:
                        print('class: {}, image: {}'.format(self.class_id, int(json_file[-17:-5])))

            if flag == 0:
                print('{}\\{} does not contain bbox\n'.format(self.class_id, json_file))
            annotation['segmentation'] = [[]]
            annotation['num_keypoints'] = num_keypoints
            annotation['area'] = 0  # area is not computed by this script
            annotation['iscrowd'] = 0
            annotation['keypoints'] = keypoints
            annotation['image_id'] = int(json_file[-17:-5])  # id of the corresponding image
            # note: this overwrites any rectangle bbox computed above with a full-image bbox
            annotation['bbox'] = [0, 0, data['imageWidth'], data['imageHeight']]
            annotation['category_id'] = 1
            annotation['id'] = int(json_file[-17:-5])  # annotation id
            self.annotations.append(annotation)
            self.image_id = int(json_file[-17:-5])

        self.coco["images"] = self.images
        self.coco["categories"] = self.categories
        self.coco["annotations"] = self.annotations



    def get_images(self, filename, height, width):
        image = {}
        image["height"] = height
        image['width'] = width
        image["id"] = int(filename[-16:-4])
        image["file_name"] = filename
        return image

    def get_categories(self, name, class_id):
        category = {}
        category["supercategory"] = "person"
        category['id'] = class_id
        category['name'] = name
        return category

    def save_json(self):
        self.labelme_to_coco()
        coco_data = self.coco
        # write the COCO json file
        json.dump(coco_data, open(self.save_path, 'w'), indent=4, cls=MyEncoder)  # indent=4 for readable output
        return self.image_id


json_path = r'J:\keypoint_own/label_val'   # folder containing the labelme json files
c = tococo(json_path, save_path='val.json', a=1)  # merge all the json files into one; save_path is the name of the final json file
image_id = c.save_json()

After the conversion is finished, we use another piece of code to verify that the converted COCO format is correct.

import skimage.io as io
import pylab
import time
import json
import numpy as np
from collections import defaultdict
import itertools
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection


def _isArrayLike(obj):
    return hasattr(obj, '__iter__') and hasattr(obj, '__len__')


class COCO:
    def __init__(self, annotation_file=None):
        """
        Constructor of Microsoft COCO helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :param image_folder (str): location to the folder that hosts images.
        :return:
        """
        # load dataset
        self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()
        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
        if not annotation_file == None:
            print('loading annotations into memory...')
            tic = time.time()
            dataset = json.load(open(annotation_file, 'r'))
            assert type(dataset) == dict, 'annotation file format {} not supported'.format(type(dataset))
            print('Done (t={:0.2f}s)'.format(time.time() - tic))
            self.dataset = dataset
            self.createIndex()

    def createIndex(self):
        # create index
        print('creating index...')
        anns, cats, imgs = {}, {}, {}
        imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
        if 'annotations' in self.dataset:
            for ann in self.dataset['annotations']:
                imgToAnns[ann['image_id']].append(ann)
                anns[ann['id']] = ann

        if 'images' in self.dataset:
            for img in self.dataset['images']:
                imgs[img['id']] = img

        if 'categories' in self.dataset:
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat

        if 'annotations' in self.dataset and 'categories' in self.dataset:
            for ann in self.dataset['annotations']:
                catToImgs[ann['category_id']].append(ann['image_id'])

        print('index created!')

        # create class members
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats

    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
        """
        filtering parameters. default skips that filter.
        :param catNms (str array)  : get cats for given cat names
        :param supNms (str array)  : get cats for given supercategory names
        :param catIds (int array)  : get cats for given cat ids
        :return: ids (int array)   : integer array of cat ids
        """
        catNms = catNms if _isArrayLike(catNms) else [catNms]
        supNms = supNms if _isArrayLike(supNms) else [supNms]
        catIds = catIds if _isArrayLike(catIds) else [catIds]

        if len(catNms) == len(supNms) == len(catIds) == 0:
            cats = self.dataset['categories']

        else:
            cats = self.dataset['categories']
            # print(' ')
            # print('keypoints has only one category: person')
            # print(cats)
            cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
            cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
            cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
            # print(cats)
        ids = [cat['id'] for cat in cats]
        return ids

    def loadCats(self, ids=[]):
        """
        Load cats with the specified ids.
        :param ids (int array)       : integer ids specifying cats
        :return: cats (object array) : loaded cat objects
        """
        if _isArrayLike(ids):
            return [self.cats[id] for id in ids]
        elif type(ids) == int:
            return [self.cats[ids]]

    def getImgIds(self, imgIds=[], catIds=[]):
        '''
        Get img ids that satisfy given filter conditions.
        :param imgIds (int array) : get imgs for given ids
        :param catIds (int array) : get imgs with all given cats
        :return: ids (int array)  : integer array of img ids
        '''
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]

        if len(imgIds) == len(catIds) == 0:
            ids = self.imgs.keys()
        else:
            ids = set(imgIds)
            for i, catId in enumerate(catIds):
                if i == 0 and len(ids) == 0:
                    ids = set(self.catToImgs[catId])
                else:
                    ids &= set(self.catToImgs[catId])
        return list(ids)

    def loadImgs(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)       : integer ids specifying img
        :return: imgs (object array) : loaded img objects
        """
        if _isArrayLike(ids):
            return [self.imgs[id] for id in ids]
        elif type(ids) == int:
            return [self.imgs[ids]]

    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
        """
        Get ann ids that satisfy given filter conditions. default skips that filter
        :param imgIds  (int array)     : get anns for given imgs
               catIds  (int array)     : get anns for given cats
               areaRng (float array)   : get anns for given area range (e.g. [0 inf])
               iscrowd (boolean)       : get anns for given crowd label (False or True)
        :return: ids (int array)       : integer array of ann ids
        """
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]

        if len(imgIds) == len(catIds) == len(areaRng) == 0:
            anns = self.dataset['annotations']
        else:
            # find all anns belonging to the given imgIds
            if not len(imgIds) == 0:
                lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
                anns = list(itertools.chain.from_iterable(lists))
            else:
                anns = self.dataset['annotations']
            # filter anns by the remaining conditions such as catIds
            anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
            anns = anns if len(areaRng) == 0 else [ann for ann in anns if
                                                   ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
        if not iscrowd == None:
            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
        else:

            ids = [ann['id'] for ann in anns]
        return ids

    def loadAnns(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)       : integer ids specifying anns
        :return: anns (object array) : loaded ann objects
        """
        if _isArrayLike(ids):
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

    def showAnns(self, anns):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception('datasetType not supported')
        if datasetType == 'instances':
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
                # if 'segmentation' in ann:
                #     if type(ann['segmentation']) == list:
                #         # polygon
                #         for seg in ann['segmentation']:
                #             poly = np.array(seg).reshape((int(len(seg)/2), 2))
                #             polygons.append(Polygon(poly))
                #             color.append(c)
                #     else:
                #         # mask
                #         t = self.imgs[ann['image_id']]
                #         if type(ann['segmentation']['counts']) == list:
                #             rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
                #         else:
                #             rle = [ann['segmentation']]
                #         m = maskUtils.decode(rle)
                #         img = np.ones( (m.shape[0], m.shape[1], 3) )
                #         if ann['iscrowd'] == 1:
                #             color_mask = np.array([2.0,166.0,101.0])/255
                #         if ann['iscrowd'] == 0:
                #             color_mask = np.random.random((1, 3)).tolist()[0]
                #         for i in range(3):
                #             img[:,:,i] = color_mask[i]
                #         ax.imshow(np.dstack( (img, m*0.5) ))
                if 'keypoints' in ann and type(ann['keypoints']) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton']) - 1
                    kp = np.array(ann['keypoints'])
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        if np.all(v[sk] > 0):
                            # draw the line segments between connected keypoints
                            plt.plot(x[sk], y[sk], linewidth=1, color=c)
                    # draw the keypoints themselves
                    plt.plot(x[v > 0], y[v > 0], 'o', markersize=4, markerfacecolor=c, markeredgecolor='k',
                             markeredgewidth=1)
                    plt.plot(x[v > 1], y[v > 1], 'o', markersize=4, markerfacecolor=c, markeredgecolor=c,
                             markeredgewidth=1)
            p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
            ax.add_collection(p)
            p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
            ax.add_collection(p)
        elif datasetType == 'captions':
            for ann in anns:
                print(ann['caption'])


pylab.rcParams['figure.figsize'] = (8.0, 10.0)

annFile = r'J:\keypoint_own\val.json'     # path of the converted json file
img_prefix = r'J:\keypoint_own\pic_val'   # folder containing the images

# initialize COCO api for instance annotations
coco = COCO(annFile)

# getCatIds(catNms=[], supNms=[], catIds=[])
# filter by category name, supercategory name, or category id to get the ids of the matching categories
catIds = coco.getCatIds(catNms=['person'])
# getImgIds(imgIds=[], catIds=[])
# get image ids by image id or by the id of the category they belong to
imgIds = coco.getImgIds(catIds=catIds)
# imgIds = coco.getImgIds(imgIds=[1407])

# loadImgs(ids=[])
# once we have the image ids, loadImgs returns the image records
# here we randomly pick one image from the list above
img = coco.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
I = io.imread('%s/%s' % (img_prefix, img['file_name']))
plt.imshow(I)
plt.axis('off')
ax = plt.gca()

# getAnnIds(imgIds=[], catIds=[], areaRng=[], iscrowd=None)
# get annotation ids by image id, category id, instance area, and crowd flag
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)

# loadAnns(ids=[])
# get the annotation records from the annotation ids
anns = coco.loadAnns(annIds)
print('anns:', anns)

coco.showAnns(anns)
plt.imshow(I)
plt.axis('off')
plt.show()

You may hit a "No module named 'skimage'" error here. Running pip install skimage directly will fail; instead run pip install scikit-image -i https://pypi.tuna.tsinghua.edu.cn/simple (the appended URL is the Tsinghua mirror, which speeds up the download).

Step 3: Find the network you need in the configs directory and change a few paths

Open tinypose_128x96.yml in the configs directory; this is the human keypoint detection network. Change its default dataset paths. The default TrainDataset block is shown below, followed by an illustrative custom-dataset version.

TrainDataset:
  !KeypointTopDownCocoDataset
    image_dir: "val2017"  #  源图片路径
    anno_path: annotations/person_keypoints_val2017.json #  前面转化完成的json路径
    dataset_dir: dataset/coco #  根路径
    num_joints: *num_joints  #  关键点的个数
    trainsize: *trainsize
    pixel_std: *pixel_std
    use_gt_bbox: True
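For instance, pointing it at our own converted dataset might look like the following sketch (the folder and file names here are placeholders for illustration, not values from the original post):

TrainDataset:
  !KeypointTopDownCocoDataset
    image_dir: "images"  # folder with the renamed 12-digit images
    anno_path: annotations/keypoint_train.json  # json produced by the conversion step above
    dataset_dir: dataset/keypoint_own  # dataset root directory
    num_joints: *num_joints
    trainsize: *trainsize
    pixel_std: *pixel_std
    use_gt_bbox: True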

Step 4: Start training

Run python tools/train.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml in the terminal to start training.
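If you also want evaluation to run periodically during training, PaddleDetection's tools/train.py additionally accepts an --eval flag (per the PaddleDetection docs), e.g.:

python tools/train.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml --eval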
