mmdetection_2 - Preparing a COCO-format dataset (VOC to COCO)

  1. Put the xml annotation files and the image files in the same folder:

    Annotations/
        1.xml
        1.jpg
        2.xml
        2.jpg
        …
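    Optionally, a quick pairing check before converting: it lists xml files without a matching image and vice versa (a minimal sketch using only the standard library; the .jpg/.JPG extensions are an assumption, adjust them to your data):

    import glob
    import os

    # file stems (names without extension) of annotations and images in Annotations/
    xml_stems = {os.path.splitext(os.path.basename(p))[0] for p in glob.glob("Annotations/*.xml")}
    img_stems = {os.path.splitext(os.path.basename(p))[0]
                 for ext in ("*.jpg", "*.JPG") for p in glob.glob("Annotations/" + ext)}

    print("xml without image:", sorted(xml_stems - img_stems))
    print("image without xml:", sorted(img_stems - xml_stems))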

  2. Create xml_2_coco.py:

    #coding:utf-8
     
    # pip install numpy  (lxml is not needed; xml parsing uses the standard-library xml.etree)
     
    import os
    import glob
    import json
    import shutil
    import numpy as np
    import xml.etree.ElementTree as ET
     
     
     
    path2 = "./coco/"  # output folder
    # classes = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field',
    # 'small-vehicle', 'large-vehicle', 'ship',
    # 'tennis-court', 'basketball-court',
    # 'storage-tank', 'soccer-ball-field',
    # 'roundabout', 'harbor',
    # 'swimming-pool', 'helicopter', 'container-crane',]  # categories
    
    classes = ['plastic_bag', 'carton', 'plastic_bottle', 'hydrophyte', 'deciduous_aggregates', 'plastic_cup', 'cans']
    xml_dir = "Annotations/"  # folder containing the xml files
    img_dir = "/media/wntlab/39e84b7d-5985-43ce-a0fa-a7f312f85897/HJK/dataset/data_voc_2021.11.1/"  # image root directory
    train_ratio = 0.85  # fraction of the data used for the training set
    
    START_BOUNDING_BOX_ID = 1
     
     
    def get(root, name):
        return root.findall(name)
     
     
    def get_and_check(root, name, length):
        vars = root.findall(name)
        if len(vars) == 0:
            raise NotImplementedError('Can not find %s in %s.'%(name, root.tag))
        if length > 0 and len(vars) != length:
            raise NotImplementedError('The size of %s is supposed to be %d, but is %d.'%(name, length, len(vars)))
        if length == 1:
            vars = vars[0]
        return vars
     
     
    def convert(xml_list, json_file):
        json_dict = {"images": [], "type": "instances", "annotations": [], "categories": []}
        categories = pre_define_categories.copy()
        bnd_id = START_BOUNDING_BOX_ID
        all_categories = {}
        for index, line in enumerate(xml_list):
            # print("Processing %s"%(line))
            xml_f = line
            tree = ET.parse(xml_f)
            root = tree.getroot()
            
            filename = os.path.basename(xml_f)[:-4] + ".JPG"  # image files are expected to use the .JPG extension
            image_id = 20190000001 + index
            size = get_and_check(root, 'size', 1)
            width = int(get_and_check(size, 'width', 1).text)
            height = int(get_and_check(size, 'height', 1).text)
            image = {'file_name': filename, 'height': height, 'width': width, 'id':image_id}
            json_dict['images'].append(image)
            ## Currently we do not support segmentation
            #  segmented = get_and_check(root, 'segmented', 1).text
            #  assert segmented == '0'
            for obj in get(root, 'object'):
                category = get_and_check(obj, 'name', 1).text
                if category in all_categories:
                    all_categories[category] += 1
                else:
                    all_categories[category] = 1
                if category not in categories:
                    if only_care_pre_define_categories:
                        continue
                    new_id = len(categories) + 1
                    print("[warning] category '{}' not in 'pre_define_categories'({}), create new id: {} automatically".format(category, pre_define_categories, new_id))
                    categories[category] = new_id
                category_id = categories[category]
                bndbox = get_and_check(obj, 'bndbox', 1)
                xmin = int(float(get_and_check(bndbox, 'xmin', 1).text))
                ymin = int(float(get_and_check(bndbox, 'ymin', 1).text))
                xmax = int(float(get_and_check(bndbox, 'xmax', 1).text))
                ymax = int(float(get_and_check(bndbox, 'ymax', 1).text))
                assert(xmax > xmin), "xmax <= xmin, {}".format(line)
                assert(ymax > ymin), "ymax <= ymin, {}".format(line)
                o_width = abs(xmax - xmin)
                o_height = abs(ymax - ymin)
                ann = {'area': o_width*o_height, 'iscrowd': 0, 'image_id':
                       image_id, 'bbox':[xmin, ymin, o_width, o_height],
                       'category_id': category_id, 'id': bnd_id, 'ignore': 0,
                       'segmentation': []}
                json_dict['annotations'].append(ann)
                bnd_id = bnd_id + 1
     
        for cate, cid in categories.items():
            cat = {'supercategory': 'none', 'id': cid, 'name': cate}
            json_dict['categories'].append(cat)
        json_fp = open(json_file, 'w')
        json_str = json.dumps(json_dict)
        json_fp.write(json_str)
        json_fp.close()
        print("------------create {} done--------------".format(json_file))
        print("find {} categories: {} -->>> your pre_define_categories {}: {}".format(len(all_categories), all_categories.keys(), len(pre_define_categories), pre_define_categories.keys()))
        print("category: id --> {}".format(categories))
        print(categories.keys())
        print(categories.values())
    
    if __name__ == '__main__':
    
        pre_define_categories = {}
        for i, cls in enumerate(classes):
            pre_define_categories[cls] = i + 1
        # pre_define_categories = {'a1': 1, 'a3': 2, 'a6': 3, 'a9': 4, "a10": 5}
        only_care_pre_define_categories = True
        # only_care_pre_define_categories = False
    
        if os.path.exists(path2 + "/annotations"):
            shutil.rmtree(path2 + "/annotations")
        os.makedirs(path2 + "/annotations")
        if os.path.exists(path2 + "/train2017"):
            shutil.rmtree(path2 + "/train2017")
        os.makedirs(path2 + "/train2017")
        if os.path.exists(path2 + "/val2017"):
            shutil.rmtree(path2 +"/val2017")
        os.makedirs(path2 + "/val2017")
        
    
        save_json_train = path2 + 'annotations/instances_train2017.json'
        save_json_val = path2 + 'annotations/instances_val2017.json'
    
    
        xml_list = glob.glob(xml_dir + "/*.xml")
        xml_list = np.sort(xml_list)
        np.random.seed(100)
        np.random.shuffle(xml_list)
    
        train_num = int(len(xml_list)*train_ratio)
        xml_list_train = xml_list[:train_num]
        xml_list_val = xml_list[train_num:]
    
        convert(xml_list_train, save_json_train)
        convert(xml_list_val, save_json_val)
    
    
    
        f1 = open(path2 + "train.txt", "w")
        for xml in xml_list_train:
            # image path = img_dir + relative xml path, with the extension replaced by .JPG
            img = img_dir + xml[:-4] + ".JPG"
            f1.write(os.path.basename(xml)[:-4] + "\n")
            shutil.copyfile(img, path2 + "/train2017/" + os.path.basename(img))

        f2 = open(path2 + "test.txt", "w")
        for xml in xml_list_val:
            img = img_dir + xml[:-4] + ".JPG"
            f2.write(os.path.basename(xml)[:-4] + "\n")
            shutil.copyfile(img, path2 + "/val2017/" + os.path.basename(img))
        f1.close()
        f2.close()
        print("-------------------------------")
        print("train number:", len(xml_list_train))
        print("val number:", len(xml_list_val))
    

    Remember to modify:

    classes: your own object categories
    xml_dir: the folder containing the images and xml files
    img_dir: the parent directory of xml_dir (the image path is built as img_dir + the relative xml path, with a .JPG extension)

  3. After setting the paths, run the py file to obtain the COCO-format dataset; a quick way to verify the output is shown below.
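    Running the script produces the following layout under path2 (./coco/ by default):

    coco/
        annotations/
            instances_train2017.json
            instances_val2017.json
        train2017/
        val2017/
        train.txt
        test.txt

    A minimal sanity check of the generated annotation files (standard library only; the path assumes the default path2 = "./coco/"):

    import json

    for split in ("train2017", "val2017"):
        with open("./coco/annotations/instances_%s.json" % split) as f:
            d = json.load(f)
        print(split,
              "| images:", len(d["images"]),
              "| annotations:", len(d["annotations"]),
              "| categories:", [c["name"] for c in d["categories"]])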
