# Convert VOC-format data to COCO (for object detection)

import os
import random
import shutil
import sys
import json
import glob
from time import sleep
import xml.etree.ElementTree as ET
# os.chdir(sys.path[0])
from tqdm import tqdm # 进度条
import logging
# Module-level logger: everything from DEBUG up is emitted to the console.
logger = logging.getLogger(__name__)
# Severity order: critical > error > warning > info > debug
logger.setLevel(logging.DEBUG)

# Console handler with a timestamped, level-tagged format.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)  # console log-level gate
console_handler.setFormatter(logging.Formatter(
    fmt='%(asctime)s - %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(console_handler)

"""
You only need to set the following three parts
1.val_files_num : num of validation samples from your all samples
2.test_files_num = num of test samples from your all samples
3.voc_annotations : path to your VOC dataset Annotations
 
"""
val_files_num = 20
test_files_num = 30
voc_annotations = './VOC2012/Annotations'  #remember to modify the path

main_path = os.path.dirname(os.path.dirname(voc_annotations))

coco_name = 'VOC2012'

coco_path = os.path.join(main_path, coco_name+'_COCO')
coco_images = os.path.join(main_path, coco_name+'_COCO', 'images')
coco_json_annotations = os.path.join(main_path, coco_name+'_COCO','annotations')
xml_val = os.path.join(main_path, 'xml', 'xml_val')
xml_test = os.path.join(main_path, 'xml', 'xml_test')
xml_train = os.path.join(main_path, 'xml', 'xml_train')

voc_images = os.path.join(main_path, coco_name, 'JPEGImages')




# adapted from https://www.php.cn/python-tutorials-424348.html
def mkdir(path):
    """Create the folder *path* (including parents) if it does not exist.

    Args:
        path (str): full path of the folder to create.

    Returns:
        bool: True if the folder was created, False if it already existed.
    """
    path = path.strip().rstrip("\\")
    log = logging.getLogger(__name__)  # same object as the module-level logger
    if os.path.exists(path):
        log.warning('Folder already exists')
        return False
    os.makedirs(path)
    # Bug fix: the original logged 'Folder already exists' here as well,
    # even though this branch is the one that just created the folder.
    log.info('Folder created')
    return True

# Folders to create; full paths.
for _folder in (coco_path, coco_images, coco_json_annotations,
                xml_val, xml_test, xml_train):
    mkdir(_folder)


# Copy every VOC image into the COCO images folder, and every XML
# annotation into the xml_train staging folder (the val/test split is
# carved out of xml_train below). Listings are sorted so image/xml pairs
# line up and the traversal is deterministic (os.listdir order is not).
pairs = list(zip(sorted(os.listdir(voc_images)), sorted(os.listdir(voc_annotations))))
tbar = tqdm(pairs)
for img_name, xml_name in tbar:
    shutil.copy(os.path.join(voc_images, img_name), coco_images)
    shutil.copy(os.path.join(voc_annotations, xml_name), xml_train)
    tbar.set_description('COPYING |')
# Bug fix: the original message claimed val_files_num files were copied to
# xml_val, but this stage copies everything to coco_images / xml_train.
logger.info("%s files copied to %s and %s", len(pairs), coco_images, xml_train)

# Split the XMLs in xml_train: move val_files_num of them to xml_val and
# test_files_num to xml_test; the remainder stays in xml_train.
allxml = os.listdir(xml_train)
needed = val_files_num + test_files_num
# Bug fix: the original condition (len > 0 OR len < needed) was almost
# always true and let random.sample raise when there were too few files.
if len(allxml) >= needed:
    flist = random.sample(allxml, needed)
    val_list, test_list = flist[:val_files_num], flist[val_files_num:]
    # Bug fix: the original iterated zip(val_list, test_list), which
    # truncates to the shorter list and left some test XMLs unmoved;
    # move each list in full instead.
    for val_fname in val_list:
        shutil.move(os.path.join(xml_train, val_fname), xml_val)
    for test_fname in test_list:
        shutil.move(os.path.join(xml_train, test_fname), xml_test)
else:
    logger.fatal('The xmls folders are empty or Not enough to split into val and test, please make sure there are enough %d files to move' % (needed))

logger.info("*" * 27 + "[Xmls segmentation Done ! Go check your file ]" + "*" * 28)

# # !/usr/bin/python

# pip install lxml


# Id given to the first bounding-box annotation; every following box
# receives the next consecutive id (see convert()).
START_BOUNDING_BOX_ID = 1
# None -> the category name -> id mapping is derived from the XML files
# themselves (see get_categories); set a dict to pin the mapping instead.
PRE_DEFINE_CATEGORIES = None


# If necessary, pre-define category and its id
#  PRE_DEFINE_CATEGORIES = {"aeroplane": 1, "bicycle": 2, "bird": 3, "boat": 4,
#  "bottle":5, "bus": 6, "car": 7, "cat": 8, "chair": 9,
#  "cow": 10, "diningtable": 11, "dog": 12, "horse": 13,
#  "motorbike": 14, "person": 15, "pottedplant": 16,
#  "sheep": 17, "sofa": 18, "train": 19, "tvmonitor": 20}

"""
main code below are from
https://github.com/Tony607/voc2coco
"""


def get(root, name):
    """Return every sub-element of *root* matching the tag/path *name*."""
    return root.findall(name)


def get_and_check(root, name, length):
    """Find sub-elements *name* under *root*, checking how many were found.

    Raises ValueError when nothing matches, or when *length* > 0 and the
    number of matches differs from *length*. Returns the single element
    when *length* == 1, otherwise the list of matches.
    """
    matches = root.findall(name)
    if not matches:
        raise ValueError("Can not find %s in %s." % (name, root.tag))
    if length > 0 and len(matches) != length:
        raise ValueError(
            "The size of %s is supposed to be %d, but is %d."
            % (name, length, len(matches))
        )
    return matches[0] if length == 1 else matches


def get_filename_as_int(filename):
    """Derive an integer image id from a VOC file name.

    Underscores are stripped so names like '2012_000001.jpg' map to
    2012000001.

    Raises:
        ValueError: if the remaining stem is not an integer.
    """
    stem = os.path.splitext(os.path.basename(filename))[0]
    try:
        return int(stem.replace("_", ""))
    except ValueError as exc:
        # Bug fix: the original used a bare `except:` (masking e.g.
        # KeyboardInterrupt) and dropped the exception chain.
        raise ValueError(
            "Filename %s is supposed to be an integer." % (filename,)
        ) from exc


def get_categories(xml_files):
    """Generate category name to id mapping from a list of xml files.

    Arguments:
        xml_files {list} -- A list of xml file paths.

    Returns:
        dict -- category name to id mapping (names sorted, ids from 0).
    """
    classes_names = set()
    for xml_file in xml_files:
        root = ET.parse(xml_file).getroot()
        for member in root.findall("object"):
            # Bug fix: the original read member[0].text, assuming <name>
            # is the first child of <object>; look it up explicitly.
            classes_names.add(member.findtext("name"))
    return {name: i for i, name in enumerate(sorted(classes_names))}


def convert(xml_files, json_file, split='unknown'):
    """Convert a list of VOC XML annotation files into one COCO JSON file.

    Arguments:
        xml_files {list} -- paths of the VOC XML files to convert.
        json_file {str} -- output path of the COCO-format JSON file.
        split {str} -- label shown on the progress bar (default 'unknown').
    """
    json_dict = {"images": [], "type": "instances", "annotations": [], "categories": []}
    if PRE_DEFINE_CATEGORIES is not None:
        categories = PRE_DEFINE_CATEGORIES
    else:
        # NOTE(review): ids are derived from this call's xml_files only, so
        # separate convert() calls (val/test/train) can assign different ids
        # to the same class name; set PRE_DEFINE_CATEGORIES to pin them.
        categories = get_categories(xml_files)
    bnd_id = START_BOUNDING_BOX_ID
    tbar = tqdm(xml_files)
    for xml_file in tbar:
        root = ET.parse(xml_file).getroot()
        path = get(root, "path")
        if len(path) == 1:
            filename = os.path.basename(path[0].text)
        elif len(path) == 0:
            filename = get_and_check(root, "filename", 1).text
        else:
            raise ValueError("%d paths found in %s" % (len(path), xml_file))
        # The filename must be a number: it becomes the COCO image id.
        image_id = get_filename_as_int(filename)
        size = get_and_check(root, "size", 1)
        width = int(get_and_check(size, "width", 1).text)
        height = int(get_and_check(size, "height", 1).text)
        json_dict["images"].append({
            "file_name": filename,
            "height": height,
            "width": width,
            "id": image_id,
        })
        # Segmentation is not supported; annotations get an empty list.
        for obj in get(root, "object"):
            category = get_and_check(obj, "name", 1).text
            if category not in categories:
                # Unknown class: append it with the next free id.
                categories[category] = len(categories)
            category_id = categories[category]
            bndbox = get_and_check(obj, "bndbox", 1)
            # VOC boxes are 1-based; shift the min corner to 0-based.
            xmin = int(float(get_and_check(bndbox, "xmin", 1).text)) - 1
            ymin = int(float(get_and_check(bndbox, "ymin", 1).text)) - 1
            xmax = int(float(get_and_check(bndbox, "xmax", 1).text))
            ymax = int(float(get_and_check(bndbox, "ymax", 1).text))
            assert xmax > xmin
            assert ymax > ymin
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            json_dict["annotations"].append({
                "area": o_width * o_height,
                "iscrowd": 0,
                "image_id": image_id,
                "bbox": [xmin, ymin, o_width, o_height],
                "category_id": category_id,
                "id": bnd_id,
                "ignore": 0,
                "segmentation": [],
            })
            bnd_id = bnd_id + 1
        tbar.set_description('SPLIT:{}'.format(split))

    for cate, cid in categories.items():
        json_dict["categories"].append({"supercategory": "none", "id": cid, "name": cate})

    # Bug fix: guard against a bare file name (os.makedirs('') raises).
    out_dir = os.path.dirname(json_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    # Bug fix: use a context manager so the handle is closed even if
    # serialization raises (the original leaked the handle on error).
    with open(json_file, "w") as json_fp:
        json.dump(json_dict, json_fp)


# Gather the per-split XML lists and convert each to a COCO JSON file.
xml_val_files = glob.glob(os.path.join(xml_val, "*.xml"))
xml_test_files = glob.glob(os.path.join(xml_test, "*.xml"))
xml_train_files = glob.glob(os.path.join(xml_train, "*.xml"))

# Bug fix: the original concatenated the annotations dir and the file name
# without a separator ('...annotationsval2017.json'), writing the JSON
# files next to the annotations folder instead of inside it.
convert(xml_val_files, os.path.join(coco_json_annotations, 'val2017.json'), split='val')
convert(xml_test_files, os.path.join(coco_json_annotations, 'test2017.json'), split='test')
convert(xml_train_files, os.path.join(coco_json_annotations, 'train2017.json'), split='train')

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
你可以使用一些工具和代码来将VOC格式数据换为COCO格式,以便在mmdetection中使用。以下是一种可能的方法: 1. 首先,确保你已经安装了`mmcv`和`mmdet`库。你可以使用以下命令安装它们: ``` pip install mmcv-full mmdet ``` 2. 下载并解压VOC数据集,并将其组织为以下结构: ``` VOCdevkit/ ├── VOC2007 │ ├── Annotations │ ├── ImageSets │ └── JPEGImages └── VOC2012 ├── Annotations ├── ImageSets └── JPEGImages ``` 3. 创建一个Python脚本,例如`voc2coco.py`,并使用以下代码来进行VOCCOCO格式换: ```python import os import json from xml.etree.ElementTree import Element, SubElement, tostring from xml.dom.minidom import parseString def parse_voc_annotation(ann_dir, img_dir, labels=[]): # 读取VOC标注文件和图像文件夹 ann_files = sorted(os.listdir(ann_dir)) img_files = sorted(os.listdir(img_dir)) assert len(ann_files) == len(img_files), "Number of annotation files doesn't match number of image files" # 构建COCO格式标注数据结构 coco_data = { "images": [], "annotations": [], "categories": [] } category_id = 0 for i, ann_file in enumerate(ann_files): img_file = img_files[i] img_id = i + 1 # 解析VOC标注文件 ann_path = os.path.join(ann_dir, ann_file) tree = parseString(open(ann_path).read()) root = tree.documentElement # 获取图像信息 img_width = int(root.getElementsByTagName("width")[0].childNodes[0].data) img_height = int(root.getElementsByTagName("height")[0].childNodes[0].data) img_name = img_file.split(".")[0] # 添加图像信息到coco_data["images"] coco_data["images"].append({ "file_name": img_file, "height": img_height, "width": img_width, "id": img_id }) # 解析VOC标注信息 objects = root.getElementsByTagName("object") for obj in objects: name = obj.getElementsByTagName("name")[0].childNodes[0].data if name not in labels: labels.append(name) category_id = labels.index(name) + 1 bbox = obj.getElementsByTagName("bndbox")[0] xmin = int(bbox.getElementsByTagName("xmin")[0].childNodes[0].data) ymin = int(bbox.getElementsByTagName("ymin")[0].childNodes[0].data) xmax = int(bbox.getElementsByTagName("xmax")[0].childNodes[0].data) ymax = int(bbox.getElementsByTagName("ymax")[0].childNodes[0].data) width = xmax - xmin height = ymax - ymin # 
添加标注信息到coco_data["annotations"] coco_data["annotations"].append({ "image_id": img_id, "category_id": category_id, "bbox": [xmin, ymin, width, height], "area": width * height, "iscrowd": 0, "id": len(coco_data["annotations"]) + 1 }) # 构建coco_data["categories"] for i, label in enumerate(labels): coco_data["categories"].append({ "id": i + 1, "name": label, "supercategory": "object" }) return coco_data if __name__ == '__main__': ann_dir = 'VOCdevkit/VOC2007/Annotations' img_dir = 'VOCdevkit/VOC2007/JPEGImages' labels = [] coco_data = parse_voc_annotation(ann_dir, img_dir, labels) # 保存为COCO格式的JSON文件 with open('annotations.json', 'w') as f: json.dump(coco_data, f) ``` 4. 运行`voc2coco.py`脚本,它将生成一个名为`annotations.json`的COCO格式标注文件。 现在,你可以使用这个生成的COCO格式的标注文件在mmdetection中进行训练或评估。
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值