Python: Converting the COCO dataset and the Objects365 (Megvii) dataset to VOC format

I recently needed to process the COCO dataset and Megvii's Objects365 dataset, and found relatively little material about it online. COCO at least has an official API; Objects365 has almost nothing. So I am writing this note with a simple script that may help others working on the same thing. The script is rough and only meant as a starting point.

github: https://github.com/pansionpan/convert_coco_object365


1. Converting COCO / Objects365 to VOC

Without further ado, here is the code.

There are three .py files in total:

main.py

import os
import argparse
from coco_annos2voc import main_coco
from object365_annos2voc import main_object365

headstr = """\
<annotation>
    <folder>VOC</folder>
    <filename>%s</filename>
    <source>
        <database>My Database</database>
        <annotation>COCO</annotation>
        <image>flickr</image>
        <flickrid>NULL</flickrid>
    </source>
    <owner>
        <flickrid>NULL</flickrid>
        <name>company</name>
    </owner>
    <size>
        <width>%d</width>
        <height>%d</height>
        <depth>%d</depth>
    </size>
    <segmented>0</segmented>
"""

objstr = """\
    <object>
        <name>%s</name>
        <pose>Unspecified</pose>
        <truncated>0</truncated>
        <difficult>0</difficult>
        <bndbox>
            <xmin>%d</xmin>
            <ymin>%d</ymin>
            <xmax>%d</xmax>
            <ymax>%d</ymax>
        </bndbox>
    </object>
"""

tailstr = '''\
</annotation>
'''

def make_parser():
    parser = argparse.ArgumentParser(description="convert annotations to VOC format")
    parser.add_argument("-d", "--dataset", dest="dataset", required=True, help="dataset name", type=str, choices=["coco", "object365"])
    parser.add_argument("-i", "--input", dest="input_dir", required=True, help="the input dir of the pictures", type=str)
    parser.add_argument("-y", "--dataset-year", dest="dyear", default="", help="the year of the coco dataset, e.g. 2014", type=str)
    parser.add_argument("-c", "--class", dest="classes", nargs="*", help="the classes to select, e.g. ['cell phone', 'handbag', 'laptop', 'cat', 'dog']", type=str)
    parser.add_argument("--output-class", dest="output_class", action='store_true', default=False, help="also write the class ids into the list txt")
    parser.add_argument("-o", "--output", dest="output_dir", help="the output dir for the results", type=str)
    return parser


def main():
    parser = make_parser()

    args = vars(parser.parse_args())
    dataset = args["dataset"]
    input_dir = args["input_dir"]
    dyear = args["dyear"]
    classes = args["classes"]
    output_class = args["output_class"]
    output_dir = args["output_dir"]

    # anno_dir = os.path.join(output_dir, "annotations")

    if dataset == "object365":
        # python main.py -d object365 -i "C:\kuangshi\Objects365" -o "C:\kuangshi\result/"
        # parameters: input_dir, output_dir, headstr, tailstr, objstr
        main_object365(input_dir, output_dir, headstr, tailstr, objstr)
    elif dataset == "coco":
        # python main.py -d coco -y 2014 -i "C:\coco" -o "C:\coco\result/"
        if dyear == "":
            print("dataset-year is required")
            exit(1)
        main_coco(input_dir, dyear, classes, output_class, output_dir, headstr, objstr, tailstr)

if __name__ == '__main__':
    main()
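
As a quick illustration of how the three templates above are used, the snippet below (file name and box values are made up) fills them in the same way the conversion scripts do:

# A minimal sketch with hypothetical values, showing how the templates compose:
head = headstr % ("000000139.jpg", 640, 426, 3)   # filename, width, height, depth
body = objstr % ("cat", 10, 20, 110, 220)         # class name, xmin, ymin, xmax, ymax
print(head + body + tailstr)                      # prints a complete VOC-style annotation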


coco_annos2voc.py: COCO to VOC

from pycocotools.coco import COCO
import os
import shutil
from tqdm import tqdm
import matplotlib.pyplot as plt
import cv2
from PIL import Image, ImageDraw
import argparse

def mkr(path):
    if os.path.exists(path):
        shutil.rmtree(path)
    
    os.makedirs(path)

def id2name(coco):
    classes=dict()
    for cls in coco.dataset['categories']:
        classes[cls['id']]=cls['name']
    return classes


def write_xml(anno_path, objstr, head, objs, tail):
    print("write xml file: ", anno_path)
    with open(anno_path, 'w', encoding='utf-8') as f:
        f.write(head)
        for obj in objs:
            f.write(objstr % (obj[0], obj[1], obj[2], obj[3], obj[4]))
        f.write(tail)

def save_annos(img_path, anno_path, filename, objs, headstr, objstr, tailstr):
    img = cv2.imread(img_path)
    if img is None:
        print(filename + " could not be read")
        return
    if img.shape[2] == 1:
        print(filename + " is not an RGB image")
        return
    # shutil.copy(img_path, dst_imgpath)

    head = headstr % (filename, img.shape[1], img.shape[0], img.shape[2])
    write_xml(anno_path, objstr, head, objs, tailstr)


def create_annos(coco, img_id, cls_map, cls_ids):
    annIds = coco.getAnnIds(imgIds = [img_id], catIds = cls_ids, iscrowd = None)
    anns = coco.loadAnns(annIds)

    print(anns)

    objs = []
    for ann in anns:
        cls_id = ann['category_id']
        if cls_id in cls_ids and 'bbox' in ann:
            bbox = ann['bbox']
            xmin = int(bbox[0])
            ymin = int(bbox[1])
            xmax = int(bbox[2] + bbox[0])
            ymax = int(bbox[3] + bbox[1])
            obj = [cls_map[cls_id], xmin, ymin, xmax, ymax, cls_id]
            objs.append(obj)

    return objs

def main_coco(input_dir, dyear, class_names, output_class, output_dir, headstr, objstr, tailstr):
    for dataset in ["train", "val"]:
        img_dir = os.path.join(input_dir, dataset + dyear)
        anno_dir = os.path.join(output_dir, 'annotations_xml_{}'.format(dataset))
        if not os.path.exists(anno_dir):
            mkr(anno_dir)

        annFile = os.path.join(input_dir, "annotations", "instances_{}{}.json".format(dataset, dyear))

        list_file = os.path.join(output_dir, 'annotations_xml_coco_{}{}.txt'.format(dataset, dyear))

        coco = COCO(annFile)

        #show all classes in coco
        cls_map = id2name(coco)

        #[1, 2, 3, 4, 6, 8]
        cls_ids = coco.getCatIds(catNms = class_names)
        print("class_ids:", cls_ids)

        # for each class id, collect all matching image ids
        img_ids = []
        for cls_id in cls_ids:
            img_ids.extend(coco.getImgIds(catIds = cls_id))
        img_ids = set(img_ids)
        print("image ids:", img_ids)

        print("list_file:", list_file)
        with open(list_file, 'w', encoding='utf-8') as f:
            for imgId in tqdm(img_ids):
                img = coco.loadImgs(imgId)
                filename = img[0]['file_name']
                img_id = img[0]['id']

                objs = create_annos(coco, img_id, cls_map, cls_ids)

                anno_path = os.path.join(anno_dir, filename[:-3] + 'xml')
                img_path = os.path.join(img_dir, filename)

                save_annos(img_path, anno_path, filename, objs, headstr, objstr, tailstr)

                # write list file
                line = anno_path + "\t" + img_path + "\t"
                if output_class:
                    object_cls_ids = set([str(obj[5]) for obj in objs])
                    print("cls_ids:", object_cls_ids)
                    line += "\t".join(object_cls_ids)

                line += "\n"

                f.write(line)
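
If you want to sanity-check the generated annotations, a small standalone snippet like this (the path is just an example) parses one of the produced XML files with the standard library and prints its boxes:

import xml.etree.ElementTree as ET

# Example path; point it at any XML file produced by the script above.
tree = ET.parse("annotations_xml_val/COCO_val2014_000000000042.xml")
for obj in tree.getroot().iter("object"):
    name = obj.find("name").text
    box = obj.find("bndbox")
    print(name, [int(box.find(tag).text) for tag in ("xmin", "ymin", "xmax", "ymax")])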

object365_annos2voc.py: Objects365 to VOC

import json
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import cv2
import shutil
import os
import subprocess
import argparse
import glob
from tqdm import tqdm

"""
Extract each image's category and bbox values from the json annotations, then convert them into the required VOC format.
"""

# cellphone:79 key:266 handbag:13 laptop:77
classes_names = {79: "cellphone", 266: "key", 13: "handbag", 77: "laptop"}

def save_annotations(anno_file_path, imgs_file_path, output_anno_dir, output_dir, headstr, tailstr, objectstr, dataset):
    # open json file(val.json or train.json)
    with open(anno_file_path, 'r') as f:
        data = json.load(f)
        print("提取长度:", len(data["annotations"]))
        # iterate all annotations imformation
        for i in range(0, len(data["annotations"])):
            # check category class whether in the classes list
            if data["annotations"][i]["category_id"] in classes_names.keys():
                # find the image id which class meet the confitions
                class_imgs_id = data["annotations"][i]["image_id"]
                print("class_imgs_id:", class_imgs_id)
                for j in range(0, len(data["images"])):
                    objs = []
                    if class_imgs_id == data["images"][j]["id"]:
                        print(data["images"][j]["file_name"])
                        # img_path points at the image file on disk
                        img_path = os.path.join(imgs_file_path, data["images"][j]["file_name"])
                        # bbox
                        bbox = data["annotations"][i]["bbox"]
                        xmin = int(bbox[0])
                        ymin = int(bbox[1])
                        xmax = int(bbox[2] + bbox[0])
                        ymax = int(bbox[3] + bbox[1])
                        class_name = classes_names.get(int(data["annotations"][i]["category_id"]))
                        obj = [class_name, xmin, ymin, xmax, ymax, class_name]
                        objs.append(obj)

                        img_name = os.path.basename(img_path)
                        save_head(objs, img_name, img_path, output_anno_dir, output_dir, headstr, tailstr, objectstr, dataset)

    print(" 提取完成 ")


def mkr(path):
    if os.path.exists(path):
        shutil.rmtree(path)

    os.makedirs(path)

def write_txt(output_dir, anno_path, img_path, dataset):
    list_name = os.path.join(output_dir, 'annotations_xml_object_{}.txt'.format(dataset))
    # append mode creates the list file if it does not exist yet
    with open(list_name, 'a', encoding='utf-8') as list_f:
        list_f.write(anno_path + "\t" + img_path + "\n")



def write_xml(anno_path, objs, img_path, output_dir, head, objectstr, tailstr, dataset):
    print(anno_path)
    # if this xml is being written for the first time, just write it out directly
    if not os.path.exists(anno_path):
        with open(anno_path, 'w') as f:
            f.write(head)
            for obj in objs:
                f.write(objectstr % (obj[0], obj[1], obj[2], obj[3], obj[4]))
            f.write(tailstr)
            write_txt(output_dir, anno_path, img_path, dataset)
    # otherwise the xml already exists: append the new objects before the closing tag
    else:
        with open(anno_path, 'r', encoding='utf-8') as fs:
            content = fs.read()
        # cut off the closing </annotation>, append the new objects, then close it again
        end_annotation = content.rfind("</annotation>")
        with open(anno_path, 'w', encoding='utf-8') as f:
            f.write(content[:end_annotation])
            for obj in objs:
                f.write(objectstr % (obj[0], obj[1], obj[2], obj[3], obj[4]))
            f.write(tailstr)
    # write_txt(output_dir, anno_path, img_path, dataset, classes_name)


def save_head(objs, img_name, img_path, output_anno_dir, output_dir, headstr, tailstr, objectstr, dataset):
    imgs = cv2.imread(img_path)
    anno_path = os.path.join(output_anno_dir, img_name[:-3] + "xml")
    print("anno_path:", anno_path)

    if imgs is None:
        print(img_name + " could not be read")
        return
    if imgs.shape[2] == 1:
        print(img_name + " is not an RGB image")
        return

    head = headstr % (img_name, imgs.shape[1], imgs.shape[0], imgs.shape[2])
    write_xml(anno_path, objs, img_path, output_dir, head, objectstr, tailstr, dataset)


def find_anno_img(input_dir):
    # derive the Annotations dir and Images dir from the input dir
    anno_dir = os.path.join(input_dir, "Annotations")
    img_dir = os.path.join(input_dir, "Images")
    return anno_dir, img_dir


def main_object365(input_dir, output_dir, headstr, tailstr, objectstr):
    anno_dir, img_dir = find_anno_img(input_dir)
    for dataset in ["val"]:
        # xml output dir path
        output_anno_dir = os.path.join(output_dir, "annotations_xml_{}".format(dataset))
        if not os.path.exists(output_anno_dir):
            mkr(output_anno_dir)

        # path to the json annotation file
        anno_file_path = os.path.join(anno_dir, dataset, dataset + ".json")
        # path to the image directory
        imgs_file_path = os.path.join(img_dir, dataset)
        save_annotations(anno_file_path, imgs_file_path, output_anno_dir, output_dir, headstr, tailstr, objectstr, dataset)
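
One performance note: save_annotations rescans the whole data["images"] list for every matching annotation, which gets slow on the full Objects365 annotation file. A dictionary built once removes that inner loop; the helper below is only a sketch using the same field names, not part of the original script:

def build_image_index(data):
    # Map image id -> image record so every annotation is matched in O(1)
    # instead of scanning data["images"] for each annotation.
    return {img["id"]: img for img in data["images"]}

# sketch of the lookup inside save_annotations:
# images_by_id = build_image_index(data)
# record = images_by_id.get(class_imgs_id)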

The above is a single script that supports both conversions, COCO to VOC and Objects365 to VOC. See the GitHub repo below for detailed usage:

https://github.com/pansionpan/convert_coco_object365
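
For quick reference, invocations along these lines should work (the paths are placeholders; -y is mandatory for COCO). The COCO input dir is the dataset root that contains the annotations folder and the train/val image folders; the Objects365 input dir is expected to contain the Annotations and Images subfolders, as find_anno_img assumes.

python main.py -d coco -y 2014 -i "C:\coco" -c "cell phone" handbag laptop -o "C:\coco\result/" --output-class

python main.py -d object365 -i "C:\kuangshi\Objects365" -o "C:\kuangshi\result/"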

Feel free to leave a comment if you have any questions.
