[Code Notes] (Image segmentation data) to (VOC object detection data) to (COCO object detection data)

I recently helped run an experiment that applies an object detection algorithm to a segmentation dataset, so I had to convert between these data formats.

Original dataset:

Infrared small-target data, as shown below (the original image and its ground-truth (GT) mask).

(Image segmentation data) to (VOC object detection data)

The basic idea is to use OpenCV contours and their bounding rectangles to obtain the boxes, and then write them into VOC-style XML files:

import cv2
import numpy as np
import pandas as pd
import os
 
def cv_show(img, name):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
 
def get_coor(img):
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    # ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)  # Otsu threshold to get a binary image
    # contours, heriachy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours, heriachy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # img is already a binary mask

    # rect = cv2.minAreaRect(contours[0])     # OpenCV rotated-rectangle approach
    # points = cv2.boxPoints(rect)
    # points = np.int0(points)
    conter_lists=[]

    if len(contours)>1:
        print('debug_begin...')
    
    for i, contour in enumerate(contours):
        # if i == len(contours)-1:
        print('i:', i)
        a = sorted(contour[:, 0], key=lambda x: x[0])  # sort all contour points by x, ascending
        x_min = a[0][0]
        x_max = a[-1][0]
        b = sorted(contour[:, 0], key=lambda x: x[1])  # sort all contour points by y, ascending
        y_min = b[0][1]
        y_max = b[-1][1]
        # rec = img
            # cv2.drawContours(img, contours, i, (0, 0, 255), 5)
        # cv2.rectangle(rec, (x_min, y_min), (x_max, y_max), (0, 255, 0), 3)
        # cv2.imshow('rectangle', rec)
        # cv2.waitKey()
        # widen degenerate (single-pixel wide/high) boxes so that xmin < xmax and ymin < ymax
        if x_min == x_max:
            x_min = x_min - 2
        if y_min == y_max:
            y_min = y_min - 2

        assert (x_min >= 0), "x_min must be >= 0"
        assert (y_min >= 0), "y_min must be >= 0"

        conter_lists.append([x_min, y_min, x_max, y_max])
    return conter_lists

def save_xml_top(src_xml_dir, img_name, h, w):
 
    xml_file = open((src_xml_dir + '/' + img_name + '.xml'), 'a')
    xml_file.write('<annotation>\n')
    xml_file.write('    <folder>NUST</folder>\n')
    xml_file.write('    <filename>' + str(img_name) + '.png' + '</filename>\n')
    xml_file.write('    <size>\n')
    xml_file.write('        <width>' + str(w) + '</width>\n')
    xml_file.write('        <height>' + str(h) + '</height>\n')
    xml_file.write('        <depth>3</depth>\n')
    xml_file.write('    </size>\n')
    xml_file.write('    <segmented>0</segmented>\n')
    xml_file.close()
 
def save_xml_mid(src_xml_dir, img_name, x1, y1, x2, y2):
 
    xml_file = open((src_xml_dir + '/' + img_name + '.xml'), 'a')
    # xml_file.write('\n')
    xml_file.write('    <object>\n')
    xml_file.write('        <name>' + 'Target' + '</name>\n')
    xml_file.write('        <pose>Unspecified</pose>\n')
    xml_file.write('        <truncated>0</truncated>\n')
    xml_file.write('        <difficult>0</difficult>\n')
    xml_file.write('        <pixels>\n')
    xml_file.write('            <id>1</id>\n')
    xml_file.write('        </pixels>\n')
    xml_file.write('        <bndbox>\n')
    xml_file.write('            <xmin>' + str(x1) + '</xmin>\n')
    xml_file.write('            <ymin>' + str(y1) + '</ymin>\n')
    xml_file.write('            <xmax>' + str(x2) + '</xmax>\n')
    xml_file.write('            <ymax>' + str(y2) + '</ymax>\n')
    xml_file.write('        </bndbox>\n')
    xml_file.write('    </object>\n')
    xml_file.close()

def save_xml_bot(src_xml_dir, img_name):
    xml_file = open((src_xml_dir + '/' + img_name + '.xml'), 'a')
    # xml_file.write('\n')
    xml_file.write('</annotation>')
    xml_file.close()
 
file_dir = 'test_data_gt/'           # folder containing the binary GT masks
save_xml_dir = 'voc_label_general/'  # output folder for the VOC xml files
for name in os.listdir(file_dir):
    # print(name)
    # if name[-5]=='2':
    img_path = os.path.join(file_dir, name)
    img = cv2.imread(img_path,flags=0)
    # img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), -1)
    h, w = img.shape[0],img.shape[1]
    img_name = name.split('.')[0]
    print(img_name)
    contour_point_lists=get_coor(img)

    save_xml_top(save_xml_dir,img_name, h, w)
    for i, contour_point in enumerate(contour_point_lists):
        save_xml_mid(save_xml_dir,img_name, contour_point[0], contour_point[1], contour_point[2], contour_point[3])
    save_xml_bot(save_xml_dir,img_name)
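
As a side note, the manual min/max sorting in get_coor can also be done with OpenCV's cv2.boundingRect, which returns the axis-aligned box of a contour directly. Below is a minimal sketch of that variant (get_coor_rect is a name introduced here, not part of the original script):

import cv2

def get_coor_rect(img):
    # img is a single-channel binary mask (0 / 255)
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)  # top-left corner plus width/height
        x_min, y_min = x, y
        x_max, y_max = x + w - 1, y + h - 1     # rightmost / bottommost pixel of the contour
        # widen degenerate one-pixel boxes, mirroring the original script,
        # but clip at 0 so targets touching the image border do not go negative
        if x_min == x_max:
            x_min = max(x_min - 2, 0)
        if y_min == y_max:
            y_min = max(y_min - 2, 0)
        boxes.append([x_min, y_min, x_max, y_max])
    return boxes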
 
(VOC object detection data) to (COCO object detection data)

The detection algorithm used in the experiment expects the COCO data format, so the VOC data is then converted to COCO:

import os
import glob
import json
import shutil
import numpy as np
import xml.etree.ElementTree as ET
 
START_BOUNDING_BOX_ID = 1
 
def get(root, name):
    return root.findall(name)
 
def get_and_check(root, name, length):
    vars = root.findall(name)
    if len(vars) == 0:
        raise NotImplementedError('Can not find %s in %s.'%(name, root.tag))
    if length > 0 and len(vars) != length:
        raise NotImplementedError('The size of %s is supposed to be %d, but is %d.'%(name, length, len(vars)))
    if length == 1:
        vars = vars[0]
    return vars
 
 
def convert(xml_list, json_file):
    json_dict = {"info":['none'], "license":['none'], "images": [], "annotations": [], "categories": []}
    categories = pre_define_categories.copy()
    bnd_id = START_BOUNDING_BOX_ID
    all_categories = {}
    for index, line in enumerate(xml_list):
        # print("Processing %s"%(line))
        xml_f = line
        tree = ET.parse(xml_f)
        root = tree.getroot()
        
        filename = os.path.basename(xml_f)[:-4] + ".png"

        #test_data    
        image_id = filename.split('.')[0][-3:]

        #train_data
        # image_id = filename.split('.')[0][-6:]

#         print('filename is {}'.format(image_id))
        
        size = get_and_check(root, 'size', 1)
        width = int(get_and_check(size, 'width', 1).text)
        height = int(get_and_check(size, 'height', 1).text)
        #test_data
        image = {'file_name': filename, 'height': height, 'width': width, 'id':image_id}

        #train_data
        # image = {'file_name': filename[:7]+'1.png', 'height': height, 'width': width, 'id':image_id}
        json_dict['images'].append(image)
        ## Currently we do not support segmentation
        #  segmented = get_and_check(root, 'segmented', 1).text
        #  assert segmented == '0'
        for obj in get(root, 'object'):
            category = get_and_check(obj, 'name', 1).text
            if category in all_categories:
                all_categories[category] += 1
            else:
                all_categories[category] = 1
            if category not in categories:
                if only_care_pre_define_categories:
                    continue
                new_id = len(categories) + 1
                print("[warning] category '{}' not in 'pre_define_categories'({}), create new id: {} automatically".format(category, pre_define_categories, new_id))
                categories[category] = new_id
            category_id = categories[category]
            bndbox = get_and_check(obj, 'bndbox', 1)
            xmin = int(float(get_and_check(bndbox, 'xmin', 1).text))
            ymin = int(float(get_and_check(bndbox, 'ymin', 1).text))
            xmax = int(float(get_and_check(bndbox, 'xmax', 1).text))
            ymax = int(float(get_and_check(bndbox, 'ymax', 1).text))
            assert(xmax > xmin), "xmax <= xmin, {}".format(line)
            assert(ymax > ymin), "ymax <= ymin, {}".format(line)
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            ann = {'area': o_width*o_height, 'iscrowd': 0, 'image_id':
                   image_id, 'bbox':[xmin, ymin, o_width, o_height],
                   'category_id': category_id, 'id': bnd_id, 'ignore': 0,
                   'segmentation': [[xmin,ymin,xmin,ymax,xmax,ymax,xmax,ymin]]}
            json_dict['annotations'].append(ann)
            bnd_id = bnd_id + 1
 
    for cate, cid in categories.items():
        cat = {'supercategory': 'none', 'id': cid, 'name': cate}
        json_dict['categories'].append(cat)
    json_fp = open(json_file, 'w')
    json_str = json.dumps(json_dict)
    json_fp.write(json_str)
    json_fp.close()
    print("------------create {} done--------------".format(json_file))
    print("find {} categories: {} -->>> your pre_define_categories {}: {}".format(len(all_categories), all_categories.keys(), len(pre_define_categories), pre_define_categories.keys()))
    print("category: id --> {}".format(categories))
    print(categories.keys())
    print(categories.values())
 
 
if __name__ == '__main__':
    # folder containing the VOC xml annotations
    xml_dir = './test_data_voc'
    # json file for the training data
    save_json_train = './data_coco/train.json'
    # json file for the validation data
    save_json_val = './data_coco/val.json'
    # json file for the test data
    save_json_test = './data_coco/test.json'
    # categories; for multiple classes just add the names to the list, e.g. ['dog', 'person', 'cat']
    classes = ['Target']
    pre_define_categories = {}
    for i, cls in enumerate(classes):
        pre_define_categories[cls] = i+1
    
    only_care_pre_define_categories = True

    # dataset split ratios; with train_ratio = 0 and val_ratio = 1, every xml goes into val.json
    train_ratio = 0
    val_ratio = 1
    print('xml_dir is {}'.format(xml_dir))
    xml_list = glob.glob(xml_dir + "/*.xml")  
    # xml_list = np.sort(xml_list)
#     print('xml_list is {}'.format(xml_list))
    np.random.seed(100)
    np.random.shuffle(xml_list)
 
    train_num = int(len(xml_list)*train_ratio)
    val_num = int(len(xml_list)*val_ratio)
    print('number of training samples: {}'.format(train_num))
    print('number of validation samples: {}'.format(val_num))
    print('number of test samples: {}'.format(len(xml_list) - train_num - val_num))
    xml_list_val = xml_list[:val_num]
    xml_list_train = xml_list[val_num:train_num+val_num]
    xml_list_test = xml_list[train_num+val_num:]  
    # convert the xml files of the training split to coco format
    convert(xml_list_train, save_json_train)
    # convert the xml files of the validation split to coco format
    convert(xml_list_val, save_json_val)
    # convert the xml files of the test split to coco format
    convert(xml_list_test, save_json_test)
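
For reference, the entries written by convert() have the following shape. The field names come directly from the script above; the concrete numbers below are only illustrative:

# one element of json_dict['images'] (the id is the string sliced from the file name)
{'file_name': '000123.png', 'height': 256, 'width': 256, 'id': '123'}

# one element of json_dict['annotations']; bbox is [xmin, ymin, width, height] in pixels
{'area': 42, 'iscrowd': 0, 'image_id': '123',
 'bbox': [120, 87, 6, 7], 'category_id': 1, 'id': 1, 'ignore': 0,
 'segmentation': [[120, 87, 120, 94, 126, 94, 126, 87]]}

# one element of json_dict['categories']
{'supercategory': 'none', 'id': 1, 'name': 'Target'}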

Data reading and box drawing

Read the generated .json file with the pycocotools COCO API:

import os
from pycocotools.coco import COCO
from PIL import Image, ImageDraw

coco = COCO(label_path)      # label_path: the COCO .json file generated above
img_ids = coco.getImgIds()   # all image ids in the annotation file
img = Image.open(os.path.join(pic_path, pic_name)).convert('RGB')  # pic_path / pic_name: image folder and file name
draw = ImageDraw.Draw(img)
# draw labels
ann_ids = coco.getAnnIds(imgIds=[img_ids[i]])  # i: index of the image being visualized
ann_info = coco.loadAnns(ann_ids)
for j in range(len(ann_info)):
    x, y, w, h = ann_info[j]['bbox']
    x1, y1, x2, y2 = int(x), int(y), int(x + w), int(y + h)
    draw.rectangle((x1, y1, x2, y2))
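
The same pattern can be wrapped in a small loop that draws the boxes for every image in the annotation file and saves the results to disk. A minimal sketch, assuming label_path and pic_path as above; draw_all and out_dir are names introduced here:

import os
from pycocotools.coco import COCO
from PIL import Image, ImageDraw

def draw_all(label_path, pic_path, out_dir):
    # label_path: COCO .json from the conversion step
    # pic_path:   folder with the original images
    # out_dir:    folder the visualizations are written to
    os.makedirs(out_dir, exist_ok=True)
    coco = COCO(label_path)
    for img_id in coco.getImgIds():
        info = coco.loadImgs(img_id)[0]
        img = Image.open(os.path.join(pic_path, info['file_name'])).convert('RGB')
        draw = ImageDraw.Draw(img)
        for ann in coco.loadAnns(coco.getAnnIds(imgIds=[img_id])):
            x, y, w, h = ann['bbox']
            draw.rectangle((int(x), int(y), int(x + w), int(y + h)), outline=(0, 255, 0))
        img.save(os.path.join(out_dir, info['file_name']))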
    

Rendered results:
