Converting the DAGM dataset to VOC format

The labels that ship with the DAGM dataset are quite crude (each defect is only marked with an ellipse), so a labmate recently re-annotated DAGM. Since the other code we run expects the VOC format, I wrote the script below to convert the DAGM-style dataset into VOC format. If you are not sure what each folder of a VOC dataset contains, see "VOC数据集格式解读" (an explanation of the VOC dataset format). To use the script, just point it at the DAGM root directory and the folder where the converted dataset should be stored, and additionally create a palette.json file with the contents given at the end of this post.

Note: class indices start from 1 (0 is reserved for the background).
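
For reference, the script expects the original DAGM layout sketched below and produces a standard VOC layout (file names in the tree are only examples):

DAGM_root/
├── Class1/
│   ├── Train/
│   │   ├── 0001.PNG
│   │   ├── ...
│   │   └── Label/
│   │       └── 0001_label.PNG
│   └── Test/
│       └── ...
├── Class2/
└── ... Class10/

VOC_root/
├── Annotations/          # one XML with the defect bounding box per labelled image
├── JPEGImages/           # all images, renamed to <class number>_<id>.jpg
├── ImageSets/
│   ├── Main/             # ClassN_train.txt / ClassN_test.txt ("name 1" = defective, "name 0" = defect-free)
│   └── Segmentation/     # train.txt / test.txt (labelled images only)
├── SegmentationClass/    # class-index masks, colorized with palette.json
└── SegmentationObject/   # created but left empty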

import os
import json
import shutil

import cv2
import numpy as np
from tqdm import tqdm
from PIL import Image

# For an introduction to the VOC directory layout, see https://www.cnblogs.com/gshang/p/12964606.html
def makDirs(path):
    # create the directory (and any missing parents) if it does not exist yet
    if not os.path.exists(path):
        os.makedirs(path)

def creat_xml(xmldir,file,classname,xmin,ymin,xmax,ymax):
    # Write a VOC-style annotation XML containing a single bounding box for <file>.jpg.
    xml_file = open(os.path.join(xmldir,file+".xml"), 'w')
    xml_file.write('<annotation>\n')
    xml_file.write('\t<folder>'+"JPEGImages"+'</folder>\n')
    xml_file.write('\t<filename>' + file+".jpg" + '</filename>\n')
    # xml_file.write('\t<path>' + imagePath + '</path>\n')
    xml_file.write('\t<source>\n')
    xml_file.write('\t\t<database>' + 'Unknown' + '</database>\n')
    # xml_file.write('\t\t<annotation>' + 'simple' + '</annotation>\n')
    # xml_file.write('\t\t<image>flickr</image>\n')
    # xml_file.write('\t\t<flickrid>325991873</flickrid>\n')
    xml_file.write('\t</source>\n')
    # xml_file.write('\t<owner>\n')
    # xml_file.write('\t\t<flickrid>archin</flickrid>\n')
    # xml_file.write('\t\t<name>?</name>\n')
    # xml_file.write('\t</owner>\n')
    xml_file.write('\t<size>\n')
    # all DAGM images are 512x512; they are re-encoded as 3-channel JPEGs in main()
    xml_file.write('\t\t<width>' + "512" + '</width>\n')
    xml_file.write('\t\t<height>' + "512" + '</height>\n')
    xml_file.write('\t\t<depth>3</depth>\n')
    xml_file.write('\t</size>\n')
    xml_file.write('\t<segmented>0</segmented>\n')
    # write a single <object> entry for the defect bounding box
    for ann in [1]:  # each labelled DAGM image has exactly one defect annotation
        xml_file.write('\t<object>\n')
        xml_file.write('\t\t<name>' + classname + '</name>\n')
        xml_file.write('\t\t<pose>Unspecified</pose>\n')
        xml_file.write('\t\t<truncated>0</truncated>\n')
        xml_file.write('\t\t<difficult>0</difficult>\n')
        xml_file.write('\t\t<bndbox>\n')
        xml_file.write('\t\t\t<xmin>' + str(int(xmin)) + '</xmin>\n')
        xml_file.write('\t\t\t<ymin>' + str(int(ymin)) + '</ymin>\n')
        xml_file.write('\t\t\t<xmax>' + str(int(xmax)) + '</xmax>\n')
        xml_file.write('\t\t\t<ymax>' + str(int(ymax)) + '</ymax>\n')
        xml_file.write('\t\t</bndbox>\n')
        xml_file.write('\t</object>\n')
    xml_file.write('</annotation>')
    xml_file.close()

def main(dagm_dir,voc_dir):
    # parse the DAGM directory structure
    CLASSES = ('Class1', 'Class2', 'Class3', 'Class4', 'Class5',
               'Class6', 'Class7', 'Class8', 'Class9', 'Class10')
    # class indices start from 1; 0 is left for the background
    class2index=dict(zip(CLASSES,range(1,11)))
    dagmdataset=[]
    for classname in CLASSES:
        for datatype in ('Train','Test'):
            imgdir = os.path.join(dagm_dir, classname, datatype)
            label_imgdir = os.path.join(
                dagm_dir, classname, datatype, 'Label')
            imgs = os.listdir(imgdir)
            imgs = [x for x in imgs if '.PNG' in x]
            imgs_with_label = os.listdir(label_imgdir)
            # label masks are named "<id>_label.PNG"; strip the 10-character
            # "_label.PNG" suffix to recover the corresponding image name
            imgs_with_label = [
                x[:-10]+'.PNG' for x in imgs_with_label if '.PNG' in x]
            imgs_no_label = [i for i in imgs if i not in imgs_with_label]
            classdata=dict(
                imgroot=dagm_dir,
                classname=classname,
                datatype=datatype,
                label_data=imgs_with_label,
                unlabel_data=imgs_no_label
            )
            dagmdataset.append(classdata)


    # copy images, build segmentation masks, XML annotations and image-set lists
    train_list=[]
    test_list=[]
    for classdata in tqdm(dagmdataset):
        class_train_list_label=[]
        class_test_list_label=[]
        for imgname in tqdm(classdata["label_data"]):
            # rename e.g. Class3/0001.PNG -> 03_0001.jpg so names stay unique across classes
            new_name=classdata["classname"][5:].rjust(2,'0')+"_"+imgname.split(".")[0]+".jpg"
            if classdata["datatype"]=="Train":
                class_train_list_label.append(new_name)
                train_list.append(new_name)
            else:
                class_test_list_label.append(new_name)
                test_list.append(new_name)
            imgpath=os.path.join(classdata["imgroot"],classdata["classname"],classdata["datatype"],imgname)
            dst_imgpath=os.path.join(voc_dir,"JPEGImages",new_name)
            # imread loads the grayscale PNG as 3-channel BGR; imwrite re-encodes it as a JPEG
            src_img=cv2.imread(imgpath)
            cv2.imwrite(dst_imgpath,src_img)
            # shutil.copy(imgpath, dst_imgpath)

            segpath=os.path.join(classdata["imgroot"],classdata["classname"],classdata["datatype"],"Label",imgname[:4]+"_label.PNG")
            dst_segpath=os.path.join(voc_dir,"SegmentationClass",new_name.split(".")[0]+".png")
            mask=cv2.imread(segpath, 0)
            # defect pixels are stored as 255 in the DAGM label mask; grab their outline first
            # (OpenCV >= 4: findContours returns (contours, hierarchy))
            contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            # remap the defect region to its class index and mark the boundary with 255,
            # which is the "ignore" value in the VOC segmentation convention
            mask[np.where(mask==255)]=class2index[classdata["classname"]]
            cv2.drawContours(mask, contours, -1, 255, 1)
            cv2.imwrite(dst_segpath,mask)
            # bounding box of the defect (only the first contour is used)
            points=contours[0][:,0,:]
            xmin,xmax=points[:,0].min(),points[:,0].max()
            ymin,ymax=points[:,1].min(),points[:,1].max()
            creat_xml(os.path.join(voc_dir,"Annotations"),new_name.split(".")[0],classdata["classname"],xmin,ymin,xmax,ymax)
                        
        class_train_list_nolabel=[]
        class_test_list_nolabel=[]
        for imgname in tqdm(classdata["unlabel_data"]):
            
            new_name=classdata["classname"][5:].rjust(2,'0')+"_"+imgname.split(".")[0]+".jpg"
            if classdata["datatype"]=="Train":
                class_train_list_nolabel.append(new_name)
                # train_list.append(new_name)
            else:
                class_test_list_nolabel.append(new_name)
                # test_list.append(new_name)

            imgpath=os.path.join(classdata["imgroot"],classdata["classname"],classdata["datatype"],imgname)
            dst_imgpath=os.path.join(voc_dir,"JPEGImages",new_name)
            src_img=cv2.imread(imgpath)
            cv2.imwrite(dst_imgpath,src_img)
            # shutil.copy(imgpath, dst_imgpath)

            
            # dst_segpath=os.path.join(voc_dir,"SegmentationClass",new_name)
            # mask = np.zeros((512,512))
            # cv2.imwrite(dst_segpath,mask)
        # ImageSets/Main lists for per-class classification: "name 1" marks an image
        # containing a defect of this class, "name 0" marks a defect-free image
        if classdata["datatype"]=="Train":
            with open(os.path.join(voc_dir,"ImageSets","Main",classdata["classname"]+"_train.txt"),"w") as f:
                f.write("\n".join([x.split(".")[0]+" 1" for x in class_train_list_label]))
                f.write("\n")
                f.write("\n".join([x.split(".")[0]+" 0" for x in class_train_list_nolabel]))
        else:
            with open(os.path.join(voc_dir,"ImageSets","Main",classdata["classname"]+"_test.txt"),"w") as f:
                f.write("\n".join([x.split(".")[0]+" 1" for x in class_test_list_label]))
                f.write("\n")
                f.write("\n".join([x.split(".")[0]+" 0" for x in class_test_list_nolabel]))

    # ImageSets/Segmentation lists only the labelled (defective) images
    with open(os.path.join(voc_dir,"ImageSets","Segmentation","train.txt"),"w") as f:
        f.write("\n".join([x.split(".")[0] for x in train_list]))
    with open(os.path.join(voc_dir,"ImageSets","Segmentation","test.txt"),"w") as f:
        f.write("\n".join([x.split(".")[0] for x in test_list]))


def corlor_segpng(segdir,palette_path):
    # Attach the VOC color palette to every mask in segdir, turning the grayscale
    # class-index PNGs into palettized ("P" mode) images like the official VOC masks.
    with open(palette_path, "rb") as f:
        palette_dict = json.load(f)
        palette = []
        for v in palette_dict.values():
            palette += v

    files = os.listdir(segdir)
    for file in tqdm(files):
        if ".png" not in file:
            continue
        filepath = os.path.join(segdir, file)
        mask = Image.open(filepath)
        mask = Image.fromarray(np.asarray(mask))
        mask.putpalette(palette)
        mask.save(filepath)

if __name__ == '__main__':
    dagm_dir="/home/fj/data/DAGM_relabel_me"
    voc_dir ="/home/fj/data/DAGM_VOC_M"

    makDirs(os.path.join(voc_dir,"Annotations"))
    makDirs(os.path.join(voc_dir,"ImageSets","Main"))
    makDirs(os.path.join(voc_dir,"ImageSets","Segmentation"))
    makDirs(os.path.join(voc_dir,"JPEGImages"))
    makDirs(os.path.join(voc_dir,"SegmentationClass"))
    makDirs(os.path.join(voc_dir,"SegmentationObject"))  # created for completeness; this script leaves it empty
    main(dagm_dir,voc_dir)
    palette_path = "./palette.json"  # color palette (contents listed below)
    corlor_segpng(os.path.join(voc_dir,"SegmentationClass"),palette_path)
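
After the conversion it is worth spot-checking one generated annotation and mask. A minimal sketch (the file stem 01_0001 is hypothetical; substitute any name the script actually produced):

import xml.etree.ElementTree as ET
import numpy as np
from PIL import Image

voc_dir = "/home/fj/data/DAGM_VOC_M"
stem = "01_0001"  # hypothetical example stem

# the XML should contain one <object> with the defect class and its bounding box
ann = ET.parse(f"{voc_dir}/Annotations/{stem}.xml").getroot()
box = ann.find("object/bndbox")
print(ann.find("object/name").text,
      [int(box.find(k).text) for k in ("xmin", "ymin", "xmax", "ymax")])

# the mask should contain 0 (background), the class index, and 255 (defect boundary)
mask = np.asarray(Image.open(f"{voc_dir}/SegmentationClass/{stem}.png"))
print(np.unique(mask))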


Contents of palette.json:

{"0": [0, 0, 0], "1": [128, 0, 0], "2": [0, 128, 0], "3": [128, 128, 0], "4": [0, 0, 128], "5": [128, 0, 128], "6": [0, 128, 128], "7": [128, 128, 128], "8": [64, 0, 0], "9": [192, 0, 0], "10": [64, 128, 0], "11": [192, 128, 0], "12": [64, 0, 128], "13": [192, 0, 128], "14": [64, 128, 128], "15": [192, 128, 128], "16": [0, 64, 0], "17": [128, 64, 0], "18": [0, 192, 0], "19": [128, 192, 0], "20": [0, 64, 128], "21": [128, 64, 128], "22": [0, 192, 128], "23": [128, 192, 128], "24": [64, 64, 0], "25": [192, 64, 0], "26": [64, 192, 0], "27": [192, 192, 0], "28": [64, 64, 128], "29": [192, 64, 128], "30": [64, 192, 128], "31": [192, 192, 128], "32": [0, 0, 64], "33": [128, 0, 64], "34": [0, 128, 64], "35": [128, 128, 64], "36": [0, 0, 192], "37": [128, 0, 192], "38": [0, 128, 192], "39": [128, 128, 192], "40": [64, 0, 64], "41": [192, 0, 64], "42": [64, 128, 64], "43": [192, 128, 64], "44": [64, 0, 192], "45": [192, 0, 192], "46": [64, 128, 192], "47": [192, 128, 192], "48": [0, 64, 64], "49": [128, 64, 64], "50": [0, 192, 64], "51": [128, 192, 64], "52": [0, 64, 192], "53": [128, 64, 192], "54": [0, 192, 192], "55": [128, 192, 192], "56": [64, 64, 64], "57": [192, 64, 64], "58": [64, 192, 64], "59": [192, 192, 64], "60": [64, 64, 192], "61": [192, 64, 192], "62": [64, 192, 192], "63": [192, 192, 192], "64": [32, 0, 0], "65": [160, 0, 0], "66": [32, 128, 0], "67": [160, 128, 0], "68": [32, 0, 128], "69": [160, 0, 128], "70": [32, 128, 128], "71": [160, 128, 128], "72": [96, 0, 0], "73": [224, 0, 0], "74": [96, 128, 0], "75": [224, 128, 0], "76": [96, 0, 128], "77": [224, 0, 128], "78": [96, 128, 128], "79": [224, 128, 128], "80": [32, 64, 0], "81": [160, 64, 0], "82": [32, 192, 0], "83": [160, 192, 0], "84": [32, 64, 128], "85": [160, 64, 128], "86": [32, 192, 128], "87": [160, 192, 128], "88": [96, 64, 0], "89": [224, 64, 0], "90": [96, 192, 0], "91": [224, 192, 0], "92": [96, 64, 128], "93": [224, 64, 128], "94": [96, 192, 128], "95": [224, 192, 128], "96": [32, 0, 64], "97": [160, 0, 64], "98": [32, 128, 64], "99": [160, 128, 64], "100": [32, 0, 192], "101": [160, 0, 192], "102": [32, 128, 192], "103": [160, 128, 192], "104": [96, 0, 64], "105": [224, 0, 64], "106": [96, 128, 64], "107": [224, 128, 64], "108": [96, 0, 192], "109": [224, 0, 192], "110": [96, 128, 192], "111": [224, 128, 192], "112": [32, 64, 64], "113": [160, 64, 64], "114": [32, 192, 64], "115": [160, 192, 64], "116": [32, 64, 192], "117": [160, 64, 192], "118": [32, 192, 192], "119": [160, 192, 192], "120": [96, 64, 64], "121": [224, 64, 64], "122": [96, 192, 64], "123": [224, 192, 64], "124": [96, 64, 192], "125": [224, 64, 192], "126": [96, 192, 192], "127": [224, 192, 192], "128": [0, 32, 0], "129": [128, 32, 0], "130": [0, 160, 0], "131": [128, 160, 0], "132": [0, 32, 128], "133": [128, 32, 128], "134": [0, 160, 128], "135": [128, 160, 128], "136": [64, 32, 0], "137": [192, 32, 0], "138": [64, 160, 0], "139": [192, 160, 0], "140": [64, 32, 128], "141": [192, 32, 128], "142": [64, 160, 128], "143": [192, 160, 128], "144": [0, 96, 0], "145": [128, 96, 0], "146": [0, 224, 0], "147": [128, 224, 0], "148": [0, 96, 128], "149": [128, 96, 128], "150": [0, 224, 128], "151": [128, 224, 128], "152": [64, 96, 0], "153": [192, 96, 0], "154": [64, 224, 0], "155": [192, 224, 0], "156": [64, 96, 128], "157": [192, 96, 128], "158": [64, 224, 128], "159": [192, 224, 128], "160": [0, 32, 64], "161": [128, 32, 64], "162": [0, 160, 64], "163": [128, 160, 64], "164": [0, 32, 192], "165": [128, 32, 192], "166": [0, 160, 192], "167": [128, 
160, 192], "168": [64, 32, 64], "169": [192, 32, 64], "170": [64, 160, 64], "171": [192, 160, 64], "172": [64, 32, 192], "173": [192, 32, 192], "174": [64, 160, 192], "175": [192, 160, 192], "176": [0, 96, 64], "177": [128, 96, 64], "178": [0, 224, 64], "179": [128, 224, 64], "180": [0, 96, 192], "181": [128, 96, 192], "182": [0, 224, 192], "183": [128, 224, 192], "184": [64, 96, 64], "185": [192, 96, 64], "186": [64, 224, 64], "187": [192, 224, 64], "188": [64, 96, 192], "189": [192, 96, 192], "190": [64, 224, 192], "191": [192, 224, 192], "192": [32, 32, 0], "193": [160, 32, 0], "194": [32, 160, 0], "195": [160, 160, 0], "196": [32, 32, 128], "197": [160, 32, 128], "198": [32, 160, 128], "199": [160, 160, 128], "200": [96, 32, 0], "201": [224, 32, 0], "202": [96, 160, 0], "203": [224, 160, 0], "204": [96, 32, 128], "205": [224, 32, 128], "206": [96, 160, 128], "207": [224, 160, 128], "208": [32, 96, 0], "209": [160, 96, 0], "210": [32, 224, 0], "211": [160, 224, 0], "212": [32, 96, 128], "213": [160, 96, 128], "214": [32, 224, 128], "215": [160, 224, 128], "216": [96, 96, 0], "217": [224, 96, 0], "218": [96, 224, 0], "219": [224, 224, 0], "220": [96, 96, 128], "221": [224, 96, 128], "222": [96, 224, 128], "223": [224, 224, 128], "224": [32, 32, 64], "225": [160, 32, 64], "226": [32, 160, 64], "227": [160, 160, 64], "228": [32, 32, 192], "229": [160, 32, 192], "230": [32, 160, 192], "231": [160, 160, 192], "232": [96, 32, 64], "233": [224, 32, 64], "234": [96, 160, 64], "235": [224, 160, 64], "236": [96, 32, 192], "237": [224, 32, 192], "238": [96, 160, 192], "239": [224, 160, 192], "240": [32, 96, 64], "241": [160, 96, 64], "242": [32, 224, 64], "243": [160, 224, 64], "244": [32, 96, 192], "245": [160, 96, 192], "246": [32, 224, 192], "247": [160, 224, 192], "248": [96, 96, 64], "249": [224, 96, 64], "250": [96, 224, 64], "251": [224, 224, 64], "252": [96, 96, 192], "253": [224, 96, 192], "254": [96, 224, 192], "255": [224, 224, 192]}
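
The palette above is just the standard PASCAL VOC colormap. If you prefer to generate palette.json instead of copying it by hand, a minimal sketch of the usual bit-shuffling colormap routine, which should reproduce the listing above:

import json

def voc_colormap(n=256):
    # spread the bits of each label index over the R, G and B channels,
    # starting from the most significant bit (standard PASCAL VOC colormap)
    cmap = []
    for i in range(n):
        r = g = b = 0
        c = i
        for j in range(8):
            r |= ((c >> 0) & 1) << (7 - j)
            g |= ((c >> 1) & 1) << (7 - j)
            b |= ((c >> 2) & 1) << (7 - j)
            c >>= 3
        cmap.append([r, g, b])
    return cmap

with open("palette.json", "w") as f:
    json.dump({str(i): c for i, c in enumerate(voc_colormap())}, f)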