用 Python 把 VOC 数据集转化成 txt 文件

主要是网上一些已有并已用了觉得还可以的,同时记录方便以后查询使用。
第一个代码是生成VOC数据集的无后缀文件名于txt文件中,如下所示:

E84.27832967649_N46.23271680371_Level_17
E84.27323348179_N46.20431951701_Level_17
E84.26983244239_N46.22384654871_Level_17
E84.26467187469_N46.19801444161_Level_17
E84.25288088939_N46.183574654009995_Level_17
E84.24682982879_N46.195036174509994_Level_17
E84.24360045069_N46.237184791009994_Level_17
E84.23477062279_N46.235218030809996_Level_17
E84.22621974449_N46.22634817991_Level_17
E84.22523269199_N46.243774779409996_Level_17
E84.20790562999_N46.23288751421_Level_17
E84.19428001469_N46.23460943411_Level_17
E84.18946276959_N46.231165540309995_Level_17
E84.18435584609_N46.24350021241_Level_17
E84.17493593249_N46.23213044611_Level_17
E84.17491447479_N46.22991856001_Level_17
E84.17009722969_N46.22870124241_Level_17
E84.16549456119_N46.22301512081_Level_17
....
import os
import random


xmlfilepath = './dataset/wind_turbine/600/Annotations'
saveBasePath = './dataset/wind_turbine/main/'

# Fraction of the whole dataset assigned to trainval (rest -> test),
# and fraction of trainval assigned to train (rest -> val).
trainval_percent = 0.5
train_percent = 0.5

# Split the dataset by ratio and write the extension-less file names to txt files.
total_xml = os.listdir(xmlfilepath)
num = len(total_xml)
indices = range(num)  # renamed from `list`, which shadowed the builtin
tv = int(num * trainval_percent)  # size of the trainval split
tr = int(tv * train_percent)      # size of the train split (subset of trainval)
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)
# Sets give O(1) membership tests in the loop below (lists are O(n) each).
trainval_set = set(trainval)
train_set = set(train)

print("train and val size", tv)
print("train size", tr)  # fixed typo: was "traub suze"

# Context managers guarantee all four files are closed even if a write fails.
with open(os.path.join(saveBasePath, 'trainval_600.txt'), 'w') as ftrainval, \
     open(os.path.join(saveBasePath, 'test.txt'), 'w') as ftest, \
     open(os.path.join(saveBasePath, 'train.txt'), 'w') as ftrain, \
     open(os.path.join(saveBasePath, 'val.txt'), 'w') as fval:
    for i in indices:
        name = total_xml[i][:-4] + '\n'  # strip the ".xml" extension
        if i in trainval_set:
            ftrainval.write(name)
            if i in train_set:
                ftrain.write(name)
            else:
                fval.write(name)
        else:
            ftest.write(name)

第二个是生成VOC数据集含jpg图像数据的路径+对应图片的标签数据于txt文件中,效果如下所示:

/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.27832967649_N46.23271680371_Level_17.jpg 228.99391,222.47363,312.58466,304.47232,0 233.84084,532.4577,324.12997,599.7597,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.27323348179_N46.20431951701_Level_17.jpg 201.64653,494.78455,337.38416,557.193,0 211.9857,233.18863,309.72287,305.5362,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.26983244239_N46.22384654871_Level_17.jpg 223.42166,218.48094,311.25772,302.37592,0 234.28397,534.29614,329.2536,598.3805,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.26467187469_N46.19801444161_Level_17.jpg 216.01837,230.02402,315.0269,306.0483,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.25288088939_N46.183574654009995_Level_17.jpg 212.70818,226.00427,316.77173,305.18716,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.24682982879_N46.195036174509994_Level_17.jpg 208.24023,225.01248,308.3647,302.37195,0 224.92915,487.00003,313.08423,567.02155,0 204.29791,0.0,293.57907,49.22905,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.24360045069_N46.237184791009994_Level_17.jpg 222.19019,236.3034,315.72125,307.9717,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.23477062279_N46.235218030809996_Level_17.jpg 206.90015,220.33434,313.02274,306.4058,0 220.44206,0.0,284.43204,22.45714,0 232.01357,541.43335,272.89383,597.89197,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.22621974449_N46.22634817991_Level_17.jpg 216.45709,235.43375,313.00192,306.33673,0 229.08986,544.2234,272.83643,598.7043,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.22523269199_N46.243774779409996_Level_17.jpg 228.57738,218.4152,312.82574,302.70883,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.20790562999_N46.23288751421_Level_17.jpg 220.28488,234.50874,315.49698,308.64893,0
/nfs/private/junnxie/codes/opencv-mosaic-data-aug-master/./dataset/wind_turbine/600/Images/E84.19428001469_N46.23460943411_Level_17.jpg 243.37088,250.062,308.4704,306.28558,0 249.10687,459.61624,310.7504,515.1315,0 239.11302,41.748695,303.94968,95.20206,0
.....

使用前读懂参数设置,建议使用的命令行如下

python3 voc2txt_annotation.py -name bdd100k_obj -input_dir D:/BDD100K/ -save data_txt

实际上上述参数可以在代码中默认设置,不需要加也可以使用。

import os
import logging
import argparse
from tqdm import tqdm
import xml.etree.ElementTree as ET
from Generate_coco_classes import main
from ipdb import set_trace

# Configure root logging once for the whole script; this module's own logger
# is made more verbose (DEBUG) than the root INFO handler.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger('voc2txt')
logger.setLevel(logging.DEBUG)

# Convert VOC-style XML annotations into one txt line per image.
# Example usage:
# python3 voc2txt_annotation.py -name bdd100k_obj -input_dir D:/BDD100K/ -save data_txt

def parse_arguments():
    """Parse the command-line options for the VOC -> txt conversion.

    Returns:
        argparse.Namespace with `name`, `input_dir` and `save` string fields.

    Bug fixed: the original passed ``action="store_true"`` on every option,
    which turns them into value-less boolean flags, so the documented usage
    (``-name bdd100k_obj -input_dir D:/BDD100K/ -save data_txt``) failed with
    "unrecognized arguments". Plain string options with defaults restore the
    intended behaviour; all defaults are unchanged.
    """
    parser = argparse.ArgumentParser(description='This a script to generate train, test, val dataset, the generated txt file will be used for yolo training')
    parser.add_argument('-name', default="mosaic", help="Dataset name")
    parser.add_argument('-input_dir', default="./dataset/wind_turbine/600",
                        help="Read dataset annotations")
    parser.add_argument('-save', default='data_txt', help="Txt file generated for centernet training and test")
    args = parser.parse_args()
    return args

def convert_annotation(image_id, list_file, image_set, input_dir_path):
    """Append one line to `list_file`: the absolute image path followed by one
    " xmin,ymin,xmax,ymax,class_id" token per kept object.

    Args:
        image_id: file stem shared by the .xml annotation and the .jpg image.
        list_file: open writable text file the line is appended to.
        image_set: currently unused (kept for interface compatibility).
        input_dir_path: dataset root containing Annotations/ and Images/.

    Relies on module-level globals `current_path` and `classes` that are set
    in the __main__ block.
    """
    # ET.parse accepts a path directly — the original opened a file handle
    # and never closed it, leaking a descriptor per image.
    xml_path = os.path.join(input_dir_path, 'Annotations/%s.xml' % (image_id))
    root = ET.parse(xml_path).getroot()
    list_file.write(os.path.join(current_path, input_dir_path, 'Images/%s.jpg' % (image_id)))
    for obj in root.iter('object'):
        # Some VOC files omit <difficult>; treat a missing tag as "not difficult"
        # instead of crashing on None.text.
        difficult_node = obj.find('difficult')
        difficult = difficult_node.text if difficult_node is not None else '0'
        cls = obj.find('name').text
        # Skip classes we don't train on and objects flagged as difficult.
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        # Coordinates may be fractional in this dataset, so parse as float.
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('ymin').text),
             float(xmlbox.find('xmax').text), float(xmlbox.find('ymax').text))
        list_file.write(" " + ",".join(str(a) for a in b) + ',' + str(cls_id))

    list_file.write('\n')

def save_data_txt(input_dir_path, sets):
    """For each (name, image_set) pair, read the id list
    '<input_dir_path>/<image_set>.txt' and write one annotation line per id to
    '<save_path>/<args.name>_<image_set>.txt'.

    Args:
        input_dir_path: dataset root passed through to convert_annotation.
        sets: iterable of (name, image_set) pairs; `name` itself is unused,
              the output file name uses the global `args.name`.

    Relies on module-level globals `save_path` and `args` set in __main__.
    """
    for name, image_set in sets:
        logger.info(input_dir_path)
        # `with` guarantees the handle is closed — the original leaked it
        # via open(...).read().
        with open(os.path.join(input_dir_path, '%s.txt' % (image_set))) as id_file:
            image_ids = id_file.read().strip().split()
        with open(os.path.join(save_path, '%s_%s.txt' % (args.name, image_set)), 'w') as list_file:
            for image_id in tqdm(image_ids):
                convert_annotation(image_id, list_file, image_set, input_dir_path)

if __name__ == '__main__':
    # Resolve everything relative to the invocation directory so the txt
    # files contain absolute image paths.
    current_path = os.getcwd()
    args = parse_arguments()

    # Output directory for the generated txt files.
    save_path = args.save
    os.makedirs(save_path, exist_ok=True)

    # Class list: a class's index becomes its label id in the output lines.
    classes = ['wind_turbine']

    # Build the trainval annotation file for the wind-turbine dataset.
    trainval_sets = [(args.name, 'trainval_600')]
    save_data_txt(args.input_dir, trainval_sets)

   

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

清梦枕星河~

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值