1. Dataset splitting
import os, shutil, random
random.seed(0)
import numpy as np
from sklearn.model_selection import train_test_split
val_size = 0.1
test_size = 0.2
postfix = 'jpg'
imgpath = 'VOCdevkit/JPEGImages'
txtpath = 'VOCdevkit/txt'
os.makedirs('images/train', exist_ok=True)
os.makedirs('images/val', exist_ok=True)
os.makedirs('images/test', exist_ok=True)
os.makedirs('labels/train', exist_ok=True)
os.makedirs('labels/val', exist_ok=True)
os.makedirs('labels/test', exist_ok=True)
listdir = np.array([i for i in os.listdir(txtpath) if 'txt' in i])
random.shuffle(listdir)
n = len(listdir)
train = listdir[:int(n * (1 - val_size - test_size))]
val = listdir[int(n * (1 - val_size - test_size)):int(n * (1 - test_size))]
test = listdir[int(n * (1 - test_size)):]
print(f'train set size:{len(train)} val set size:{len(val)} test set size:{len(test)}')
# copy each split's images and labels into the corresponding folders
for i in train:
    shutil.copy('{}/{}.{}'.format(imgpath, i[:-4], postfix), 'images/train/{}.{}'.format(i[:-4], postfix))
    shutil.copy('{}/{}'.format(txtpath, i), 'labels/train/{}'.format(i))
for i in val:
    shutil.copy('{}/{}.{}'.format(imgpath, i[:-4], postfix), 'images/val/{}.{}'.format(i[:-4], postfix))
    shutil.copy('{}/{}'.format(txtpath, i), 'labels/val/{}'.format(i))
for i in test:
    shutil.copy('{}/{}.{}'.format(imgpath, i[:-4], postfix), 'images/test/{}.{}'.format(i[:-4], postfix))
    shutil.copy('{}/{}'.format(txtpath, i), 'labels/test/{}'.format(i))
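After copying, it is worth checking that every image in each split has a matching label file. The snippet below is a minimal sanity check, not part of the original script; it assumes it is run from the same working directory as the split script above.
# minimal sanity check (assumption: run from the same directory as the split script)
for split in ('train', 'val', 'test'):
    img_stems = {os.path.splitext(f)[0] for f in os.listdir('images/' + split)}
    lbl_stems = {os.path.splitext(f)[0] for f in os.listdir('labels/' + split)}
    print(split, 'images:', len(img_stems), 'labels:', len(lbl_stems),
          'unmatched:', len(img_stems ^ lbl_stems))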
2. Converting XML annotations to TXT (YOLO) format
xml2txt.py
import xml.etree.ElementTree as ET
import os, cv2
import numpy as np
from os import listdir
from os.path import join
classes = []
def convert(size, box):
    # convert a VOC box (xmin, xmax, ymin, ymax) into a normalized YOLO box (x_center, y_center, w, h)
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)
def convert_annotation(xmlpath, xmlname):
    with open(xmlpath, "r", encoding='utf-8') as in_file:
        txtname = xmlname[:-4] + '.txt'
        txtfile = os.path.join(txtpath, txtname)
        tree = ET.parse(in_file)
        root = tree.getroot()
        filename = root.find('filename')
        # read the image to get its width and height (imdecode handles non-ASCII paths)
        img = cv2.imdecode(np.fromfile('{}/{}.{}'.format(imgpath, xmlname[:-4], postfix), np.uint8), cv2.IMREAD_COLOR)
        h, w = img.shape[:2]
        res = []
        for obj in root.iter('object'):
            cls = obj.find('name').text
            if cls not in classes:
                classes.append(cls)
            cls_id = classes.index(cls)
            xmlbox = obj.find('bndbox')
            b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
                 float(xmlbox.find('ymax').text))
            bb = convert((w, h), b)
            res.append(str(cls_id) + " " + " ".join([str(a) for a in bb]))
        if len(res) != 0:
            with open(txtfile, 'w+') as f:
                f.write('\n'.join(res))
if __name__ == "__main__":
    postfix = 'jpg'
    imgpath = 'VOCdevkit/JPEGImages'
    xmlpath = 'VOCdevkit/Annotations'
    txtpath = 'VOCdevkit/txt'
    if not os.path.exists(txtpath):
        os.makedirs(txtpath, exist_ok=True)
    xml_files = os.listdir(xmlpath)  # renamed from `list` to avoid shadowing the built-in
    error_file_list = []
    for i in range(0, len(xml_files)):
        try:
            path = os.path.join(xmlpath, xml_files[i])
            if ('.xml' in path) or ('.XML' in path):
                convert_annotation(path, xml_files[i])
                print(f'file {xml_files[i]} convert success.')
            else:
                print(f'file {xml_files[i]} is not xml format.')
        except Exception as e:
            print(f'file {xml_files[i]} convert error.')
            print(f'error message:\n{e}')
            error_file_list.append(xml_files[i])
    print(f'these files failed to convert:\n{error_file_list}')
    print(f'Dataset Classes:{classes}')
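To make the normalization done by convert() concrete, here is a worked example with made-up numbers: a box with xmin=100, xmax=300, ymin=50, ymax=250 in a 640x480 image.
# hypothetical box in a 640x480 image (numbers chosen only for illustration)
print(convert((640, 480), (100.0, 300.0, 50.0, 250.0)))
# -> approximately (0.3109, 0.3104, 0.3125, 0.4167), i.e. normalized x_center, y_center, w, h;
# the line written to the .txt label would then be "<class_id> 0.3109... 0.3104... 0.3125 0.4167..."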
3. Converting YOLO labels to COCO format
import os
import cv2
import json
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import argparse
# VisDrone2019 classes
classes = ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor']
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', default='', type=str, help="path of images")
parser.add_argument('--label_path', default='', type=str, help="path of label .txt files")
parser.add_argument('--save_path', default='data.json', type=str, help="path of the output COCO json file")
arg = parser.parse_args()
def yolo2coco(arg):
    print("Loading data from ", arg.image_path, arg.label_path)
    assert os.path.exists(arg.image_path)
    assert os.path.exists(arg.label_path)
    originImagesDir = arg.image_path
    originLabelsDir = arg.label_path
    # image file names
    indexes = os.listdir(originImagesDir)
    dataset = {'categories': [], 'annotations': [], 'images': []}
    for i, cls in enumerate(classes, 0):
        dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})
    # running annotation id
    ann_id_cnt = 0
    for k, index in enumerate(tqdm(indexes)):
        # supports png and jpg images
        txtFile = f'{index[:index.rfind(".")]}.txt'
        stem = index[:index.rfind(".")]
        # read the image to get its width and height
        try:
            im = cv2.imread(os.path.join(originImagesDir, index))
            height, width, _ = im.shape
        except Exception as e:
            print(f'{os.path.join(originImagesDir, index)} read error.\nerror:{e}')
            continue  # skip unreadable images
        # skip images that have no label file
        if not os.path.exists(os.path.join(originLabelsDir, txtFile)):
            continue
        # add the image entry
        dataset['images'].append({'file_name': index,
                                  'id': stem,
                                  'width': width,
                                  'height': height})
        with open(os.path.join(originLabelsDir, txtFile), 'r') as fr:
            labelList = fr.readlines()
            for label in labelList:
                label = label.strip().split()
                x = float(label[1])
                y = float(label[2])
                w = float(label[3])
                h = float(label[4])
                # convert normalized x,y,w,h to absolute x1,y1,x2,y2
                H, W, _ = im.shape
                x1 = (x - w / 2) * W
                y1 = (y - h / 2) * H
                x2 = (x + w / 2) * W
                y2 = (y + h / 2) * H
                # class ids start from 0 (the original coco2017 numbering is not followed here)
                cls_id = int(label[0])
                # note: width/height are reused here for the box size
                width = max(0, x2 - x1)
                height = max(0, y2 - y1)
                dataset['annotations'].append({
                    'area': width * height,
                    'bbox': [x1, y1, width, height],
                    'category_id': cls_id,
                    'id': ann_id_cnt,
                    'image_id': stem,
                    'iscrowd': 0,
                    # segmentation: the four corners of the box, clockwise from the top-left
                    'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]]
                })
                ann_id_cnt += 1
    # save the result
    with open(arg.save_path, 'w') as f:
        json.dump(dataset, f)
        print('Save annotation to {}'.format(arg.save_path))
if __name__ == "__main__":
    yolo2coco(arg)
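A typical invocation and a quick validity check follow; the script name, the paths, and the use of pycocotools are illustrative assumptions rather than part of the original code.
# illustrative usage (assumed script name and paths):
#   python yolo2coco.py --image_path images/train --label_path labels/train --save_path train.json
# optional check that the output loads as valid COCO (extra dependency: pycocotools)
from pycocotools.coco import COCO
coco = COCO('train.json')
print(len(coco.getImgIds()), 'images,', len(coco.getAnnIds()), 'annotations')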