Dataset download
https://github.com/VisDrone/VisDrone-Dataset
Dataset sizes:
- trainset (1.44 GB)
- valset (0.07 GB)
- testset-dev (0.28 GB)
Download the following archives into D:/ultralytics/ultralytics/datasets/VisDrone and extract each one (note: create the VisDrone directory yourself):
- VisDrone2019-DET-train.zip
- VisDrone2019-DET-val.zip
- VisDrone2019-DET-test-dev.zip
- VisDrone2019-DET-test-challenge.zip

Also place the two conversion scripts in D:/ultralytics/ultralytics/datasets/VisDrone:
- convert_visdrone2yolo.py
- convert_yolo2visdrone.py
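After extraction, the dataset root should look roughly like this (each DET split ships an images/ folder plus an annotations/ folder; the test-challenge split contains images only). This is the layout the conversion script below expects:

```text
D:/ultralytics/ultralytics/datasets/VisDrone/
├── VisDrone2019-DET-train/
│   ├── images/
│   └── annotations/
├── VisDrone2019-DET-val/
│   ├── images/
│   └── annotations/
├── VisDrone2019-DET-test-dev/
│   ├── images/
│   └── annotations/
└── VisDrone2019-DET-test-challenge/
    └── images/
```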
VisDrone-to-YOLO conversion code (convert_visdrone2yolo.py):
```python
import os
from pathlib import Path


# Convert VisDrone annotations under `dir` to YOLO-format labels
def visdrone2yolo(dir):
    from PIL import Image
    from tqdm import tqdm

    # Convert a VisDrone box (left, top, width, height) in pixels
    # to a normalized YOLO box (x_center, y_center, width, height)
    def convert_box(size, box):
        dw = 1. / size[0]
        dh = 1. / size[1]
        return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh

    # Create the labels directory if it does not exist
    (dir / 'labels').mkdir(parents=True, exist_ok=True)
    # Iterate over all txt files in the annotations folder with a tqdm progress bar
    pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
    for f in pbar:
        # Get the size of the corresponding image
        img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
        lines = []
        with open(f, 'r') as file:  # read the annotation txt file
            for row in [x.split(',') for x in file.read().strip().splitlines()]:
                if row[4] == '0':  # skip VisDrone 'ignored regions' (class 0)
                    continue
                cls = int(row[5]) - 1  # shift class ids so they start at 0
                box = convert_box(img_size, tuple(map(int, row[:4])))
                lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
        with open(str(f).replace(f'{os.sep}annotations{os.sep}', f'{os.sep}labels{os.sep}'), 'w') as fl:
            fl.writelines(lines)  # write the converted YOLO label file


# Dataset root directory
dir = Path("D:/ultralytics/ultralytics/datasets/VisDrone")

# Convert the VisDrone annotations of each split to YOLO labels
for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
    visdrone2yolo(dir / d)
```
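To make the box conversion concrete, here is a standalone worked example (the function is repeated here only because convert_box is defined inside visdrone2yolo above; the numbers are illustrative, not from the dataset):

```python
# VisDrone boxes are (left, top, width, height) in pixels; YOLO boxes are
# (x_center, y_center, width, height) normalized by the image size.
def convert_box(size, box):
    dw, dh = 1. / size[0], 1. / size[1]
    return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh

# A 50x80 box with its top-left corner at (100, 200) in a 1920x1080 image:
print(convert_box((1920, 1080), (100, 200, 50, 80)))
# ≈ (0.065104, 0.222222, 0.026042, 0.074074)
```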
Then convert the YOLO labels to COCO format with the following code (yolo2coco.py):
```python
import os
import cv2
import json
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import argparse

# Parse command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', default='./dataset/valid', type=str,
                    help="root directory, containing ./images, ./labels and classes.txt")
parser.add_argument('--save_path', type=str, default='./valid.json',
                    help="output json path when the dataset is not split")
parser.add_argument('--random_split', action='store_true',
                    help="randomly split the dataset, default ratio 8:1:1")
parser.add_argument('--split_by_file', action='store_true',
                    help="split the dataset by files ./train.txt, ./val.txt and ./test.txt")
arg = parser.parse_args()


def train_test_val_split_random(img_paths, ratio_train=0.8, ratio_test=0.1, ratio_val=0.1):
    # The three ratios must sum to 1
    assert int(ratio_train + ratio_test + ratio_val) == 1
    train_img, middle_img = train_test_split(img_paths, test_size=1 - ratio_train, random_state=233)
    ratio = ratio_val / (1 - ratio_train)
    val_img, test_img = train_test_split(middle_img, test_size=ratio, random_state=233)
    print("train:val:test = {}:{}:{}".format(len(train_img), len(val_img), len(test_img)))
    return train_img, val_img, test_img


def train_test_val_split_by_files(img_paths, root_dir):
    # Define train/val/test sets from train.txt, val.txt and test.txt
    phases = ['train', 'val', 'test']
    img_split = []
    for p in phases:
        define_path = os.path.join(root_dir, f'{p}.txt')
        print(f'Reading {p} dataset definition from {define_path}')
        assert os.path.exists(define_path)
        with open(define_path, 'r') as f:
            img_paths = f.readlines()
            # Uncomment the next line if the txt files contain absolute paths
            # img_paths = [os.path.split(img_path.strip())[1] for img_path in img_paths]
            img_split.append(img_paths)
    return img_split[0], img_split[1], img_split[2]


def yolo2coco(arg):
    root_path = arg.root_dir
    print("Loading data from", root_path)
    assert os.path.exists(root_path)
    originLabelsDir = os.path.join(root_path, 'labels')
    originImagesDir = os.path.join(root_path, 'images')
    with open(os.path.join(root_path, 'classes.txt')) as f:
        classes = f.read().strip().split()
    indexes = os.listdir(originImagesDir)

    if arg.random_split or arg.split_by_file:
        # One COCO dataset per split
        train_dataset = {'categories': [], 'annotations': [], 'images': []}
        val_dataset = {'categories': [], 'annotations': [], 'images': []}
        test_dataset = {'categories': [], 'annotations': [], 'images': []}
        for i, cls in enumerate(classes, 0):
            category_info = {'id': i, 'name': cls, 'supercategory': 'mark'}
            train_dataset['categories'].append(category_info)
            val_dataset['categories'].append(category_info)
            test_dataset['categories'].append(category_info)
        if arg.random_split:
            print("Split mode: random split")
            train_img, val_img, test_img = train_test_val_split_random(indexes, 0.8, 0.1, 0.1)
        elif arg.split_by_file:
            print("Split mode: split by file")
            train_img, val_img, test_img = train_test_val_split_by_files(indexes, root_path)
    else:
        dataset = {'categories': [], 'annotations': [], 'images': []}
        for i, cls in enumerate(classes, 0):
            dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})

    ann_id_cnt = 0  # running annotation id
    for k, index in enumerate(tqdm(indexes)):
        # Derive the label filename from the image filename (.jpg and .png supported)
        txtFile = index.replace('images', 'txt').replace('.jpg', '.txt').replace('.png', '.txt')
        im = cv2.imread(os.path.join(root_path, 'images/') + index)
        height, width, _ = im.shape
        if arg.random_split or arg.split_by_file:
            # Route this image to the dataset of its split
            if index in train_img:
                dataset = train_dataset
            elif index in val_img:
                dataset = val_dataset
            elif index in test_img:
                dataset = test_dataset
        dataset['images'].append({'file_name': index, 'id': k, 'width': width, 'height': height})
        if not os.path.exists(os.path.join(originLabelsDir, txtFile)):
            # Image without a label file: keep the image entry, skip annotations
            continue
        with open(os.path.join(originLabelsDir, txtFile), 'r') as fr:
            labelList = fr.readlines()
            for label in labelList:
                label = label.strip().split()
                x = float(label[1])
                y = float(label[2])
                w = float(label[3])
                h = float(label[4])
                # Convert normalized YOLO xywh (center-based) to absolute corners
                H, W, _ = im.shape
                x1 = (x - w / 2) * W
                y1 = (y - h / 2) * H
                x2 = (x + w / 2) * W
                y2 = (y + h / 2) * H
                cls_id = int(label[0])
                width = max(0, x2 - x1)
                height = max(0, y2 - y1)
                dataset['annotations'].append({
                    'area': width * height,
                    'bbox': [x1, y1, width, height],  # COCO xywh: top-left corner + size
                    'category_id': cls_id,
                    'id': ann_id_cnt,
                    'image_id': k,
                    'iscrowd': 0,
                    # COCO polygon: the four corners of the box
                    'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]]
                })
                ann_id_cnt += 1

    # Save the results
    folder = os.path.join(root_path, 'annotations')
    if not os.path.exists(folder):
        os.makedirs(folder)
    if arg.random_split or arg.split_by_file:
        for phase in ['train', 'val', 'test']:
            json_name = os.path.join(root_path, 'annotations/{}.json'.format(phase))
            with open(json_name, 'w') as f:
                if phase == 'train':
                    json.dump(train_dataset, f)
                elif phase == 'val':
                    json.dump(val_dataset, f)
                elif phase == 'test':
                    json.dump(test_dataset, f)
            print('Saved annotations to {}'.format(json_name))
    else:
        json_name = os.path.join(root_path, 'annotations/{}'.format(arg.save_path))
        with open(json_name, 'w') as f:
            json.dump(dataset, f)
        print('Saved annotations to {}'.format(json_name))


if __name__ == "__main__":
    yolo2coco(arg)
```
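yolo2coco.py reads class names from a classes.txt inside --root_dir, so create one in each VisDrone2019-DET-* directory before running it. Since the conversion above drops 'ignored regions' and shifts the remaining ids down by one, class 0 is pedestrian, and the file should list the ten VisDrone categories in order:

```text
pedestrian
people
bicycle
car
van
truck
tricycle
awning-tricycle
bus
motor
```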
Run the code:

```bash
python yolo2coco.py --root_dir VisDrone2019-DET-train --save_path train.json
python yolo2coco.py --root_dir VisDrone2019-DET-val --save_path val.json
python yolo2coco.py --root_dir VisDrone2019-DET-test-dev --save_path test.json
```
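To verify the output, here is a minimal sanity check with pycocotools (assuming it is installed; the path follows the train command above, which writes annotations/train.json inside the root directory):

```python
from pycocotools.coco import COCO

# Load the json written by yolo2coco.py and print basic statistics
coco = COCO('VisDrone2019-DET-train/annotations/train.json')
print(len(coco.imgs), 'images,', len(coco.anns), 'annotations')
print([c['name'] for c in coco.loadCats(coco.getCatIds())])  # should list the 10 VisDrone classes
```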