VOC Dataset, COCO Dataset, YOLO Dataset

This article describes the formats of the VOC, COCO, and YOLO datasets and how to convert between them. A VOC dataset consists of the Annotations, ImageSets, and JPEGImages directories, and a test.py script generates the txt files that split the data into training and validation sets. A COCO dataset stores image annotations in json files, while a YOLO dataset requires converting the XML annotations into txt label files and splitting the images into training and test sets by a given ratio. The conversion process relies on the LabelMe tool and custom scripts.

These are just my personal study notes~

The VOC dataset format is as follows:
Note the difference between the VOC *dataset* format and the plain VOC *data* format. Data you have just annotated with LabelImg is only in the VOC data format: it contains two folders, images (the original pictures) and annotations (the annotated xml files). To turn this into the VOC dataset format, rebuild the folder structure shown below, move the existing data into it, and then use the code given later to generate the two txt files under ImageSets. In my case the test set and the validation set are shared, so I only split the data into a training set and a validation set.

─VOCdevkit
    └─VOC2007
        │  test.py
        ├─Annotations
        │   ├─train
        │   │   └─x1.xml
        │   └─val
        │       └─x2.xml
        ├─ImageSets
        │   └─Main
        │       ├─train.txt
        │       └─val.txt
        └─JPEGImages
            ├─train
            │   └─x1.jpg
            └─val
                └─x2.jpg

A VOC dataset has three parts: Annotations, ImageSets, and JPEGImages. In the tree above, test.py is the script that generates train.txt and val.txt under ImageSets; it is explained later. Annotations stores the xml files that annotate the images (a minimal example follows this paragraph).
ImageSets holds the train.txt and val.txt generated by test.py; each line in them is an xml file name with the extension stripped, and they are used to look up files during training and testing.
JPEGImages stores the images we collected.
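For reference, here is a minimal sketch of what one of these annotation xml files typically looks like. The file name, class, and coordinates are made-up placeholders, and real files produced by LabelImg contain a few more fields (pose, truncated, and so on):

<annotation>
    <filename>x1.jpg</filename>
    <size>
        <width>1920</width>
        <height>1080</height>
        <depth>3</depth>
    </size>
    <object>
        <name>alive_fish</name>
        <difficult>0</difficult>
        <bndbox>
            <xmin>100</xmin>
            <ymin>200</ymin>
            <xmax>300</xmax>
            <ymax>400</ymax>
        </bndbox>
    </object>
</annotation>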
The content of test.py is shown below:

import os
import random

# Split ratios: trainval_percent is the fraction of samples held out
# (and further split into test/val below); the remaining samples go to train.txt.
trainval_percent = 0.1
# Of the held-out samples, train_percent go to test.txt and the rest to val.txt.
train_percent = 0.9

# Path to the annotation xml files
xmlfilepath = 'Annotations'

# Directory where the generated txt files are saved
txtsavepath = 'ImageSets/Main'
total_xml = os.listdir(xmlfilepath)

num = len(total_xml)
indices = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)

ftrainval = open('ImageSets/Main/trainval.txt', 'w')
ftest = open('ImageSets/Main/test.txt', 'w')
ftrain = open('ImageSets/Main/train.txt', 'w')
fval = open('ImageSets/Main/val.txt', 'w')

for i in indices:
    name = total_xml[i][:-4] + '\n'  # xml file name with the .xml extension stripped
    if i in trainval:
        ftrainval.write(name)
        if i in train:
            ftest.write(name)
        else:
            fval.write(name)
    else:
        ftrain.write(name)

ftrainval.close()
ftrain.close()
fval.close()
ftest.close()
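Each line of the generated txt files is simply the xml file name without its extension. Below is a minimal sketch of how such a list can be mapped back to image and annotation paths; note that test.py lists the Annotations folder directly, so it assumes the xml files sit flat under Annotations and the jpg files flat under JPEGImages:

# A minimal sketch (run from VOCdevkit/VOC2007): turn the stems listed in
# train.txt back into image and annotation paths.
with open('ImageSets/Main/train.txt') as f:
    stems = [line.strip() for line in f if line.strip()]

image_paths = ['JPEGImages/' + s + '.jpg' for s in stems]
xml_paths = ['Annotations/' + s + '.xml' for s in stems]
print(len(stems), image_paths[:3])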

The COCO dataset format is as follows:

─annotations
    ├─instances_train2017.json
    └─instances_test2017.json
─train2017
    ├─x1.jpg
    └─...
─test2017
    ├─x2.jpg
    └─...

In the layout above, instances_train2017.json under annotations is produced by merging the per-image LabelMe json annotation files for the jpg images under train2017 into a single COCO-style file.
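For orientation, this is roughly what the merged file contains (the values are hypothetical; the three top-level keys match what data2coco() in the script below assembles, and bbox uses the COCO [x, y, w, h] convention):

{
    "images": [
        {"height": 1080, "width": 1920, "id": 1, "file_name": "x1.jpg"}
    ],
    "categories": [
        {"supercategory": "waxberry", "id": 1, "name": "ripe"}
    ],
    "annotations": [
        {"segmentation": [[100, 200, 300, 400]], "iscrowd": 0, "image_id": 1,
         "bbox": [100.0, 200.0, 200.0, 200.0], "area": 40000.0,
         "category_id": 1, "id": 1}
    ]
}

The conversion code is as follows: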

#!/usr/bin/env python
# -*- coding:utf-8 -*-

import argparse
import json
import matplotlib.pyplot as plt
import skimage.io as io
# import cv2
from labelme import utils
import numpy as np
import glob
import PIL.Image
import PIL.ImageDraw  # needed by polygons_to_mask() below

class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)
 
class labelme2coco(object):
    def __init__(self, labelme_json=[], save_json_path='./test.json'):
        '''
        :param labelme_json: list of paths to all the per-image labelme json files
        :param save_json_path: path where the merged COCO json is saved
        '''
        self.labelme_json = labelme_json
        self.save_json_path = save_json_path
        self.images = []
        self.categories = []
        self.annotations = []
        # self.data_coco = {}
        self.label = []
        self.annID = 1
        self.height = 0
        self.width = 0
        self.save_json()
 
    def data_transfer(self):
 
        for num, json_file in enumerate(self.labelme_json):
            print("############:",json_file)
            with open(json_file, 'r') as fp:
                data = json.load(fp)  # load this image's labelme json
                print(data['imagePath'])
                self.images.append(self.image(data, num))
                for shapes in data['shapes']:
                    label = shapes['label']
                    if label not in self.label:
                        self.categories.append(self.categorie(label))
                        self.label.append(label)
                    points = shapes['points']  # points from a rectangle annotation: only the two opposite corners; uncomment below to expand to four corner points
                    # points.append([points[0][0], points[1][1]])
                    # points.append([points[1][0], points[0][1]])
                    self.annotations.append(self.annotation(points, label, num))
                    self.annID += 1
    def image(self, data, num):
        image = {}
        img = utils.img_b64_to_arr(data['imageData'])  # decode the image data embedded in the labelme json
        # img = io.imread(data['imagePath'])  # alternatively, open the image via its path
        # img = cv2.imread(data['imagePath'], 0)
        height, width = img.shape[:2]
        img = None
        image['height'] = height
        image['width'] = width
        image['id'] = num + 1
        image['file_name'] = data['imagePath'].split('/')[-1]
        #image['file_name'] = data['imagePath'][3:14]
        self.height = height
        self.width = width

        return image

    def categorie(self, label):
        categorie = {}
        categorie['supercategory'] = 'waxberry'  # change this to your own supercategory
        categorie['id'] = len(self.label) + 1  # id 0 is reserved for the background
        categorie['name'] = label
        return categorie
 
    def annotation(self, points, label, num):
        annotation = {}
        annotation['segmentation'] = [list(np.asarray(points).flatten())]
        annotation['iscrowd'] = 0
        annotation['image_id'] = num + 1
        # annotation['bbox'] = str(self.getbbox(points))  # saving the bbox as a plain list raised an error when dumping the json (reason unclear), hence this string workaround was once used
        # list(map(int, a[1:-1].split(',')))  # with a = annotation['bbox'], converts that string back to a list
        annotation['bbox'] = list(map(float, self.getbbox(points)))
        annotation['area'] = annotation['bbox'][2] * annotation['bbox'][3]
        annotation['category_id'] = self.getcatid(label)  # note: the original code hard-coded this to 1
        annotation['id'] = self.annID
        return annotation
 
    def getcatid(self, label):
        for categorie in self.categories:
            if label == categorie['name']:
                return categorie['id']
        return 1
 
    def getbbox(self, points):
        # img = np.zeros([self.height, self.width], np.uint8)
        # cv2.polylines(img, [np.asarray(points)], True, 1, lineType=cv2.LINE_AA)  # draw the polygon boundary
        # cv2.fillPoly(img, [np.asarray(points)], 1)  # fill the polygon with pixel value 1
        polygons = points
 
        mask = self.polygons_to_mask([self.height, self.width], polygons)
        return self.mask2box(mask)
 
    def mask2box(self, mask):
        '''Recover the bounding box from a mask.
        mask: an [h, w] array of 0s and 1s, where 1 marks the object.
        The box is obtained from the min/max row and column indices of the 1 pixels
        (top-left and bottom-right corners).
        '''
        index = np.argwhere(mask == 1)
        rows = index[:, 0]
        cols = index[:, 1]
        # top-left corner
        left_top_r = np.min(rows)  # y
        left_top_c = np.min(cols)  # x

        # bottom-right corner
        right_bottom_r = np.max(rows)
        right_bottom_c = np.max(cols)

        # return [(left_top_c, left_top_r), (right_bottom_c, right_bottom_r)]
        # return [left_top_c, left_top_r, right_bottom_c, right_bottom_r]  # [x1, y1, x2, y2]
        return [left_top_c, left_top_r, right_bottom_c - left_top_c,
                right_bottom_r - left_top_r]  # [x, y, w, h], the COCO bbox format
 
    def polygons_to_mask(self, img_shape, polygons):
        mask = np.zeros(img_shape, dtype=np.uint8)
        mask = PIL.Image.fromarray(mask)
        xy = list(map(tuple, polygons))
        PIL.ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
        mask = np.array(mask, dtype=bool)
        return mask
 
    def data2coco(self):
        data_coco = {}
        data_coco['images'] = self.images
        data_coco['categories'] = self.categories
        data_coco['annotations'] = self.annotations
        return data_coco
 
    def save_json(self):
        self.data_transfer()
        self.data_coco = self.data2coco()
        # save the merged coco-style json file
        json.dump(self.data_coco, open(self.save_json_path, 'w'), indent=4, cls=MyEncoder)  # indent=4 for readable formatting
 
 
# labelme_json = glob.glob('./data/coco/valset/*.json')
# labelme_json=['./Annotations/*.json']

labelme_json = glob.glob(r'I:\xxx\xxx/*.json')
labelme2coco(labelme_json, r'I:\xxx\xxx/instances_test2017.json')

Only three things need to be changed:
1 - the output path of the generated instances_test2017.json in the last line;
2 - in the second-to-last line, the glob pattern pointing to the per-image json annotation files of the images under the train (or test) folder;
3 - the supercategory label set in the categorie() method ('waxberry' above); change it to your own class.
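After the script has run, one possible sanity check is to load the merged file with pycocotools (an optional dependency, not used by the script itself); the path below is a placeholder:

# A minimal sketch: load the generated annotation file and print some counts.
from pycocotools.coco import COCO

coco = COCO('instances_test2017.json')  # placeholder path, point it at your generated file
print('images:', len(coco.imgs))
print('annotations:', len(coco.anns))
print('categories:', [c['name'] for c in coco.cats.values()])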
The YOLO dataset format is as follows:

--Root
   --Annotations
        --videoDir1
            --0.xml
            --1.xml
        --videoDir2
            --0.xml
            --1.xml
   --Images
        --videoDir1
            --0.jpg
            --1.jpg
        --videoDir2
            --0.jpg
            --1.jpg
   --labels
        --videoDir1
            --0.txt
            --1.txt
        --videoDir2
            --0.txt
            --1.txt
   --train.txt
   --test.txt
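Each txt file under labels contains one line per object in the matching image, in the standard YOLO format: class index followed by the box center x, center y, width, and height, all normalized by the image width and height (so every value is between 0 and 1). An illustrative line (made-up numbers):

0 0.104167 0.277778 0.104167 0.185185

Note that the conversion script below globs lowercase images and labels folders and an Annotations folder, so on a case-sensitive filesystem the directory names must match what the script expects.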

The following code converts a VOC dataset into the YOLO format:

import os
import glob
import random
import xml.etree.ElementTree as ET
currentRoot = os.getcwd()
classes = ["alive_fish", "dead_fish"]  # change these to your own class names
train_percent = 0.8  # fraction of images written to train.txt; the rest go to test.txt

def convert(size, box):
    # size = (width, height) in pixels; box = (xmin, xmax, ymin, ymax) in pixels.
    # Returns (x_center, y_center, w, h) normalized to [0, 1], i.e. the YOLO label format.
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[1]) / 2.0
    y = (box[2] + box[3]) / 2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)

def convert_annotation(xmlfile, labelfile):
    # Parse one VOC xml file and write the corresponding YOLO label txt file.
    with open(xmlfile, 'r') as in_file:
        with open(labelfile, 'w') as out_file:
            tree = ET.parse(in_file)
            root = tree.getroot()
            size = root.find('size')
            w = int(size.find('width').text)
            h = int(size.find('height').text)
            for obj in root.iter('object'):
                difficult_node = obj.find('difficult')
                # some annotation tools omit <difficult>; treat a missing tag as 0
                difficult = difficult_node.text if difficult_node is not None else '0'
                cls = obj.find('name').text
                if cls not in classes or int(difficult) == 1:
                    continue
                cls_id = classes.index(cls)
                xmlbox = obj.find('bndbox')
                b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
                     float(xmlbox.find('ymax').text))
                bb = convert((w, h), b)
                out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')

if __name__ == "__main__":
    currentRoot = os.getcwd()
    # Create a labels/<videoDir> folder for every images/<videoDir> folder
    imgdirpath = os.path.join(currentRoot, "images", "*")
    imgdirlist = glob.glob(imgdirpath)
    for i in range(len(imgdirlist)):
        imgdir = imgdirlist[i]
        labeldir = imgdir.replace('images', 'labels')
        if not os.path.exists(labeldir):
            os.makedirs(labeldir)  # makedirs also creates the parent labels folder if it is missing
    # Find every jpg under images/<videoDir>/ and convert its xml annotation to a YOLO label txt
    imgdirpath = os.path.join(imgdirpath, "*.jpg")
    imgpathlist = glob.glob(imgdirpath)
    for i in range(len(imgpathlist)):
        imgfilepath = imgpathlist[i]
        labelfilepath = imgfilepath.replace('images', 'labels')
        labelfilepath = labelfilepath.replace('.jpg', '.txt')
        if not os.path.exists(labelfilepath):
            xmlfilepath = imgfilepath.replace('images', 'Annotations')
            xmlfilepath = xmlfilepath.replace('.jpg', '.xml')
            if not os.path.exists(xmlfilepath):
                print("no xml file exists: " + xmlfilepath)
                continue
            else:
                convert_annotation(xmlfilepath, labelfilepath)
    # Collect every generated label file, map it back to its image, and split into train/test lists
    labelfilepath = os.path.join(currentRoot, "labels", "*", "*.txt")
    labelfilepathlist = glob.glob(labelfilepath)
    imagelist = []
    for i in range(len(labelfilepathlist)):
        labelfilepath = labelfilepathlist[i]
        image = labelfilepath.replace('labels', 'images')
        image = image.replace('.txt', '.jpg')
        imagelist.append(image)
    print(len(imagelist))
    num = len(imagelist)
    trainnum = int(num * train_percent)
    random.shuffle(imagelist)
    for i in range(num):
        if i < trainnum:
            with open('train.txt', 'a') as trainf:
                trainf.write(imagelist[i] + '\n')
        else:
            with open('test.txt', 'a') as testf:
                testf.write(imagelist[i] + '\n')
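As a quick check of the convert() math (made-up numbers): for a 1920x1080 image with a box of xmin=100, xmax=300, ymin=200, ymax=400, the normalized values are (100+300)/2/1920 ≈ 0.104167, (200+400)/2/1080 ≈ 0.277778, 200/1920 ≈ 0.104167, and 200/1080 ≈ 0.185185, which matches the sample label line shown earlier. You can verify this by calling the function directly:

print(convert((1920, 1080), (100.0, 300.0, 200.0, 400.0)))
# -> approximately (0.104167, 0.277778, 0.104167, 0.185185)

The generated train.txt and test.txt contain one absolute image path per line (the paths are built from os.getcwd()), which is the image-list format YOLO training scripts typically read.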

Reference link:

https://blog.csdn.net/weixin_38616018/article/details/112397396?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522161995714616780262563315%2522%252C%2522scm%2522%253A%252220140713.130102334.pc%255Fblog.%2522%257D&request_id=161995714616780262563315&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2blogfirst_rank_v2~rank_v29-6-112397396.nonecase&utm_term=voc+to+yolo%E8%AE%AD%E7%BB%83%E8%87%AA%E5%B7%B1%E7%9A%84%E6%95%B0%E6%8D%AE
