Converting Object Detection Datasets Between Common Formats: VOC, YOLO, and COCO

Introduction to the Experimental Dataset

This post uses the NWPU VHR-10 dataset, which covers 10 object classes across 650 images. Each of the 650 images has a corresponding annotation file, and every line in an annotation file describes one object: the first two coordinate pairs give the top-left and bottom-right corners of the bounding box, and the last value is the class ID. The 10 classes and their IDs are:

1-airplane, 2-ship, 3-storage tank, 4-baseball diamond, 5-tennis court, 6-basketball court, 7-ground track field, 8-harbor, 9-bridge, 10-vehicle
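
For reference, a single line in one of the ground-truth txt files looks roughly like the following (the numbers here are made up for illustration; the two parenthesized pairs are the top-left and bottom-right corners, and the trailing integer is the class ID):

(563,478),(630,573),1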

To train and test on mainstream object detection frameworks, the annotations first have to be converted into the formats those frameworks expect, namely COCO, VOC, and YOLO, which makes it straightforward to fine-tune existing networks on this dataset.

YOLO Format

The YOLO format itself comes in several variants:

YOLOv3-style format:

Repository: eriklindernoren/PyTorch-YOLOv3: Minimal PyTorch implementation of YOLOv3 (github.com)

Format requirements (quoted from the repository README): Move your annotations to data/custom/labels/. The dataloader expects that the annotation file corresponding to the image data/custom/images/train.jpg has the path data/custom/labels/train.txt. Each row in the annotation file should define one bounding box, using the syntax label_idx x_center y_center width height. The coordinates should be scaled [0, 1], and the label_idx should be zero-indexed and correspond to the row number of the class name in data/custom/classes.names.

Each image has one label file. Each row in it describes one object as label_idx x_center y_center width height, where label_idx starts from 0.

Label format: class_id center_x center_y w h (all four box values normalized to [0, 1])
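
A single label line might therefore look like this (the values are purely illustrative):

0 0.481250 0.633333 0.237500 0.208333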

This is the format most YOLO implementations use. The key piece of the conversion code is the formula that maps the top-left and bottom-right corner coordinates to these four YOLO values.

# The standard conversion looks like this:
x, y, x2, y2 = obj['bbox']
class_name = obj['name']
label = classes_dict[class_name]
cx = (x2+x)*0.5 / width
cy = (y2+y)*0.5 / height
w = (x2-x)*1. / width
h = (y2-y)*1. / height
line = "%s %.6f %.6f %.6f %.6f\n" % (label, cx, cy, w, h)
lines.append(line)
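
As a quick sanity check of these formulas, take a hypothetical box with corners (100, 50) and (300, 250) in a 1000x800 image:

# cx = (100 + 300) * 0.5 / 1000 = 0.2
# cy = (50 + 250) * 0.5 / 800   = 0.1875
# w  = (300 - 100) / 1000       = 0.2
# h  = (250 - 50) / 800         = 0.25
# -> "3 0.200000 0.187500 0.200000 0.250000" if the class label is 3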

The full conversion script is as follows:

# Source data path: F:\datas\遥感数据集\NWPU VHR-10\o
import os
import numpy as np
from PIL import Image


def save_txt(save_folder, save_name, yolo_objects):
    save_path = os.path.join(save_folder, save_name)
    yolo_objects = np.array(yolo_objects)
    # Save one row per object: the integer class id followed by four normalized floats
    np.savetxt(save_path, yolo_objects, fmt="%d %.18f %.18f %.18f %.18f", delimiter=' ')


def parse_img(img_path):
    img = Image.open(img_path)
    # Image.size returns (width, height)
    print(img.size)
    np_img = np.array(img)
    # as a numpy array, the image has shape (H, W, C)
    print(np_img.shape)

# Local folder: E:\dddata\NWPU VHR-10 dataset
# Parse one annotation txt file into a list of [x_min, y_min, x_max, y_max, label_id, width, height]
def parse_txt(img_path, ann_path):
    objects = []
    ann_lines = np.loadtxt(ann_path, dtype=str, delimiter=',')
    n_dim = ann_lines.ndim
    # print(ann_lines.ndim)
    # print(ann_lines)
    img = Image.open(img_path)
    width, height = img.size
    if n_dim == 1:
        object = []
        x_min = int(ann_lines[0][1:])
        y_min = int(ann_lines[1][:-1])
        x_max = int(ann_lines[2][1:])
        y_max = int(ann_lines[3][:-1])
        label_id = int(ann_lines[4])
        object.append(x_min)
        object.append(y_min)
        object.append(x_max)
        object.append(y_max)
        object.append(label_id)
        object.append(width)
        object.append(height)
        objects.append(object)
        # print(x_min, y_min, x_max, y_max, lable_id)
    else:
        for ann_line in ann_lines:
            object = []
            x_min = int(ann_line[0][1:])
            y_min = int(ann_line[1][:-1])
            x_max = int(ann_line[2][1:])
            y_max = int(ann_line[3][:-1])
            label_id = int(ann_line[4])
            object.append(x_min)
            object.append(y_min)
            object.append(x_max)
            object.append(y_max)
            object.append(label_id)
            object.append(width)
            object.append(height)
            objects.append(object)
    # print(objects)
    return objects


# Extract the image paths, file names, and image sizes from the original dataset
def get_origin_data(img_folder, ann_folder):
    # For each image, locate its annotation file and parse it; return the result as a list of dicts
    # key: image path, value: list of objects
    imgpath_objects = []
    img_names = os.listdir(img_folder)
    for img_name in img_names:
        ann_name = img_name.split(".")[0] + '.txt'
        img_path = os.path.join(img_folder, img_name)
        ann_path = os.path.join(ann_folder, ann_name)
        # Parse the annotation txt file
        objects = parse_txt(img_path, ann_path)
        # Build a one-entry dict: image path -> objects
        one_imgpath_objects = {}
        one_imgpath_objects[img_path] = objects
        imgpath_objects.append(one_imgpath_objects)
    # Return a list of dicts; each dict maps an image path to that image's list of objects
    return imgpath_objects


def set_yolo_txt(imgpath_objects, save_folder = "E:/dddata/oher_format/yolo"):
    # Loop over every image and write one YOLO label file per image
    for one_imgpath_objects in imgpath_objects:
        # print(one_imgpath_objects)
        for img_path, objects in one_imgpath_objects.items():
            img_name = os.path.splitext(os.path.basename(img_path))[0]
            yolo_objects = []
            for object in objects:
                yolo_object = []
                x_min = object[0]
                y_min = object[1]
                x_max = object[2]
                y_max = object[3]
                label_id = object[4]
                width = object[5]
                height = object[6]
                center_x = (x_max+x_min)*0.5/width
                center_y = (y_max+y_min)*0.5/height
                w = (x_max-x_min)*1./width
                h = (y_max-y_min)*1./height
                yolo_object.append(label_id)
                yolo_object.append(center_x)
                yolo_object.append(center_y)
                yolo_object.append(w)
                yolo_object.append(h)
                yolo_objects.append(yolo_object)
            print(yolo_objects)
            save_name = img_name + '.txt'
            # save_folder = "F:/datas/遥感数据集/NWPU VHR-10/yolo"

            save_txt(save_folder, save_name, yolo_objects)





if __name__ == '__main__':
    # 1. Collect the image paths and their objects
    imgpath_objects = get_origin_data(img_folder="E:/dddata/NWPU VHR-10 dataset/positive image set", ann_folder="E:/dddata/NWPU VHR-10 dataset/ground truth")
    # 2. Convert each image's objects to YOLO format and save one txt label file per image
    set_yolo_txt(imgpath_objects = imgpath_objects, save_folder="E:/dddata/oher_format/yolo")

    # parse_txt('F:/datas/遥感数据集/NWPU VHR-10/o/ground truth/003.txt')
    # parse_img("003.jpg")
    # parse_img("test.jpg")

VOC Format

A VOC-style dataset generally contains the following folders:

  • Annotations
  • ImageSets
  • JPEGImages
  • SegmentationClass
  • SegmentationObject

For object detection, only the Annotations and JPEGImages folders are really needed. Training and test images all sit in the same directory; which images belong to the training, validation, and test sets is determined by the txt files under the Main subfolder of ImageSets (a small sketch for generating these split files follows the folder descriptions below).

JPEGImages holds all of the images provided by PASCAL VOC, both training and test images; these are the images used for training, validation, and testing.

Annotations stores the XML label files; each XML file corresponds to one image in JPEGImages.

ImageSets/Main holds the image lists for the object recognition task (20 classes in PASCAL VOC); the train and val lists must not overlap.

SegmentationObject and SegmentationClass store the segmentation ground truth and are not used for object detection.
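
Since the splits are defined purely by the txt files under ImageSets/Main, they can be generated with a few lines like the following (the 80/20 split ratio and the folder layout are assumptions for illustration, not something fixed by the format):

import os
import random


def make_voc_splits(voc_root, train_ratio=0.8, seed=0):
    # Collect the names of all annotated images and shuffle them reproducibly
    names = [os.path.splitext(f)[0]
             for f in os.listdir(os.path.join(voc_root, "Annotations")) if f.endswith(".xml")]
    random.Random(seed).shuffle(names)
    n_train = int(len(names) * train_ratio)
    splits = {"train": names[:n_train], "val": names[n_train:]}
    main_dir = os.path.join(voc_root, "ImageSets", "Main")
    os.makedirs(main_dir, exist_ok=True)
    for split_name, split_names in splits.items():
        with open(os.path.join(main_dir, split_name + ".txt"), "w") as f:
            f.write("\n".join(split_names) + "\n")


# make_voc_splits("E:/dddata/oher_format/voc_root")  # placeholder root folder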

Here is an example annotation file:

<annotation>  
    <folder>VOC2012</folder>                             
    <filename>2007_000392.jpg</filename>                             //file name
    <source>                                                         //image source (not important)
        <database>The VOC2007 Database</database>  
        <annotation>PASCAL VOC2007</annotation>  
        <image>flickr</image>  
    </source>  
    <size>                                            //image size (width, height, and number of channels)
        <width>500</width>  
        <height>332</height>  
        <depth>3</depth>  
    </size>  
    <segmented>1</segmented>            //whether the image is used for segmentation (0 or 1, irrelevant for detection)
    <object>                              //a detected object
        <name>horse</name>                                         //object class
        <pose>Right</pose>                                         //shooting angle
        <truncated>0</truncated>                                   //whether the object is truncated (0 = complete)
        <difficult>0</difficult>                                   //whether the object is hard to recognize (0 = easy)
        <bndbox>                                                   //bounding box (top-left and bottom-right xy coordinates)
            <xmin>100</xmin>  
            <ymin>96</ymin>  
            <xmax>355</xmax>  
            <ymax>324</ymax>  
        </bndbox>  
    </object>  
    <object>              //another detected object
        <name>person</name>  
        <pose>Unspecified</pose>  
        <truncated>0</truncated>  
        <difficult>0</difficult>  
        <bndbox>  
            <xmin>198</xmin>  
            <ymin>58</ymin>  
            <xmax>286</xmax>  
            <ymax>197</ymax>  
        </bndbox>  
    </object>  
</annotation> 

In general, the file records the image name, width, and height; an image then has one or more object entries, each containing the class name, three fields that are unimportant for detection (pose, truncated, difficult), and the bndbox. The bndbox is the important part: it holds the top-left and bottom-right corner coordinates.
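
Going the other way, reading the boxes back out of such an XML file only needs the standard library; a minimal sketch based on the field names shown above:

import xml.etree.ElementTree as ET


def parse_voc_xml(xml_path):
    # Return (width, height, [(name, xmin, ymin, xmax, ymax), ...]) for one annotation file
    root = ET.parse(xml_path).getroot()
    size = root.find("size")
    width = int(size.find("width").text)
    height = int(size.find("height").text)
    boxes = []
    for obj in root.findall("object"):
        name = obj.find("name").text
        bndbox = obj.find("bndbox")
        boxes.append((name,
                      int(bndbox.find("xmin").text),
                      int(bndbox.find("ymin").text),
                      int(bndbox.find("xmax").text),
                      int(bndbox.find("ymax").text)))
    return width, height, boxes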

The following code converts the original annotations into VOC XML files:

import os
import numpy as np
from PIL import Image
import xml.etree.ElementTree as ET
# VOC-format targets need to be written out as XML files
# Dataset class names (index 0 corresponds to class ID 1 in the original annotations)
classes_names = ['airplane', 'ship', 'storage tank', 'baseball diamond', 'tennis court', 'basketball court', 'ground track field', 'harbor', 'bridge', 'vehicle']

# 1. The first part stays the same: parse the basic annotation info back out of the txt files
# Local folder: E:\dddata\NWPU VHR-10 dataset
# Parse one annotation txt file
def parse_txt(img_path, ann_path):
    objects = []
    ann_lines = np.loadtxt(ann_path, dtype=str, delimiter=',')
    n_dim = ann_lines.ndim
    # print(ann_lines.ndim)
    # print(ann_lines)
    img = Image.open(img_path)
    width, height = img.size
    if n_dim == 1:
        object = []
        x_min = int(ann_lines[0][1:])
        y_min = int(ann_lines[1][:-1])
        x_max = int(ann_lines[2][1:])
        y_max = int(ann_lines[3][:-1])
        label_id = int(ann_lines[4])
        object.append(x_min)
        object.append(y_min)
        object.append(x_max)
        object.append(y_max)
        object.append(label_id)
        object.append(width)
        object.append(height)
        objects.append(object)
        # print(x_min, y_min, x_max, y_max, lable_id)
    else:
        for ann_line in ann_lines:
            object = []
            x_min = int(ann_line[0][1:])
            y_min = int(ann_line[1][:-1])
            x_max = int(ann_line[2][1:])
            y_max = int(ann_line[3][:-1])
            label_id = int(ann_line[4])
            object.append(x_min)
            object.append(y_min)
            object.append(x_max)
            object.append(y_max)
            object.append(label_id)
            object.append(width)
            object.append(height)
            objects.append(object)
    # print(objects)
    return objects


# Extract the image paths, file names, and image sizes from the original dataset
def get_origin_data(img_folder, ann_folder):
    # For each image, locate its annotation file and parse it; return the result as a list of dicts
    # key: image path, value: list of objects
    imgpath_objects = []
    img_names = os.listdir(img_folder)
    for img_name in img_names:
        ann_name = img_name.split(".")[0] + '.txt'
        img_path = os.path.join(img_folder, img_name)
        ann_path = os.path.join(ann_folder, ann_name)
        # Parse the annotation txt file
        objects = parse_txt(img_path, ann_path)
        # Build a one-entry dict: image path -> objects
        one_imgpath_objects = {}
        one_imgpath_objects[img_path] = objects
        imgpath_objects.append(one_imgpath_objects)
    # Return a list of dicts; each dict maps an image path to that image's list of objects
    return imgpath_objects

# Pretty-print the XML tree: element is an Element, indent controls indentation, newline the line break
def pretty_xml(element, indent, newline, level=0):
    if len(element):  # the element has child elements
        if (element.text is None) or element.text.isspace():  # the element has no text of its own
            element.text = newline + indent * (level + 1)
        else:
            element.text = newline + indent * (level + 1) + element.text.strip() + newline + indent * (level + 1)
            # (the commented line below would instead put the element's text back at the parent indent level)
            # element.text = newline + indent * (level + 1) + element.text.strip() + newline + indent * level
    temp = list(element)  # list of child elements
    for subelement in temp:
        if temp.index(subelement) < (len(temp) - 1):  # not the last child: the next line starts a sibling, same indentation
            subelement.tail = newline + indent * (level + 1)
        else:  # last child: the next line closes the parent element, so indent one level less
            subelement.tail = newline + indent * level
        pretty_xml(subelement, indent, newline, level=level + 1)  # recurse into the children


# Write one VOC XML annotation file
def write_xml(img_name, width, height, object_dicts, save_path, folder='NWPU VHR-10 dataset'):
    '''
    object_dict = {'name': classes[int(object_category)],
                            'truncated': int(truncation),
                            'difficult': int(occlusion),
                            'xmin':int(bbox_left),
                            'ymin':int(bbox_top),
                            'xmax':int(bbox_left) + int(bbox_width),
                            'ymax':int(bbox_top) + int(bbox_height)
                            }
    '''
    root = ET.Element('annotation')
    ET.SubElement(root, 'folder').text = folder
    ET.SubElement(root, 'filename').text = img_name
    size_node = ET.SubElement(root, 'size')
    ET.SubElement(size_node, 'width').text = str(width)
    ET.SubElement(size_node, 'height').text = str(height)
    ET.SubElement(size_node, 'depth').text = '3'
    for object_dict in object_dicts:
        object_node = ET.SubElement(root, 'object')
        ET.SubElement(object_node, 'name').text = object_dict['name']
        ET.SubElement(object_node, 'pose').text = 'Unspecified'
        ET.SubElement(object_node, 'truncated').text = str(object_dict['truncated'])
        ET.SubElement(object_node, 'difficult').text = str(object_dict['difficult'])
        bndbox_node = ET.SubElement(object_node, 'bndbox')
        ET.SubElement(bndbox_node, 'xmin').text = str(object_dict['xmin'])
        ET.SubElement(bndbox_node, 'ymin').text = str(object_dict['ymin'])
        ET.SubElement(bndbox_node, 'xmax').text = str(object_dict['xmax'])
        ET.SubElement(bndbox_node, 'ymax').text = str(object_dict['ymax'])
    pretty_xml(root, '\t', '\n')
    tree = ET.ElementTree(root)
    tree.write(save_path, encoding='utf-8')


# The key part is how the data gets written; everything before this is basically the same as before
def set_voc_xml(imgpath_objects, save_folder = "E:/dddata/oher_format/voc"):
    # Loop over every image and save its objects as one VOC XML file
    for one_imgpath_objects in imgpath_objects:
        # print(one_imgpath_objects)
        # Convert each object into the dict structure expected by write_xml, e.g.:
        # object_dict = {'name': classes[int(object_category)],
        #                'truncated': int(truncation),
        #                'difficult': int(occlusion),
        #                'xmin': int(bbox_left),
        #                'ymin': int(bbox_top),
        #                'xmax': int(bbox_left) + int(bbox_width),
        #                'ymax': int(bbox_top) + int(bbox_height)
        #                }
        for img_path, objects in one_imgpath_objects.items():
            img_name = os.path.splitext(os.path.basename(img_path))[0]
            voc_objects = []
            width = 0
            height = 0
            for object in objects:
                # Not much processing needed here; just repackage the fields
                x_min = object[0]
                y_min = object[1]
                x_max = object[2]
                y_max = object[3]
                label_id = int(object[4])
                width_one = object[5]
                height_one = object[6]
                width = width_one
                height = height_one
                voc_object = {
                    'name': classes_names[int(label_id -1)],
                    'truncated': 0,
                    'difficult': 0,
                    'xmin': int(x_min),
                    'ymin': int(y_min),
                    'xmax': int(x_max),
                    'ymax': int(y_max)
                }
                voc_objects.append(voc_object)
            save_name = img_name + '.xml'
            save_path = os.path.join(save_folder, save_name)
            print(save_name)
            write_xml(img_name, width, height, voc_objects, save_path, folder='NWPU VHR-10 dataset')


if __name__ == '__main__':
    imgpath_objects = get_origin_data(img_folder="E:/dddata/NWPU VHR-10 dataset/positive image set",
                                      ann_folder="E:/dddata/NWPU VHR-10 dataset/ground truth")
    # 2. Convert each image's objects and save them as VOC XML files
    set_voc_xml(imgpath_objects=imgpath_objects, save_folder="E:/dddata/oher_format/voc")

COCO Format

Reference: https://blog.csdn.net/qq_41375609/article/details/94737915

A COCO dataset is a JSON file that generally contains five top-level fields:

  • info
  • images
  • annotations
  • licenses
  • categories

The dataset I was working with here is an infrared (thermal) image dataset. The meaning of each field is as follows.

The info field

The info field holds basic information about the dataset, such as where it comes from and who provided it:

info: {
    "year": int,             # year
    "version": str,          # version
    "description": str,      # dataset description
    "contributor": str,      # contributor
    "url": str,              # download URL
    "date_created": datetime
}

An example:

"info":{
	"description":"This is stable 1.0 version of the 2014 MS COCO dataset.",
	"url":"http:\/\/mscoco.org",
	"version":"1.0","year":2014,
	"contributor":"Microsoft COCO group",
	"date_created":"2015-01-27 09:11:52.357475"
},

The info field is usually not needed when writing conversion code.

The licenses field

The licenses field records the copyright information of the images; it is also rarely used in conversion code.

Its structure is:

license{
    "id": int,
    "name": str,
    "url": str,
} 

An example licenses entry:

{
	"url":"http:\/\/creativecommons.org\/licenses\/by-nc-sa\/2.0\/",
	"id":1,
	"name":"Attribution-NonCommercial-ShareAlike License"
}

The images field

The images field is one of the most important parts of the JSON file. It holds the basic information for every image, including the file name, width, and height. It is an array of image entries that can be iterated over, each with the following structure:

image{
    "id": int,                 # image ID (unique for every image)
    "width": int,              # width
    "height": int,             # height
    "file_name": str,          # file name
    "license": int,
    "flickr_url": str,         # flickr URL
    "coco_url": str,           # COCO-hosted URL
    "date_captured": datetime  # capture date
}

An example:

{
	"license":3,
	"file_name":"COCO_val2014_000000391895.jpg",
	"coco_url":"http:\/\/mscoco.org\/images\/391895",
	"height":360,"width":640,"date_captured":"2013-11-14 11:18:45",
	"flickr_url":"http:\/\/farm9.staticflickr.com\/8186\/8119368305_4e622c8349_z.jpg",
	"id":391895
}

Each image entry is a dict. Its id field is the image's ID, and every image has its own unique id; the annotations refer back to an image through this id.
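
If pycocotools is installed, this id-based lookup between images and annotations can be done directly through the COCO API; a minimal sketch (using the same annotation file as the appendix script):

from pycocotools.coco import COCO

coco = COCO("aauRainSnow-thermal.json")   # any COCO-style annotation json
img_id = coco.getImgIds()[0]              # take the first image id
img_info = coco.loadImgs(img_id)[0]       # dict with file_name, width, height, ...
ann_ids = coco.getAnnIds(imgIds=img_id)   # all annotation ids belonging to this image
anns = coco.loadAnns(ann_ids)             # each annotation has bbox, category_id, ...
print(img_info["file_name"], len(anns), "objects")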

The annotations field

This field stores the annotations for the images, with the following structure:

annotation{
    "id": int,                 # annotation ID; an image can contain more than one object, so every object gets its own unique ID
    "image_id": int,           # ID of the image this annotation belongs to (matches an id in images)
    "category_id": int,        # category ID (matches an id in categories)
    "segmentation": RLE or [polygon],  # object boundary: polygons when the instance is a single object (iscrowd=0),
                                       # RLE when it is a group of objects (iscrowd=1)
    "area": float,             # object area
    "bbox": [x,y,width,height],        # bounding box as [x, y, w, h]
    "iscrowd": 0 or 1          # see segmentation above
}

Note that bbox here is [x, y, width, height] in absolute pixels, where x and y are the top-left corner of the box; nothing is normalized, so converting to YOLO labels requires normalization. For example, in coco2017train and coco2017val the annotations describe each object as (x, y, width, height) with x, y the top-left corner, whereas the YOLO label files described earlier store a normalized (center_x, center_y, width, height), and many detection pipelines work internally with corner coordinates (xmin, ymin, xmax, ymax). The conversion therefore has to compute the box center from the top-left corner and divide by the image width and height. An example annotation:

{
	"segmentation": [[510.66,423.01,511.72,420.03,510.45......]],
	"area": 702.1057499999998,
	"iscrowd": 0,
	"image_id": 289343,
	"bbox": [473.07,395.93,38.65,28.67],
	"category_id": 18,
	"id": 1768
}
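
The conversion from such a COCO bbox to a YOLO label row can be written as a small helper like the one below (the function name is just for illustration; the appendix script does the same computation inline):

def coco_bbox_to_yolo(bbox, img_width, img_height):
    # bbox is the COCO [x, y, w, h] with (x, y) the top-left corner, in pixels
    x, y, w, h = bbox
    cx = (x + w / 2) / img_width    # normalized center x
    cy = (y + h / 2) / img_height   # normalized center y
    return cx, cy, w / img_width, h / img_height


# e.g. coco_bbox_to_yolo([473.07, 395.93, 38.65, 28.67], img_width, img_height)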

The categories field

This field records the category information that the annotations field refers to, with the following structure:

{
    "supercategory": str,   # parent category
    "id": int,              # category id (0 is conventionally reserved for the background)
    "name": str             # category name
}

Two example entries:

{
	"supercategory": "person",
	"id": 1,
	"name": "person"
},
{
	"supercategory": "vehicle",
	"id": 2,
	"name": "bicycle"
}
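
COCO category ids usually start from 1 and are not always contiguous, while YOLO class indices start from 0, so it is safer to build an explicit mapping rather than simply subtracting 1 (the appendix script below uses category_id - 1, which is fine when the ids are contiguous and start at 1); a small sketch of the idea:

def build_category_maps(categories):
    # Map COCO category ids to contiguous zero-based class indices and keep the names
    cats = sorted(categories, key=lambda c: c["id"])
    id_to_index = {cat["id"]: i for i, cat in enumerate(cats)}
    index_to_name = {i: cat["name"] for i, cat in enumerate(cats)}
    return id_to_index, index_to_name


# id_to_index, index_to_name = build_category_maps(load_dict['categories'])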

Appendix

Converting YOLO center format back to VOC corner coordinates

# object_dict = {'name': class_names_detrac[int(content[0])],
#                'truncated': 0,
#                'difficult': 0,
#                'xmin': int(center_x - bbox_width / 2),
#                'ymin': int(center_y - bbox_height / 2),
#                'xmax': int(center_x + bbox_width / 2),
#                'ymax': int(center_y + bbox_height / 2)
#                }
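
As a standalone function, the same idea looks like this (assuming center_x, center_y, bbox_width, bbox_height are already in pixels; normalized YOLO values would first need to be multiplied by the image width and height):

def yolo_to_corners(center_x, center_y, bbox_width, bbox_height):
    # Convert a center/width/height box (in pixels) into VOC-style corner coordinates
    xmin = int(center_x - bbox_width / 2)
    ymin = int(center_y - bbox_height / 2)
    xmax = int(center_x + bbox_width / 2)
    ymax = int(center_y + bbox_height / 2)
    return xmin, ymin, xmax, ymax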

The full COCO-to-YOLO conversion code is below; it is a bit messy and will be tidied up later.

import json
import copy
import numpy as np
import os
import shutil


# Filter the raw boxes and convert them to YOLO format
# One caveat: the names in this dataset follow a ... convention
def correct_objects(objects, width, height):
    yolo_objects = []
    for object in objects:
        # The raw COCO boxes are (x, y, w, h)
        x_min = object[0]
        y_min = object[1]
        box_w = object[2]
        box_h = object[3]
        if x_min < 0 or y_min < 0 or box_w < 0 or box_h < 0:
            pass
        else:
            x_max = x_min + box_w
            y_max = y_min + box_h
            label_id = object[4]
            center_x = (x_max + x_min) * 0.5 / width
            center_y = (y_max + y_min) * 0.5 / height
            wx = (x_max - x_min) * 1. / width
            hx = (y_max - y_min) * 1. / height
            yolo_object = []
            yolo_object.append(label_id)
            yolo_object.append(center_x)
            yolo_object.append(center_y)
            yolo_object.append(wx)
            yolo_object.append(hx)
            if (center_x < 1) and (center_y <1) and (wx<1) and (hx <1):
                yolo_objects.append(yolo_object)
    return yolo_objects


def save_txt(save_folder, save_name, yolo_objects):
    save_path = os.path.join(save_folder, save_name)
    yolo_objects = np.array(yolo_objects)
    # Save one row per object: the integer class id followed by four normalized floats
    np.savetxt(save_path, yolo_objects, fmt="%d %.18f %.18f %.18f %.18f", delimiter=' ')


with open("aauRainSnow-thermal.json", "r") as load_f:
    load_dict = json.load(load_f)
    # print(load_dict)
    print("字段信息:")
    for key, value in load_dict.items():
        print(key)
    # First pull out each top-level field
    info = load_dict['info']
    images = load_dict['images']
    annotations = load_dict['annotations']
    licenses = load_dict['licenses']
    categories = load_dict['categories']
    # category ids here start from 1 and need to be shifted to start from 0 for YOLO
    # print(categories)
    categories_list = []
    for category in categories:
        categories_list.append(category['name'])
    print("种类:{}种".format(len(categories_list)))
    # Iterate over the images first
    # tmp_dict_list maps image id -> image entry (each entry gets an extra 'objects' list)
    tmp_dict_list = {}
    for image in images:
        file_name = image['file_name']
        cam_id = int(file_name.split('cam')[-1][0])
        if cam_id == 2:
            image_copy = copy.copy(image)
            image_copy['objects'] = []
            # Store the entry keyed by image id so that annotations can be matched up quickly
            tmp_dict_list[image_copy['id']] = image_copy
    # Walk through the annotations and attach each one to its image
    for ann in annotations:
        ann_img_id = ann['image_id']
        # Skip annotations whose image was filtered out above (only cam 2 images are kept)
        if ann_img_id not in tmp_dict_list:
            continue
        ann_bbox = ann['bbox']
        ann_category_id = ann['category_id'] - 1  # shift category ids to start from 0
        ann_bbox.append(ann_category_id)
        # Attach the box (plus its class id) to the image entry
        tmp_dict_list[ann_img_id]['objects'].append(ann_bbox)

    # Instead of returning anything, write the label files directly here
    for value in tmp_dict_list.values():
        file_name = value["file_name"]
        file_just_name = file_name.split("/")[-1].split(".")[0]
        print(file_just_name)
        width = value['width']
        height = value['height']
        objects = value['objects']
        # Filter the raw boxes and convert them to YOLO format
        yolo_objects = correct_objects(objects, width, height)
        save_txt_name = file_just_name + '.txt'
        # print(yolo_objects)
        if len(yolo_objects) > 0:
            # Save the label txt file
            save_txt(save_folder="F:/datas/红外/AAU-RainSnow精简/aaurainsnow/v2/yolo_anns", save_name=save_txt_name, yolo_objects=np.array(yolo_objects))
            # Copy the corresponding image file
            img_name = file_name.split("/")[-1]
            image_target_path = os.path.join("F:/datas/红外/AAU-RainSnow精简/aaurainsnow/v2/yolo_images", img_name)
            image_src_path = os.path.join("F:/datas/红外/AAU-RainSnow精简/aaurainsnow", file_name)
            shutil.copy(image_src_path, image_target_path)

Acknowledgements

https://zhuanlan.zhihu.com/p/255256266?utm_source=wechat_session

