在之前写过一篇也是关于COCO数据集的文章,但是在跑实验的时候,回归方面总是感觉很欠缺,所以本人对之前的代码进行了简单修改,且bbox回归结果比之前要好很多。
首先,从coco截取特定的类别,输出格式为XML
之前的代码输出的bbox在xml文件bbox的坐标是int类型,虽然相差不大,但依然会影响对BBOX的定位精度。
修改后将输出的xml文件bbox的坐标值为float类型
使用方法:
savepath = "填写保存的路径"
classes_names = ['填写自己想要的类别']
dataDir = '填写coco数据集的地址'
from pycocotools.coco import COCO
import os
import shutil
from tqdm import tqdm
import skimage.io as io
import matplotlib.pyplot as plt
import cv2
from PIL import Image, ImageDraw

# --- Paths that must be configured by the user ---
savepath = "F:/coco-lei-fenbu/coco-surfboard/"
img_dir = savepath + 'images/'        # output directory for copied images
anno_dir = savepath + 'annotations/'  # output directory for generated XML
datasets_list = ['train2017', 'val2017']
# COCO has 80 classes; list the class names to extract here (e.g. 'person').
classes_names = ['surfboard']
# Path of the original COCO dataset that contains all categories.
'''
目录格式如下:
$COCO_PATH
----|annotations
----|train2017
----|val2017
----|test2017
'''
dataDir = 'F:/coco/'
# Template for the XML header: filled with (filename, width, height, depth).
headstr = """\
<annotation>
<folder>VOC</folder>
<filename>%s</filename>
<source>
<database>My Database</database>
<annotation>COCO</annotation>
<image>flickr</image>
<flickrid>NULL</flickrid>
</source>
<owner>
<flickrid>NULL</flickrid>
<name>company</name>
</owner>
<size>
<width>%d</width>
<height>%d</height>
<depth>%d</depth>
</size>
"""
# Per-object template. NOTE: the tags are the non-standard <cx><cy><w><h>
# (not PASCAL VOC's xmin/ymin/xmax/ymax) and use %s so that float
# coordinates are written without truncation; the companion XML->JSON
# script below parses exactly these tags.
objstr = """\
<object>
<name>%s</name>
<bndbox>
<cx>%s</cx>
<cy>%s</cy>
<w>%s</w>
<h>%s</h>
</bndbox>
</object>
"""
# Closing tag of the annotation document.
tailstr = '''\
</annotation>
'''
def mkr(path):
    """Create directory *path* (including missing parents) if it does not exist.

    NOTE(review): the original comment claimed "delete then recreate", but the
    code never deletes anything — it only creates missing directories.
    """
    # exist_ok=True removes the TOCTOU race between the old
    # os.path.exists() check and os.makedirs().
    os.makedirs(path, exist_ok=True)
def id2name(coco):
    """Return a mapping of COCO category id -> category name."""
    return {cat['id']: cat['name'] for cat in coco.dataset['categories']}
def write_xml(anno_path, head, objs, tail):
    """Write one annotation XML file.

    :param anno_path: destination file path
    :param head: pre-formatted header string (from ``headstr``)
    :param objs: list of [class_name, x, y, w, h] entries, each rendered
        through the module-level ``objstr`` template
    :param tail: closing string (from ``tailstr``)
    """
    # Context manager guarantees the handle is closed even on error
    # (the original opened the file and never closed it).
    with open(anno_path, "w") as f:
        f.write(head)
        for obj in objs:
            f.write(objstr % (obj[0], obj[1], obj[2], obj[3], obj[4]))
        f.write(tail)
def save_annotations_and_imgs(coco, dataset, filename, objs):
    """Copy one image into the output tree and write its XML annotation.

    Example: COCO_train2017_000000196610.jpg -> COCO_train2017_000000196610.xml
    Relies on the module-level ``anno_dir``, ``img_dir`` and ``dataDir`` paths.
    """
    dst_anno_dir = os.path.join(anno_dir, dataset)
    mkr(dst_anno_dir)
    # splitext handles extensions of any length (the original filename[:-3]
    # silently assumed a 3-character extension).
    anno_path = os.path.join(dst_anno_dir, os.path.splitext(filename)[0] + '.xml')
    img_path = dataDir + dataset + '/' + filename
    print("img_path: ", img_path)
    dst_img_dir = os.path.join(img_dir, dataset)
    mkr(dst_img_dir)
    dst_imgpath = os.path.join(dst_img_dir, filename)
    print("dst_imgpath: ", dst_imgpath)
    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread returns None for unreadable/missing files; skip instead
        # of crashing on img.shape (the original had this check commented out).
        print(filename + " could not be read, skipped")
        return
    shutil.copy(img_path, dst_imgpath)
    # headstr expects (filename, width, height, depth); cv2 shape is (h, w, c).
    head = headstr % (filename, img.shape[1], img.shape[0], img.shape[2])
    tail = tailstr
    write_xml(anno_path, head, objs, tail)
def showimg(coco, dataset, img, classes, cls_id, show=True):
    # Collect [class_name, x, y, w, h] entries for all annotations of the
    # wanted classes on one image; optionally display the image via matplotlib.
    global dataDir
    I = Image.open('%s/%s/%s' % (dataDir, dataset, img['file_name']))
    # Fetch annotation ids for this image restricted to the given categories.
    annIds = coco.getAnnIds(imgIds=img['id'], catIds=cls_id, iscrowd=None)
    # print(annIds)
    anns = coco.loadAnns(annIds)
    # print(anns)
    # coco.showAnns(anns)
    objs = []
    for ann in anns:
        class_name = classes[ann['category_id']]
        if class_name in classes_names:
            print(class_name)
            if 'bbox' in ann:
                # COCO bbox format is [x, y, width, height]. float() keeps
                # sub-pixel precision (the int-truncating version hurt
                # bbox regression accuracy).
                bbox = ann['bbox']
                xmin = float(bbox[0])
                ymin = float(bbox[1])
                # NOTE(review): despite the names, xmax/ymax hold WIDTH and
                # HEIGHT — the corner conversion is deliberately commented
                # out to match the <cx><cy><w><h> XML template.
                xmax = float(bbox[2])  # bbox[2] + bbox[0]
                ymax = float(bbox[3])  # bbox[3] + bbox[1]
                obj = [class_name, xmin, ymin, xmax, ymax]
                objs.append(obj)
                draw = ImageDraw.Draw(I)
                # NOTE(review): rectangle expects [x0, y0, x1, y1]; since
                # xmax/ymax are w/h the drawn box is wrong when show=True —
                # harmless here because the driver passes show=False.
                draw.rectangle([xmin, ymin, xmax, ymax])
    if show:
        plt.figure()
        plt.axis('off')
        plt.imshow(I)
        plt.show()
    return objs
# --- Driver: extract the wanted classes from each split and emit VOC-style XML ---
for dataset in datasets_list:
    # ./COCO/annotations/instances_train2017.json
    annFile = '{}/annotations/instances_{}.json'.format(dataDir, dataset)
    # Initialise the COCO API with this split's annotation file.
    coco = COCO(annFile)
    # All categories in the dataset (id -> name).
    classes = id2name(coco)
    print(classes)
    # e.g. [1, 2, 3, 4, 6, 8]
    classes_ids = coco.getCatIds(catNms=classes_names)
    print(classes_ids)
    for cls in classes_names:
        # Category id(s) of this class.
        cls_id = coco.getCatIds(catNms=[cls])
        img_ids = coco.getImgIds(catIds=cls_id)
        print(cls, len(img_ids))
        # imgIds=img_ids[0:10]
        for imgId in tqdm(img_ids):
            img = coco.loadImgs(imgId)[0]
            filename = img['file_name']
            # print(filename)
            objs = showimg(coco, dataset, img, classes, classes_ids, show=False)
            print(objs)
            save_annotations_and_imgs(coco, dataset, filename, objs)
其次,将转换后的XML文件恢复到JSON文件
之前的代码也是只能对bbox的int类型的坐标进行计算,经过简单的修改,已经可以使用float类型进行计算。
由于本人不使用segmentation,这个部分并没有修改。
同理area区域的计算也没有修改,因为不是太懂。
如果有比较懂的,可以在下面评论,我会继续尝试修改
使用方法:
只需修改地址即可
import xml.etree.ElementTree as ET
import os
import json

# Accumulators for the COCO-format JSON document being assembled.
coco = dict()
coco['images'] = []
coco['type'] = 'instances'
coco['annotations'] = []
coco['categories'] = []
category_set = dict()   # category name -> assigned category id
image_set = set()       # file names already registered (duplicate guard)
category_item_id = 0    # running category id counter
image_id = 20210000000  # running image id counter (arbitrary starting base)
annotation_id = 0       # running annotation id counter
def addCatItem(name):
    """Register category *name* and return its freshly assigned id.

    Appends a record to coco['categories'] and remembers the id in
    ``category_set`` so later lookups reuse it.
    """
    global category_item_id
    category_item_id += 1
    # Key insertion order matches the original so the serialized JSON
    # looks identical.
    entry = {
        'supercategory': 'none',
        'id': category_item_id,
        'name': name,
    }
    coco['categories'].append(entry)
    category_set[name] = category_item_id
    return category_item_id
def addImgItem(file_name, size):
    """Append an image record built from *file_name* and *size*, return its id.

    :raises Exception: if the filename, width, or height was never parsed
        from the XML.
    """
    global image_id
    # Guard clauses: all three fields must have been filled by the parser.
    if file_name is None:
        raise Exception('Could not find filename tag in xml file.')
    if size['width'] is None:
        raise Exception('Could not find width tag in xml file.')
    if size['height'] is None:
        raise Exception('Could not find height tag in xml file.')
    image_id += 1
    record = {
        'id': image_id,
        'file_name': file_name,
        'width': size['width'],
        'height': size['height'],
    }
    coco['images'].append(record)
    image_set.add(file_name)
    return image_id
def addAnnoItem(object_name, image_id, category_id, bbox):
    """Append one COCO annotation record.

    :param object_name: class name (unused in the record, kept for the caller)
    :param image_id: id returned by addImgItem
    :param category_id: id returned by addCatItem
    :param bbox: [x, y, w, h] box, float coordinates preserved
    """
    global annotation_id
    annotation_item = dict()
    # Rectangle polygon equivalent to the bbox, corners in the order
    # left_top, left_bottom, right_bottom, right_top.
    seg = [
        bbox[0], bbox[1],
        bbox[0], bbox[1] + bbox[3],
        bbox[0] + bbox[2], bbox[1] + bbox[3],
        bbox[0] + bbox[2], bbox[1],
    ]
    annotation_item['segmentation'] = [seg]
    annotation_item['area'] = bbox[2] * bbox[3]
    annotation_item['iscrowd'] = 0
    annotation_item['ignore'] = 0
    annotation_item['image_id'] = image_id
    annotation_item['bbox'] = bbox
    # BUG FIX: the original hard-coded category_id = 1, which mislabels every
    # annotation as soon as more than one category is converted; use the
    # real id passed by the caller (identical output for single-class runs,
    # since addCatItem starts numbering at 1).
    annotation_item['category_id'] = category_id
    annotation_id += 1
    annotation_item['id'] = annotation_id
    coco['annotations'].append(annotation_item)
def parseXmlFiles(xml_path):
    # Parse every .xml annotation file under xml_path and accumulate images,
    # categories and annotations into the module-level `coco` dict through
    # addImgItem / addCatItem / addAnnoItem.
    for f in os.listdir(xml_path):
        if not f.endswith('.xml'):
            continue
        # Per-file parser state.
        bndbox = dict()
        size = dict()
        current_image_id = None
        current_category_id = None
        file_name = None
        size['width'] = None
        size['height'] = None
        size['depth'] = None
        xml_file = os.path.join(xml_path, f)
        print(xml_file)
        tree = ET.parse(xml_file)
        root = tree.getroot()
        if root.tag != 'annotation':
            raise Exception('pascal voc xml root element should be annotation, rather than {}'.format(root.tag))
        # elem is <folder>, <filename>, <size>, <object>
        for elem in root:
            current_parent = elem.tag
            current_sub = None
            object_name = None
            if elem.tag == 'folder':
                continue
            if elem.tag == 'filename':
                file_name = elem.text
                if file_name in category_set:
                    raise Exception('file_name duplicated')
            # Register the image only once both <filename> and <size> have
            # been parsed (relies on element order in the file).
            elif current_image_id is None and file_name is not None and size['width'] is not None:
                if file_name not in image_set:
                    current_image_id = addImgItem(file_name, size)
                    print('add image with {} and {}'.format(file_name, size))
                else:
                    raise Exception('duplicated image: {}'.format(file_name))
            # subelem is <width>, <height>, <depth>, <name>, <bndbox>
            for subelem in elem:
                # Reset bbox fields each pass. NOTE: the expected tags are the
                # non-standard <cx>/<cy>/<w>/<h> emitted by the companion
                # COCO->XML script (not xmin/ymin/xmax/ymax).
                bndbox['cx'] = None
                bndbox['cy'] = None
                bndbox['w'] = None
                bndbox['h'] = None
                current_sub = subelem.tag
                if current_parent == 'object' and subelem.tag == 'name':
                    object_name = subelem.text
                    if object_name not in category_set:
                        current_category_id = addCatItem(object_name)
                    else:
                        current_category_id = category_set[object_name]
                elif current_parent == 'size':
                    if size[subelem.tag] is not None:
                        raise Exception('xml structure broken at size tag.')
                    size[subelem.tag] = int(subelem.text)
                # option is <cx>, <cy>, <w>, <h> when subelem is <bndbox>
                for option in subelem:
                    if current_sub == 'bndbox':
                        if bndbox[option.tag] is not None:
                            raise Exception('xml structure corrupted at bndbox tag.')
                        # float() keeps sub-pixel precision (the earlier
                        # int-based version truncated the coordinates).
                        bndbox[option.tag] = float(option.text)
                # Emit the annotation only after a full <bndbox> was parsed
                # ('cx' plays the role 'xmin' had in the original script).
                if bndbox['cx'] is not None:
                    if object_name is None:
                        raise Exception('xml structure broken at bndbox tag')
                    if current_image_id is None:
                        raise Exception('xml structure broken at bndbox tag')
                    if current_category_id is None:
                        raise Exception('xml structure broken at bndbox tag')
                    bbox = []
                    # x
                    bbox.append(bndbox['cx'])
                    # y
                    bbox.append(bndbox['cy'])
                    # w
                    bbox.append(bndbox['w'])
                    # h
                    bbox.append(bndbox['h'])
                    print('add annotation with {},{},{},{}'.format(object_name, current_image_id, current_category_id,
                                                                   bbox))
                    addAnnoItem(object_name, current_image_id, current_category_id, bbox)
if __name__ == '__main__':
    # Paths to configure: the parent directory of the generated XML files,
    # and the output directory for the JSON file.
    xml_dir = r'F:\coco-lei-fenbu\jihe\annotations'
    json_dir = r'F:\coco-lei-fenbu\jihe\annotations'
    dataset_lists = ['val2017']
    for dataset in dataset_lists:
        xml_path = os.path.join(xml_dir, dataset)
        json_file = json_dir + '/{}.json'.format(dataset)
        parseXmlFiles(xml_path)
        # Context manager closes the output file (the original passed an
        # anonymous open() to json.dump and leaked the handle).
        with open(json_file, 'w') as fp:
            json.dump(coco, fp)
附加:
此代码只能用作数据集的划分,但是有个致命问题。比如说我在coco数据集上划分两千张图片,就只会得到固定的两千张图片。如果我划分4000张图片,其中必定会包含之前的两千张图片。
其中的选择机理我并没有搞懂
# -*-coding:utf-8-*-
import json
import time
import shutil
import os
from collections import defaultdict
import json
from pathlib import Path
class COCO:
    """Minimal COCO annotation helper.

    Loads a COCO-format JSON annotation file, builds id-based indexes, and
    can export a subset of N images (with their annotations) into a new
    image folder plus a new JSON file.
    """

    def __init__(self, annotation_file=None, origin_img_dir=""):
        """
        Constructor of Microsoft COCO helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :param origin_img_dir (str): folder that hosts the original images
        :return:
        """
        self.origin_dir = origin_img_dir
        # One image maps to many annotations; one category maps to many images.
        self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()
        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
        if annotation_file is not None:
            print('loading annotations into memory...')
            tic = time.time()
            # Context manager closes the file (the original leaked the handle).
            with open(annotation_file, 'r') as fp:
                dataset = json.load(fp)
            assert isinstance(dataset, dict), 'annotation file format {} not supported'.format(type(dataset))
            print('Done (t={:0.2f}s)'.format(time.time() - tic))
            self.dataset = dataset
            self.createIndex()

    def createIndex(self):
        """Build image->annotations, category->images and id->record indexes."""
        print('creating index...')
        anns, cats, imgs = {}, {}, {}
        imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
        if 'annotations' in self.dataset:
            for ann in self.dataset['annotations']:
                imgToAnns[ann['image_id']].append(ann)
                anns[ann['id']] = ann
        if 'images' in self.dataset:
            for img in self.dataset['images']:
                imgs[img['id']] = img
        if 'categories' in self.dataset:
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat
        if 'annotations' in self.dataset and 'categories' in self.dataset:
            for ann in self.dataset['annotations']:
                catToImgs[ann['category_id']].append(ann['image_id'])
        print('index created!')
        # Publish the finished indexes as class members.
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats

    def build(self, tarDir=None, tarFile='./new.json', N=2300, shuffle=False):
        """Copy up to N images (plus their annotations) into *tarDir* and
        write the corresponding annotation JSON to *tarFile*.

        :param tarDir: destination image folder (created if missing)
        :param tarFile: destination JSON path
        :param N: number of images to export
        :param shuffle: when True, pick a RANDOM subset instead of the first
            N images in file order — fixes the original behaviour where the
            exported subset was always the same fixed images, and a larger N
            always contained the smaller subset. Default False keeps the old
            deterministic behaviour.
        """
        import random  # local import: only needed when shuffle=True is possible
        # BUG FIX: the original kept JSON's "\/" escape sequences inside
        # Python strings, which json.dump re-escaped into "\\/" on output;
        # plain "//" is the correct URL text.
        load_json = {
            'images': [], 'annotations': [], 'categories': [], 'type': 'instances',
            "info": {"description": "This is stable 1.0 version of the 2014 MS COCO dataset.",
                     "url": "http://mscoco.org", "version": "1.0", "year": 2014,
                     "contributor": "Microsoft COCO group",
                     "date_created": "2015-01-27 09:11:52.357475"},
            "licenses": [
                {"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/", "id": 1,
                 "name": "Attribution-NonCommercial-ShareAlike License"},
                {"url": "http://creativecommons.org/licenses/by-nc/2.0/", "id": 2,
                 "name": "Attribution-NonCommercial License"},
                {"url": "http://creativecommons.org/licenses/by-nc-nd/2.0/", "id": 3,
                 "name": "Attribution-NonCommercial-NoDerivs License"},
                {"url": "http://creativecommons.org/licenses/by/2.0/", "id": 4,
                 "name": "Attribution License"},
                {"url": "http://creativecommons.org/licenses/by-sa/2.0/", "id": 5,
                 "name": "Attribution-ShareAlike License"},
                {"url": "http://creativecommons.org/licenses/by-nd/2.0/", "id": 6,
                 "name": "Attribution-NoDerivs License"},
                {"url": "http://flickr.com/commons/usage/", "id": 7,
                 "name": "No known copyright restrictions"},
                {"url": "http://www.usa.gov/copyright.shtml", "id": 8,
                 "name": "United States Government Work"},
            ]}
        # BUG FIX: plain mkdir() failed when parent dirs were missing or the
        # target already existed.
        Path(tarDir).mkdir(parents=True, exist_ok=True)
        img_ids = list(self.imgs)
        if shuffle:
            random.shuffle(img_ids)
        for i in img_ids:
            if N == 0:
                break
            tic = time.time()
            img = self.imgs[i]
            load_json['images'].append(img)
            fname = os.path.join(tarDir, img['file_name'])
            anns = self.imgToAnns[img['id']]
            for ann in anns:
                load_json['annotations'].append(ann)
            if not os.path.exists(fname):
                shutil.copy(self.origin_dir + '/' + img['file_name'], tarDir)
            print('copy {}/{} images (t={:0.3f}s)'.format(i, N, time.time() - tic))
            N -= 1
        for i in self.cats:
            load_json['categories'].append(self.cats[i])
        with open(tarFile, 'w+') as f:
            json.dump(load_json, f, indent=4)
# Paths of the full COCO dataset's images and annotations.
coco = COCO('H:/coco-lei/coco-5lei-01/annotations/instances_train2017.json',
            origin_img_dir='H:/coco-lei/coco-5lei-01/train2017')
# Destination folder and JSON for the extracted subset of 2000 train images.
coco.build('labelGenerator/coco-5lei/train2017', 'labelGenerator/coco-5lei/instances_train2017.json', 2000)
coco = COCO('H:/coco-lei/coco-5lei-01/annotations/instances_val2017.json',
            origin_img_dir='H:/coco-lei/coco-5lei-01/val2017')
# Destination folder and JSON for the extracted subset of 736 val images.
coco.build('labelGenerator/coco-5lei/val2017', 'labelGenerator/coco-5lei/instances_val2017.json', 736)
希望有清楚的小伙伴,能告知一二.
下面这个是,coco各个类型大的名称,以及每个类的数量汇总。
转载: