# yolo2coco: convert YOLO segmentation label text files to a COCO-style JSON
import json
import glob
import os
import cv2
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from PIL import Image, ImageDraw, ImageFont
import numpy as np
def calculate_polygon_area(polygon):
    """Return the area enclosed by a polygon using the shoelace formula.

    :param polygon: (N, 2) numpy array of vertex (x, y) coordinates
    :return: the polygon area as a non-negative float
    """
    xs, ys = polygon[:, 0], polygon[:, 1]
    # Shoelace: sum of cross products of consecutive vertices (wrap-around via roll)
    cross_sum = np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1))
    return 0.5 * np.abs(cross_sum)
def calculate_bounding_box(polygon):
    """Return the axis-aligned bounding box of a polygon.

    :param polygon: (N, 2) numpy array of vertex (x, y) coordinates
    :return: list [x_min, y_min, width, height] (COCO bbox format)
    """
    mins = polygon.min(axis=0)
    maxs = polygon.max(axis=0)
    return [mins[0], mins[1], maxs[0] - mins[0], maxs[1] - mins[1]]
def text_to_json_segmentation(in_labels, in_images, out_json):
    """
    Convert an instance segmentation dataset from text files generated by the
    function 'json_to_text_segmentation' (for YOLO) to a COCO-style JSON file
    (for MMdet). This can be applied for Level 0/1/2 (must modify the last code).

    :param in_labels: input folder containing the label text files
    :param in_images: input folder containing the image files (just for getting the image size)
    :param out_json: output JSON file
    """
    # Initialize the output JSON structure
    data = dict()
    data['annotations'] = []
    data['images'] = []
    # Annotation ids are 1-based, per the COCO convention
    num_annotations = 1
    # sorted() makes the image-id assignment deterministic across runs
    # (glob order is filesystem-dependent)
    txt_files = sorted(glob.glob(os.path.join(in_labels, '*.txt')))
    for k, txt_file in enumerate(txt_files):
        # Derive the image name from the label name. os.path.splitext touches
        # only the extension; the original .replace('txt', 'jpg') replaced ALL
        # occurrences and would corrupt names like 'txt_scene.txt'.
        img_name = os.path.splitext(os.path.basename(txt_file))[0] + '.jpg'
        # Context manager closes the image file handle (previously leaked)
        with Image.open(os.path.join(in_images, img_name)) as img:
            image_width, image_height = img.size
        # Create annotation items for each line of the label file
        with open(txt_file) as f:
            for line in f:
                # Each line: class_id followed by normalized polygon coords
                values = [float(x) for x in line.strip().split()]
                class_id = int(values[0]) + 1  # category ids start from 1
                polygon = np.array(values[1:]).reshape(-1, 2)
                # De-normalize to pixel coordinates
                polygon[:, 0] = polygon[:, 0] * image_width
                polygon[:, 1] = polygon[:, 1] * image_height
                area = calculate_polygon_area(polygon)
                bbox = calculate_bounding_box(polygon)
                # Create a new annotation item
                ann_item = dict()
                ann_item['segmentation'] = [polygon.flatten().tolist()]
                # Cast numpy scalars to float so json.dumps keeps the
                # fractional part (the original 'default=int' truncated
                # every area/bbox value to an integer)
                ann_item['area'] = float(area)
                ann_item['iscrowd'] = 0
                ann_item['image_id'] = k + 1  # image ids start from 1
                ann_item['bbox'] = [float(v) for v in bbox]
                ann_item['category_id'] = class_id
                ann_item['id'] = num_annotations
                data['annotations'].append(ann_item)
                num_annotations += 1
        # Create a new image item and append it to the list
        img_item = dict()
        img_item['id'] = k + 1  # index starts from 1
        img_item['file_name'] = img_name
        img_item['height'] = image_height
        img_item['width'] = image_width
        data['images'].append(img_item)
        print(os.path.basename(txt_file) + ' done')
    data['categories'] = [{'supercategory': 'class1', 'id': 1, 'name': 'class1'}]
    # Write the dictionary to a JSON file
    print('Writing the data to a JSON file')
    with open(out_json, 'w') as f:
        # 'default=float' is a safety net for any remaining numpy scalar
        # types; unlike the original 'default=int' it preserves precision
        f.write(json.dumps(data, default=float, indent=4))
if __name__ == '__main__':
    # Convert the YOLO segmentation text labels into a single COCO JSON file
    text_to_json_segmentation(
        in_labels='labels/test',
        in_images='images/test',
        out_json='instances_test2017.json',
    )