# ===== json_to_xml =====
import os
from json import loads
from dicttoxml import dicttoxml
from xml.dom.minidom import parseString
def jsonToXml(json_path, xml_path):
    """Convert one json file into a pretty-printed xml file.

    json_path: full path of the input json file
    xml_path: full path of the output xml file
    """
    with open(json_path, 'r', encoding='UTF-8') as json_file:
        annotation_dict = loads(json_file.read())
    # Wrap every list item in an <Annotation> element under an
    # <Annotations> root, without per-element type attributes.
    raw_xml = dicttoxml(annotation_dict, custom_root='Annotations',
                        item_func=lambda _item: 'Annotation', attr_type=False)
    pretty_xml = parseString(raw_xml).toprettyxml()
    with open(xml_path, 'w', encoding='UTF-8') as xml_file:
        xml_file.write(pretty_xml)
def json_to_xml(json_dir, xml_dir):
    """Convert every *.json file in json_dir into an .xml file in xml_dir.

    json_dir: directory containing the source json files
    xml_dir: output directory; created if it does not exist
    """
    # Replaces the `os.path.exists(...) == False` check; also avoids a race
    # between the existence test and the mkdir.
    os.makedirs(xml_dir, exist_ok=True)
    for file in os.listdir(json_dir):
        stem, ext = os.path.splitext(file)
        if ext == '.json':
            # splitext keeps dots inside the stem ('a.b.json' -> 'a.b.xml'),
            # where the original split('.')[0] truncated it to 'a.xml' and
            # could silently collide output names.
            jsonToXml(os.path.join(json_dir, file),
                      os.path.join(xml_dir, stem + '.xml'))
if __name__ == '__main__':
    # Convert a single file:
    # j_path = "F:/work/jsontoxml/json/test.json"
    # x_path = "F:/work/jsontoxml/json/test.xml"
    # jsonToXml(j_path, x_path)

    # Convert every json file in a directory:
    j_dir = r"E:/BaiduNetdiskDownload/bdd100k_info/bdd100k/info/100k/val/"
    x_dir = r"E:/BaiduNetdiskDownload/bdd100k_xml/val/"
    json_to_xml(j_dir, x_dir)
# ===== xml_to_txt =====
import xml.etree.ElementTree as ET
import os
from os import getcwd
from os.path import join
import glob
# Folder-name stems of the train / test splits.
sets = ['train', 'test']
# Label names used at annotation time.
classes = ['1', '2', '3', '4', '5', '6']
# The xml files store each box as its top-left and bottom-right corners
# (x1, y1, x2, y2); the txt files store the box as a normalized center
# point plus width and height (x, y, w, h).
def convert(size, box):
    """Map a VOC box (x1, x2, y1, y2) to normalized YOLO (x, y, w, h).

    size: (image_width, image_height)
    box: (xmin, xmax, ymin, ymax) in pixels
    Returns the box center plus width/height, each scaled to [0, 1].
    """
    scale_x = 1. / size[0]
    scale_y = 1. / size[1]
    center_x = (box[0] + box[1]) / 2.0 * scale_x
    center_y = (box[2] + box[3]) / 2.0 * scale_y
    width = (box[1] - box[0]) * scale_x
    height = (box[3] - box[2]) * scale_y
    return (center_x, center_y, width, height)
def convert_annotation(data_dir, imageset, image_id):
    """Convert one VOC-style xml annotation into a YOLO txt label file.

    data_dir: dataset root containing <set>_annotations/ and <set>_labels/
    imageset: split name, e.g. 'train' or 'test'
    image_id: file stem shared by the xml and txt files
    """
    in_path = data_dir + '/%s_annotations/%s.xml' % (imageset, image_id)
    out_path = data_dir + '/%s_labels/%s.txt' % (imageset, image_id)
    # Fix: the original left both file handles open; context managers
    # guarantee they are closed even if parsing raises.
    with open(in_path) as in_file, open(out_path, 'w') as out_file:
        root = ET.parse(in_file).getroot()
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)
        for obj in root.iter('object'):
            difficult = obj.find('difficult').text
            cls = obj.find('name').text
            # Skip labels outside the class list and boxes flagged difficult.
            if cls not in classes or int(difficult) == 1:
                continue
            cls_id = classes.index(cls)  # class index for the txt line
            xmlbox = obj.find('bndbox')
            b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                 float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
            bb = convert((w, h), b)
            out_file.write(str(cls_id) + " " + " ".join([str('%.6f' % a) for a in bb]) + '\n')
wd = getcwd()
print(wd)  # current working directory
data_dir = 'F:/Postgraduate_time/label_fish'
for image_set in sets:
    # Collect the file stems of every annotation xml in this split.
    image_ids = []
    for xml_path in glob.glob(data_dir + '/%s_annotations' % image_set + '/*.xml'):
        print(xml_path)
        image_ids.append(os.path.basename(xml_path)[:-4])
    print('\n%s数量:' % image_set, len(image_ids))  # confirm the count
    total = len(image_ids)
    for done, image_id in enumerate(image_ids, start=1):
        convert_annotation(data_dir, image_set, image_id)
        print("%s 数据:%s/%s文件完成!" % (image_set, done, total))
print("Done!!!")
# ===== json_to_dataset =====
import base64
import json
import os
import os.path as osp
import numpy as np
import PIL.Image
from labelme import utils
'''
制作自己的语义分割数据集需要注意以下几点:
1、我使用的labelme版本是3.16.7,建议使用该版本的labelme,有些版本的labelme会发生错误,
具体错误为:Too many dimensions: 3 > 2
安装方式为命令行pip install labelme==3.16.7
2、此处生成的标签图是8位彩色图,与视频中看起来的数据集格式不太一样。
虽然看起来是彩图,但事实上只有8位,此时每个像素点的值就是这个像素点所属的种类。
所以其实和视频中VOC数据集的格式一样。因此这样制作出来的数据集是可以正常使用的。也是正常的。
'''
if __name__ == '__main__':
    jpgs_path = "datasets/JPEGImages"
    pngs_path = "datasets/SegmentationClass"
    # Global class list; per-file label indices get re-mapped onto it below.
    classes = ["_background_","aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
    # classes = ["_background_","cat","dog"]

    for fname in os.listdir("./datasets/before/"):
        path = os.path.join("./datasets/before", fname)
        if not (os.path.isfile(path) and path.endswith('json')):
            continue
        data = json.load(open(path))

        # Prefer the base64 image embedded in the json; otherwise read the
        # referenced image file from disk and encode it ourselves.
        if data['imageData']:
            imageData = data['imageData']
        else:
            imagePath = os.path.join(os.path.dirname(path), data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = base64.b64encode(f.read()).decode('utf-8')
        img = utils.img_b64_to_arr(imageData)

        # Assign a dense integer value to each label name (background = 0).
        label_name_to_value = {'_background_': 0}
        for shape in data['shapes']:
            if shape['label'] not in label_name_to_value:
                label_name_to_value[shape['label']] = len(label_name_to_value)

        # Names ordered by value; values must be exactly 0..n-1 (dense).
        ordered = sorted(label_name_to_value.items(), key=lambda kv: kv[1])
        label_names = [name for name, _ in ordered]
        assert [value for _, value in ordered] == list(range(len(ordered)))

        lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

        stem = fname.split(".")[0]
        PIL.Image.fromarray(img).save(osp.join(jpgs_path, stem + '.jpg'))

        # Re-map the per-file label index of each pixel to its index in the
        # global `classes` list; the result is an 8-bit class-id label map.
        new = np.zeros([np.shape(img)[0], np.shape(img)[1]])
        for index_json, name in enumerate(label_names):
            new = new + classes.index(name) * (np.array(lbl) == index_json)

        utils.lblsave(osp.join(pngs_path, stem + '.png'), new)
        print('Saved ' + stem + '.jpg and ' + stem + '.png')
# ===== json_to_png =====
import cv2
import numpy as np
import os
import shutil
import matplotlib.pyplot as plt
# Earlier paths kept for reference:
# json_dir = 'G:/json_filedir/'
# label_dir = 'G:/label_filedir/'
json_dir = './datasets/before/'  # folder holding the json files (must contain only json files)
label_dir = './datasets/SegmentationClass/'  # destination folder for the generated labels
def json2png(json_folder, png_save_folder):
    """Batch-convert labelme json files into grayscale label pngs.

    json_folder: folder containing the labelme *.json files
    png_save_folder: output folder; wiped and recreated on every run

    Relies on the `labelme_json_to_dataset` command-line tool being on PATH.
    """
    # Start from a clean output folder.
    if os.path.isdir(png_save_folder):
        shutil.rmtree(png_save_folder)
    os.makedirs(png_save_folder)
    for json_file in os.listdir(json_folder):
        # Fix: skip anything that is not a json file instead of blindly
        # feeding it to labelme (the original required a json-only folder).
        if not json_file.endswith('.json'):
            continue
        stem = json_file.split(".")[0]
        json_path = os.path.join(json_folder, json_file)
        # Fix: quote the path so spaces in folder names don't break the
        # shell command.
        os.system('labelme_json_to_dataset "{}"'.format(json_path))
        temporary_path = os.path.join(json_folder, stem + "_json")
        label_path = os.path.join(temporary_path, "label.png")
        png_save_path = os.path.join(png_save_folder, stem + ".png")
        label_png = cv2.imread(label_path, 0)  # read as grayscale
        cv2.imwrite(png_save_path, label_png)  # re-save under the target name
        # Remove labelme's temporary <stem>_json folder so reruns are clean.
        shutil.rmtree(temporary_path)
# Run the batch conversion.
json2png(json_folder=json_dir, png_save_folder=label_dir)

# Visual sanity check: show a source image next to a generated label.
# test_img = cv2.imread('./datasets/JPEGImages/1.jpg')
test_img = cv2.imread('/home/robot/wyg/segformer-pytorch-master/segformer-pytorch-master/VOCdevkit/VOC2007/JPEGImages/0a0a0b1a-7c39d841_train_id.jpg')
test_label = cv2.imread('./datasets/SegmentationClass/1.png')
# test_img = cv2.imread('G:/test.jpg')
# test_label = cv2.imread('G:/label_filedir/test.png')
# NOTE(review): test_img is an absolute path into a different dataset while
# test_label comes from the local output folder — confirm both files exist.
plt.figure(figsize=(16, 8))
plt.subplot(121)
plt.imshow(test_img, 'gray')
plt.subplot(122)
plt.imshow(test_label, 'gray')
plt.show()