Annotate with labelme.
When annotation is finished, put all the images and json files in one folder.
Then create a folder and lay things out the same way I do.
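Roughly, the layout the conversion script below expects looks like this (a sketch reconstructed from the paths used in the code; the image names are just placeholders):

F:\1\2\              # labelme_path: every .jpg next to its labelme .json
    img_001.jpg
    img_001.json
    ...
.\VOC2007\           # saved_path: created by the script
    Annotations\
    JPEGImages\
    ImageSets\Main\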
import os
import numpy as np
import codecs
import json
from glob import glob
import cv2
import shutil
from sklearn.model_selection import train_test_split
# 1. Label paths
labelme_path = r"F:\1\2/"  # folder with the original labelme annotations
saved_path = "./VOC2007/"  # output folder
isUseTest = True  # whether to create a test set
print(labelme_path)
# 2. Create the required folders
if not os.path.exists(saved_path + "Annotations"):  # xmls_path
    os.makedirs(saved_path + "Annotations")
if not os.path.exists(saved_path + "JPEGImages/"):  # images_path
    os.makedirs(saved_path + "JPEGImages/")
if not os.path.exists(saved_path + "ImageSets/Main/"):  # train_val_txt_path
    os.makedirs(saved_path + "ImageSets/Main/")
# 3. Collect the json files to process
files = glob(labelme_path + "*.json")
files = [i.replace("\\", "/").split("/")[-1].split(".json")[0] for i in files]
# print(files)
# 4. Read each annotation and write the corresponding xml
for json_file_ in files:
    json_filename = labelme_path + json_file_ + ".json"
    json_file = json.load(open(json_filename, "r", encoding="utf-8"))
    height, width, channels = cv2.imread(labelme_path + json_file_ + ".jpg").shape
    with codecs.open(saved_path + "Annotations/" + json_file_ + ".xml", "w", "utf-8") as xml:
        xml.write('<annotation>\n')
        xml.write('\t<folder>' + 'VOC_data' + '</folder>\n')
        xml.write('\t<filename>' + json_file_ + ".jpg" + '</filename>\n')
        xml.write('\t<source>\n')
        xml.write('\t\t<database>VOC Data</database>\n')
        xml.write('\t\t<image>flickr</image>\n')
        xml.write('\t\t<flickrid>NULL</flickrid>\n')
        xml.write('\t</source>\n')
        xml.write('\t<size>\n')
        xml.write('\t\t<width>' + str(width) + '</width>\n')
        xml.write('\t\t<height>' + str(height) + '</height>\n')
        xml.write('\t\t<depth>' + str(channels) + '</depth>\n')
        xml.write('\t</size>\n')
        xml.write('\t\t<segmented>0</segmented>\n')
        for multi in json_file["shapes"]:
            points = np.array(multi["points"])
            labelName = multi["label"]
            xmin = min(points[:, 0])
            xmax = max(points[:, 0])
            ymin = min(points[:, 1])
            ymax = max(points[:, 1])
            label = multi["label"]
            if xmax <= xmin:
                pass
            elif ymax <= ymin:
                pass
            else:
                xml.write('\t<object>\n')
                xml.write('\t\t<name>' + labelName + '</name>\n')
                xml.write('\t\t<pose>Unspecified</pose>\n')
                xml.write('\t\t<truncated>1</truncated>\n')
                xml.write('\t\t<difficult>0</difficult>\n')
                xml.write('\t\t<bndbox>\n')
                xml.write('\t\t\t<xmin>' + str(int(xmin)) + '</xmin>\n')
                xml.write('\t\t\t<ymin>' + str(int(ymin)) + '</ymin>\n')
                xml.write('\t\t\t<xmax>' + str(int(xmax)) + '</xmax>\n')
                xml.write('\t\t\t<ymax>' + str(int(ymax)) + '</ymax>\n')
                xml.write('\t\t</bndbox>\n')
                xml.write('\t</object>\n')
                # print(json_filename, xmin, ymin, xmax, ymax, label)
        xml.write('</annotation>')
# 5. Copy the images into VOC2007/JPEGImages/
image_files = glob(labelme_path + "*.jpg")
print("copy image files to VOC2007/JPEGImages/")
for image in image_files:
    shutil.copy(image, saved_path + "JPEGImages/")
# 6. Split the file list and write the txt files
txtsavepath = saved_path + "ImageSets/Main/"
ftrainval = open(txtsavepath + '/trainval.txt', 'w')
ftest = open(txtsavepath + '/test.txt', 'w')
ftrain = open(txtsavepath + '/train.txt', 'w')
fval = open(txtsavepath + '/val.txt', 'w')
total_files = glob("data/VOCdevkit/VOC2007/Annotations/*.xml")
total_files = [i.replace("\\", "/").split("\\")[-1].split(".xml")[0] for i in total_files]
trainval_files = []
test_files = []
print(total_files)
print(trainval_files)
if isUseTest:
    trainval_files, test_files = train_test_split(total_files, test_size=0.3, random_state=55)
else:
    trainval_files = total_files
for file in trainval_files:
    ftrainval.write(file + "\n")
# split
train_files, val_files = train_test_split(trainval_files, test_size=0.3, random_state=55)
# train
for file in train_files:
    ftrain.write(file + "\n")
# val
for file in val_files:
    fval.write(file + "\n")
for file in test_files:
    # print(file)
    ftest.write(file + "\n")
ftrainval.close()
ftrain.close()
fval.close()
ftest.close()
Fix
Running it throws an error: ValueError: With n_samples=0, test_size=0.3 and train_size=None, the resulting train set will be empty. Adjust any of the aforementioned parameters.
A quick search says this error usually comes from:
the path being wrong;
the json files and the images not being in the same folder;
the wrong sklearn version, or wrong train_test_split arguments (I don't think that's it, my sklearn is 1.0.2).
Found the cause: the folder that total_files globs over (data/VOCdevkit/VOC2007/Annotations) doesn't exist on my machine, so change that line to the following.
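A hedged sketch of the fix (it points the glob at the Annotations folder this script just wrote; splitting on "/" after normalizing the separators keeps only the bare file name):

total_files = glob(saved_path + "Annotations/*.xml")
total_files = [i.replace("\\", "/").split("/")[-1].split(".xml")[0] for i in total_files]

With bare names in the txt files instead of full paths, the split lists line up with the Annotations and JPEGImages folders.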
That made it run successfully.
One more check: during training I kept hitting path errors, so delete the 4 txt files and run the script below to regenerate them.
import os
import random
random.seed(0)
xmlfilepath = r'./VOC2007/Annotations'
saveBasePath = r"./VOC2007/ImageSets/Main"
trainval_percent = 0.8
train_percent = 1
temp_xml = os.listdir(xmlfilepath)
total_xml = []
for xml in temp_xml:
    if xml.endswith(".xml"):
        total_xml.append(xml)
num = len(total_xml)
list = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(list, tv)
train = random.sample(trainval, tr)
print("train and val size", tv)
print("traub suze", tr)
ftrainval = open(os.path.join(saveBasePath, 'trainval.txt'), 'w')
ftest = open(os.path.join(saveBasePath, 'test.txt'), 'w')
ftrain = open(os.path.join(saveBasePath, 'train.txt'), 'w')
fval = open(os.path.join(saveBasePath, 'val.txt'), 'w')
for i in list:
    name = total_xml[i][:-4] + '\n'
    if i in trainval:
        ftrainval.write(name)
        if i in train:
            ftrain.write(name)
        else:
            fval.write(name)
    else:
        ftest.write(name)
ftrainval.close()
ftrain.close()
fval.close()
ftest.close()
Result
All 4 txt files now have entries in them, and the VOC2007 dataset is done.
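If you want to double-check before training, here is a small sketch (my own addition, assuming the ./VOC2007 layout used above) that counts the entries in the four txt files and flags any name that is missing its xml or jpg:

import os

main_dir = "./VOC2007/ImageSets/Main"
for split in ("trainval", "train", "val", "test"):
    with open(os.path.join(main_dir, split + ".txt")) as f:
        names = [line.strip() for line in f if line.strip()]
    # an entry is only usable if both its annotation and its image exist
    missing = [n for n in names
               if not os.path.exists("./VOC2007/Annotations/" + n + ".xml")
               or not os.path.exists("./VOC2007/JPEGImages/" + n + ".jpg")]
    print(split, len(names), "entries,", len(missing), "missing files")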
Modify the classes
Change the class list to your own class names: write it the way I do and delete the original entries.
Set data_dir to an absolute path.
On Windows set the worker count to 1.
A sketch of these edits is below.
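A hedged sketch of what those edits usually look like in a stock YOLOX checkout (file names are from the yolox_voc example; the exact attribute names and the place where data_dir is passed may differ in your version):

# yolox/data/datasets/voc_classes.py -- replace the default names with your own
VOC_CLASSES = (
    "zangdian",   # the single class used in this walkthrough
)

# exps/example/yolox_voc/yolox_voc_s.py (excerpt) -- adjust inside Exp.__init__
self.num_classes = 1        # must match the number of names in VOC_CLASSES
self.data_num_workers = 1   # the "worker = 1" setting for Windows

# still in yolox_voc_s.py, where the VOC dataset is built, point data_dir
# at the absolute path that contains your VOC2007 folder, for example
# data_dir=r"F:\YOLOX-main\datasets\VOCdevkit"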
First run
python setup.py install
Training command (-d is the number of devices, -b the batch size, --fp16 enables mixed precision, -c points at the pretrained checkpoint):
python tools/train.py -f exps/example/yolox_voc/yolox_voc_s.py -d 1 -b 1 --fp16 -c w/yolox_s.pth
Then just wait for it to finish.
Tips:
If training gets interrupted, resume from the latest checkpoint with this command:
python tools/train.py -f exps/example/yolox_voc/yolox_voc_s.py -d 1 -b 1 --fp16 -c F:\YOLOX-main\YOLOX_outputs\yolox_voc_s\latest_ckpt.pth --resume
Changing the label inside the json files, e.g. renaming the old label 老头 to laoto:
# -*- encoding: utf-8 -*-
import os
import json
def Edit_label(json_dir, new_label='zangdian'):
    json_files = os.listdir(json_dir)
    json_dict = {}
    for json_file in json_files:
        # only process json files
        if json_file[-4:] != 'json':
            continue
        jsonfile = json_dir + '/' + json_file
        with open(jsonfile, 'r', encoding='utf-8') as jf:
            info = json.load(jf)
            # overwrite the label of every shape
            for i, label in enumerate(info['shapes']):
                info['shapes'][i]['label'] = new_label
            # keep the modified dict
            json_dict = info
        # write the modified content back to the original file
        with open(jsonfile, 'w') as fw:
            json.dump(json_dict, fw)
if __name__ == '__main__':
    # folder containing the json files
    json_dir = r'F:\w2'
    new_label = 'zangdian'
    Edit_label(json_dir, new_label)
    print('OK!')
Done.