使用 labelme 进行标注
新建 json_to_txt.py 文件,将标注好的 json 文件转换成 txt 文件
# -*- coding: utf-8 -*-
# 实例分割,将json转换为txt
import json
import os
import argparse
from tqdm import tqdm
def convert_label_json(json_dir, save_dir, classes):
    """Convert labelme instance-segmentation JSON files to YOLO txt labels.

    Each output line is ``<class_index> x1 y1 x2 y2 ...`` where polygon
    point coordinates are normalised to [0, 1] by the image width/height.

    Args:
        json_dir: directory containing labelme ``*.json`` annotation files.
        save_dir: directory where the converted ``*.txt`` files are written
            (must already exist).
        classes: comma-separated class names, e.g. ``"cap,root"``; a name's
            position in this list becomes its class index.

    Raises:
        ValueError: if a shape label is not listed in *classes*.
    """
    # Progress bar is optional: fall back to a no-op wrapper if tqdm
    # is not installed so the conversion still runs.
    try:
        from tqdm import tqdm
    except ImportError:
        def tqdm(iterable):
            return iterable

    class_names = classes.split(',')
    # Only process .json files so stray files in json_dir are ignored.
    json_names = [n for n in os.listdir(json_dir) if n.endswith('.json')]
    for json_name in tqdm(json_names):
        with open(os.path.join(json_dir, json_name), 'r', encoding='utf-8') as load_f:
            json_dict = json.load(load_f)
        h, w = json_dict['imageHeight'], json_dict['imageWidth']
        # splitext (not str.replace) so a name like "json_a.json" is not mangled.
        txt_name = os.path.splitext(json_name)[0] + '.txt'
        # "with" guarantees the output file is closed (the original leaked
        # the handle opened with open(..., 'w')).
        with open(os.path.join(save_dir, txt_name), 'w') as txt_file:
            for shape_dict in json_dict['shapes']:
                label_index = class_names.index(shape_dict['label'])
                coords = []
                for x, y in shape_dict['points']:
                    coords.append(x / w)
                    coords.append(y / h)
                points_nor_str = ' '.join(map(str, coords))
                txt_file.write(str(label_index) + ' ' + points_nor_str + '\n')
if __name__ == "__main__":
    # Example:
    #   python json2txt_nomalize.py --json-dir my_datasets/color_rings/jsons \
    #       --save-dir my_datasets/color_rings/txts --classes "cat,dogs"
    parser = argparse.ArgumentParser(description='json convert to txt params')
    parser.add_argument('--json-dir', type=str, default='D:/Progarm_Data/python_project/data/seg/seg-json', help='json path dir')
    parser.add_argument('--save-dir', type=str, default='D:/Progarm_Data/python_project/data/seg/seg-txt', help='txt save dir')
    parser.add_argument('--classes', type=str, default='cap,root', help='classes')
    args = parser.parse_args()
    # Pass the parsed values straight through to the converter.
    convert_label_json(args.json_dir, args.save_dir, args.classes)
新建split.py划分数据集
# 将图片和标注数据按比例切分为 训练集和测试集
import shutil
import random
import os
import argparse
# 检查文件夹是否存在,如果不存在则创建它
# 目录创建函数
def mkdir(path):
    """Create directory *path* (including missing parents) if absent.

    ``exist_ok=True`` makes the call idempotent and avoids the
    check-then-create race of the original ``os.path.exists`` guard.
    """
    os.makedirs(path, exist_ok=True)
def main(image_dir, txt_dir, save_dir):
    """Randomly split paired image/label files into train/val/test subsets.

    For every label file in *txt_dir*, the matching ``<name>.jpg`` is taken
    from *image_dir* and both files are copied into
    ``save_dir/{images,labels}/{train,val,test}``.

    Args:
        image_dir: directory holding the ``.jpg`` images.
        txt_dir: directory holding the YOLO ``.txt`` label files.
        save_dir: output root directory; created if missing.
    """
    images_dir = os.path.join(save_dir, 'images')
    labels_dir = os.path.join(save_dir, 'labels')
    img_train_path = os.path.join(images_dir, 'train')
    img_test_path = os.path.join(images_dir, 'test')
    img_val_path = os.path.join(images_dir, 'val')
    label_train_path = os.path.join(labels_dir, 'train')
    label_test_path = os.path.join(labels_dir, 'test')
    label_val_path = os.path.join(labels_dir, 'val')
    for d in (img_train_path, img_test_path, img_val_path,
              label_train_path, label_test_path, label_val_path):
        # exist_ok=True also creates save_dir/images_dir/labels_dir as parents.
        os.makedirs(d, exist_ok=True)

    # Split ratio: train 80%, val 10%, rest goes to test. Adjust as needed.
    # (The original comment claimed 75/15/15 but the code used 80/10/10.)
    train_percent = 0.8
    val_percent = 0.1

    total_txt = os.listdir(txt_dir)
    num_txt = len(total_txt)
    num_train = int(num_txt * train_percent)
    num_val = int(num_txt * val_percent)

    # Sample disjoint index sets. Sets make the per-file membership tests
    # below O(1); the original list membership was O(n) per file.
    all_indices = range(num_txt)
    train = set(random.sample(all_indices, num_train))
    val_test = [i for i in all_indices if i not in train]
    val = set(random.sample(val_test, num_val))

    print("训练集数目:{}, 验证集数目:{},测试集数目:{}".format(len(train), len(val), len(val_test) - len(val)))

    for i in all_indices:
        # splitext instead of [:-4] so names with unexpected extensions survive.
        name = os.path.splitext(total_txt[i])[0]
        src_image = os.path.join(image_dir, name + '.jpg')
        src_label = os.path.join(txt_dir, name + '.txt')
        if i in train:
            dst_image = os.path.join(img_train_path, name + '.jpg')
            dst_label = os.path.join(label_train_path, name + '.txt')
        elif i in val:
            dst_image = os.path.join(img_val_path, name + '.jpg')
            dst_label = os.path.join(label_val_path, name + '.txt')
        else:
            dst_image = os.path.join(img_test_path, name + '.jpg')
            dst_label = os.path.join(label_test_path, name + '.txt')
        shutil.copyfile(src_image, dst_image)
        shutil.copyfile(src_label, dst_label)
if __name__ == '__main__':
    # Example:
    #   python split_datasets.py --image-dir my_datasets/color_rings/imgs \
    #       --txt-dir my_datasets/color_rings/txts --save-dir my_datasets/color_rings/train_data
    parser = argparse.ArgumentParser(description='split datasets to train,val,test params')
    parser.add_argument('--image-dir', type=str, default='D:/Progarm_Data/python_project/data/seg/seg-image', help='image path dir')
    parser.add_argument('--txt-dir', type=str, default='D:/Progarm_Data/python_project/data/seg/seg-txt', help='txt path dir')
    parser.add_argument('--save-dir', default='D:/Progarm_Data/python_project/data/seg/split', type=str, help='save dir')
    args = parser.parse_args()
    # Hand the parsed paths straight to the splitter.
    main(args.image_dir, args.txt_dir, args.save_dir)
运行后划分的数据集格式
新建mushroom-seg.yaml 配置文件
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Mushroom instance-segmentation dataset (labelme annotations converted to YOLO txt)
# Example usage: yolo train data=mushroom-seg.yaml
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
# path: ../datasets/coco # optional dataset root dir; paths below are absolute
train: D:/Progarm_Data/python_project/data/seg/split/images/train # train images (80% split)
val: D:/Progarm_Data/python_project/data/seg/split/images/val # val images (10% split)
test: D:/Progarm_Data/python_project/data/seg/split/images/test # test images (10% split)
# Classes (indices must match the --classes order used in json_to_txt.py)
names:
0: cap
1: root
新建yolov8_seg_train.py文件,训练数据集
# Instance-segmentation training script.
from ultralytics import YOLO

# Choose ONE way to create the model. The original code built three models
# and discarded the first two; only the last assignment was ever trained,
# so the dead constructions (and any weight downloads they trigger) are
# left here as commented alternatives.
# model = YOLO("yolov8-seg.yaml")  # build a new model from scratch
# model = YOLO('yolov8n-seg.pt')  # load a pretrained model (recommended for training)
model = YOLO('yolov8-seg.yaml').load('yolov8n.pt')  # build from YAML and transfer weights

# Train the model on the mushroom segmentation dataset (GPU device 0).
model.train(data="datasets/mushroom-seg.yaml", task="segment", mode="train", workers=0, batch=4, epochs=300,
            device=0)  # train the model
训练成功后获得best.pt权重文件
新建ImageDetection.py图片推理文件
# Image-inference script: run a trained YOLOv8 segmentation model on one image.
import cv2
from ultralytics import YOLO

# Path of the image to run inference on (edit for your setup).
img_path = "D:/Progarm_Data/python_project/data/Images/000062.jpg"
# Read the image.
img = cv2.imread(filename=img_path)
# cv2.imread returns None (instead of raising) when the file is missing or
# unreadable; fail early with a clear error rather than a cryptic cv2 crash.
if img is None:
    raise FileNotFoundError("could not read image: " + img_path)
# Load the trained weights (edit for your setup).
model = YOLO(model="D:/Progarm_Data/python_project/ultralytics-main/myData/runs/segment/train/weights/best.pt")
# Forward pass.
res = model(img)
# Draw the predictions onto a copy of the image.
annotated_img = res[0].plot()
# Show the result window.
cv2.imshow(winname="YOLOV8", mat=annotated_img)
# Wait up to 10 seconds for a key press.
cv2.waitKey(delay=10000)
# Save the annotated image.
cv2.imwrite(filename="jieguo.jpeg", img=annotated_img)
推理结果如下