Learning Series 3: YOLOv8 Object Detection and Segmentation with Automatic Annotation

Tip: This article covers YOLOv8 object detection with automatic annotation (relatively simple, broadly applicable, labels in txt format) and YOLOv8 instance segmentation with automatic annotation (the code is more involved and the annotation quality still has room for improvement; labels also in txt format). If you have better ideas, feel free to share them in the comments.



1. Training an Object Detection Dataset with YOLOv8

1.1 Splitting the Dataset

Before training an object detection model, the dataset needs to be split into training, validation, and test sets. This can be done by hand or with code; here we focus on doing it automatically with code.
Before splitting, take a look at the dataset layout: a dataset folder (tuoluo-aug) containing an image folder (images) and a label folder (labels, with txt-format label files).
(Screenshot: dataset folder structure)

The code below splits the dataset into training, validation, and test sets automatically; you can set the fraction assigned to each split yourself.

from ultralytics.data.utils import autosplit

autosplit(
    path='/home/xiao/dataset/tuoluo-aug/images',
    weights=(0.8, 0.2, 0.00), # (train, validation, test) fractional splits
    annotated_only=False     # split only images with annotation file when True
)

After running this, txt files are generated in the tuoluo-aug folder, as shown below:
(Screenshot: generated autosplit_*.txt files in the dataset root)
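As a quick sanity check (a minimal sketch; it assumes the autosplit_*.txt files sit in the dataset root as shown above), you can count the lines in each split file and confirm the 0.8/0.2/0.0 ratio:

from pathlib import Path

dataset_root = Path('/home/xiao/dataset/tuoluo-aug')  # adjust to your dataset root

# Count the images listed in each autosplit file (a missing file is reported as 0)
for split in ('autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'):
    split_file = dataset_root / split
    n = len(split_file.read_text().splitlines()) if split_file.exists() else 0
    print(f"{split}: {n} images")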

1.2 Object Detection Training Script

For YOLOv8 I wrote a simple script to train the object detection dataset. It lives under the ultralytics-xiao project and is named tuoluo.py:

from ultralytics import YOLO


# Load the pretrained YOLOv8n weights
model = YOLO("/home/xiao/ultralytics-main/weights/yolov8n.pt")

model.train(data="/home/xiao/ultralytics-main/ultralytics/cfg/datasets/bolt-detachment-aug.yaml",
            epochs=120,
            imgsz=1280,
            device=[0],
            workers=2,
            batch=8,
            patience=120)

metrics = model.val()  # evaluate model performance on the validation set

The dataset configuration file referenced in the code above is "/home/xiao/ultralytics-main/ultralytics/cfg/datasets/bolt-detachment-aug.yaml", with the following contents:

path: /home/xiao/dataset/tuoluo-aug/
train: autosplit_train.txt
val: autosplit_val.txt
test: autosplit_test.txt

names:
  0: detachment

nc: 1

With the above in place, run tuoluo.py to start training. (The dataset label used above is: detachment.)
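After training, the detection metrics can be read from the validation results in the same way as shown later for segmentation. A minimal sketch; the weights path below is an assumption (Ultralytics saves best.pt under runs/detect/<run>/weights/):

from ultralytics import YOLO

# Evaluate the trained weights and print the box metrics
model = YOLO("/home/xiao/ultralytics-main/runs/detect/train/weights/best.pt")  # assumed path to best.pt
metrics = model.val(data="/home/xiao/ultralytics-main/ultralytics/cfg/datasets/bolt-detachment-aug.yaml")

print("mAP50-95:", metrics.box.map)
print("mAP50:", metrics.box.map50)
print("mAP75:", metrics.box.map75)
print("per-class mAP50-95:", metrics.box.maps)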

2. Detection Inference Wrapped in a Class

Once training finishes, the best weights are saved. Use those weights to run inference on new images; the code below wraps this into a class:

from ultralytics import YOLO
import cv2
import numpy as np
import os


class yolo_demo:

    def __init__(self):
        # Load the trained detection weights
        self.model_file_1 = './weight/best.pt'
        self.model_1 = YOLO(self.model_file_1)
        self.objs_labels_1 = self.model_1.names
        print(self.objs_labels_1)

    def img_file_folder(self, img_folder):
        # Collect all image files in the folder
        image_paths = []
        for image_name in os.listdir(img_folder):
            image_path = os.path.join(img_folder, image_name)
            if os.path.isfile(image_path) and image_name.lower().endswith(
                    ('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff')):
                image_paths.append(image_path)
        return image_paths

    def main(self, img_folder):
        os.makedirs("./output-v4", exist_ok=True)

        for i, img_path in enumerate(self.img_file_folder(img_folder)):
            # Image file name (without extension)
            img_id = img_path.split(os.sep)[-1].split('.')[0]

            # Read the image as BGR; imdecode handles non-ASCII paths
            image_bgr = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), -1)

            result = list(self.model_1(image_bgr, stream=True, conf=0.7))[0]
            boxes = result.boxes.cpu().numpy()

            for box in boxes.data:
                l, t, r, b = box[:4].astype(np.int32)
                conf, id = box[4:]

                if id == 0:
                    # Draw the bounding box and label
                    cv2.rectangle(image_bgr, (l, t), (r, b), (0, 255, 0), 1)
                    label = "detachment"
                    cv2.putText(image_bgr, label, (l, t - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

            output_image_path = f"./output-v4/{img_id}.jpg"
            cv2.imwrite(output_image_path, image_bgr)


if __name__ == "__main__":

    plate_demo = yolo_demo()
    img_path = r"./detachment-images"
    plate_demo.main(img_path)

3. Object Detection Auto-Annotation and Label Generation (txt format)

This applies when YOLOv8 has already been trained on a small number of images. The resulting weights are used to auto-annotate new images; afterwards a human can review the annotations and make small corrections, which saves a lot of time compared with annotating from scratch.

"""
yolov8 自动标注, 后续需要手工进行修改校准
"""
from ultralytics import YOLO
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
import glob
import shutil
import tqdm

# 加载模型
model_file = "./weights1to4/detachment.pt"
model = YOLO(model_file)
objs_labels = model.names
print(objs_labels)

# 读取图片
images_list = glob.glob('./img-detachment-9/*.jpg')

print(len(images_list))

# 创建标签文件夹
if not os.path.exists('./detachment-labels'):
    os.mkdir('./detachment-labels')

# 标注
def image_2_yolo():

    # 保存路径前缀
    savePathPrefix = "./detachment-labels/"

    # 遍历每张图片
    for img in images_list:

        # 获取图片文件名
        img_id = img.split(os.sep)[-1].split('.')[0]

        # 读取图片
        img_data = cv2.imread(img)
        # 检测
        result = list(model(img_data, stream=True, conf=0.5))[0]
        boxes = result.boxes
        boxes = boxes.cpu().numpy()

        yolo_boxes = []
        # 获取图片宽高
        img_h, img_w, _ = img_data.shape

        # 遍历每个框
        for box in boxes.data:

            l, t, r, b = box[:4].astype(np.int32)
            conf, id = box[4:]

            # 筛选出detachment类别, 转为yolo格式:类别id, x_center, y_center, width, height, 归一化到0-1, 保留6位小数
            if id == 0:
                class_label = int(id)
                x_center = round((l + r) / 2 / img_w, 6)
                y_center = round((t + b) / 2 / img_h, 6)
                width = round((r - l) / img_w, 6)
                height = round((b - t) / img_h, 6)

                yolo_boxes.append([class_label, x_center, y_center, width, height])

        # 写入txt文件
        # 生成yolo格式的标注文件
        yoloLabelFile = savePathPrefix + img_id + '.txt'
        with open(yoloLabelFile, 'w') as f:
            for yolo_box in yolo_boxes:
                f.write(' '.join([str(i) for i in yolo_box]) + '\n')


if __name__ == '__main__':
    image_2_yolo()
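Since the generated labels are meant to be checked by hand, a quick way to spot-check them is to draw the boxes back onto the images. A minimal sketch under the same folder layout as above; the review folder name is an assumption:

import cv2
import glob
import os

os.makedirs('./detachment-check', exist_ok=True)  # assumed output folder for review images

for img_path in glob.glob('./img-detachment-9/*.jpg'):
    img = cv2.imread(img_path)
    h, w = img.shape[:2]
    label_file = os.path.join('./detachment-labels', os.path.basename(img_path).rsplit('.', 1)[0] + '.txt')
    if not os.path.exists(label_file):
        continue
    with open(label_file) as f:
        for line in f:
            # Each line is: class_id x_center y_center width height (all normalized)
            cls, xc, yc, bw, bh = line.split()
            xc, yc, bw, bh = float(xc) * w, float(yc) * h, float(bw) * w, float(bh) * h
            l, t = int(xc - bw / 2), int(yc - bh / 2)
            r, b = int(xc + bw / 2), int(yc + bh / 2)
            cv2.rectangle(img, (l, t), (r, b), (0, 255, 0), 1)
    cv2.imwrite(os.path.join('./detachment-check', os.path.basename(img_path)), img)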

4. Training an Instance Segmentation Dataset with YOLOv8

from ultralytics import YOLO


# Build the model from the segmentation config and load previously trained weights
model = YOLO('/home/hxz/xiao/ultralytics-xiao/ultralytics/cfg/models/v8/yolov8-seg.yaml').load(
    '/home/hxz/xiao/ultralytics-xiao/runs/segment/train6/weights/best.pt')


model.train(data="/home/hxz/xiao/ultralytics-xiao/ultralytics/cfg/datasets/seg-bolt-line-cz933-weitiao.yaml",
            task="segment",
            mode="train",
            overlap_mask=False,
            batch=16,
            device=0,
            epochs=600,
            patience=600,
            imgsz=128)

metrics = model.val(iou=0.7)


try:
    a = metrics.box.map      # mAP50-95
    b = metrics.box.map50    # mAP50
    c = metrics.box.map75    # mAP75
    d = metrics.box.maps     # a list with per-class mAP values (IoU 0.5 to 0.95)
    print("result:", a, b, c, d)

except Exception as e:
    print(f"Failed to read metrics: {e}")
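For the segment task, mask metrics are reported alongside the box metrics; as far as I know they are exposed in the same way through a seg attribute on the validation results. A hedged sketch under that assumption:

try:
    # metrics.seg is assumed to mirror metrics.box for the mask quality
    print("mask mAP50-95:", metrics.seg.map)
    print("mask mAP50:", metrics.seg.map50)
    print("mask mAP75:", metrics.seg.map75)
    print("per-class mask mAP:", metrics.seg.maps)
except AttributeError as e:
    print(f"Mask metrics not available: {e}")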

5. Segmentation Inference

from ultralytics import YOLO

# Load a model
model = YOLO("/home/hxz/xiao/ultralytics-xiao/runs/segment/train6/weights/best.pt")  # load a custom model

# Predict with the model
source = "/home/hxz/xiao/ultralytics-xiao/test-img"  # folder of test images to predict on

results = model.predict(source, save=True)

6. Segmentation Auto-Annotation and Label Generation

This part uses weights trained on a relatively small dataset to predict new images, obtain the masks, extract contour coordinates from each mask, and save them to a txt file.

from ultralytics import YOLO
import numpy as np
import cv2
import os
import glob


# Load the segmentation model
model = YOLO("./weights/fks-seg.pt")  # custom weights

# Folder of images to predict
image_folder = r"./fks-seg/images"

# All image paths in the folder
image_paths = glob.glob(os.path.join(image_folder, "*.jpg"))

# Folder for the txt label files
output_dir = "./fks-seg/txt_labels"
os.makedirs(output_dir, exist_ok=True)


for image_path in image_paths:

    results = model.predict(image_path)

    base_name = os.path.basename(image_path).split('.')[0]

    # Output label file path
    txt_filename = os.path.join(output_dir, f"{base_name}.txt")

    for result in results:
        masks = result.masks  # segmentation masks
        classes = result.boxes.cls if result.boxes is not None else []  # class id of each mask

        # Original image size, for reference
        height, width = result.orig_img.shape[:2]
        print(height, width)

        with open(txt_filename, 'w') as f:
            if masks is not None:
                for i, mask in enumerate(masks.data):
                    # Convert the PyTorch tensor into a binary NumPy mask.
                    # Note: masks.data is at the model's inference resolution, not the original image size.
                    mask = mask.cpu().numpy().astype(np.uint8)

                    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

                    # Class id of the current mask
                    class_id = int(classes[i]) if i < len(classes) else -1

                    for contour in contours:

                        # Normalize the contour points by the mask size
                        normalized_coordinates = []
                        for point in contour:
                            x, y = point[0]
                            x_normalized = x / mask.shape[1]
                            y_normalized = y / mask.shape[0]
                            normalized_coordinates.append(f"{x_normalized:.6f} {y_normalized:.6f}")

                        # Write one YOLO-format line per contour (class id + polygon coordinates)
                        if normalized_coordinates:
                            f.write(f"{class_id} " + " ".join(normalized_coordinates) + "\n")

        print(f"Saved YOLO segmentation coordinates to {txt_filename}")



After converting the generated txt files to JSON, put the images and the generated JSON files in the same folder; you can then inspect the auto-annotation results in labelme.

import os
import json
import cv2
import numpy as np
import glob


def yolo_to_labelme(txt_file, img_file, class_names, output_json):
    if not os.path.exists(txt_file):
        print(f"File {txt_file} does not exist.")
        return

    img = cv2.imdecode(np.fromfile(img_file, dtype=np.uint8), -1)
    height, width = img.shape[:2]

    shapes = []

    with open(txt_file, 'r') as f:
        lines = f.readlines()

        for line in lines:
            parts = line.strip().split()
            class_id = int(parts[0])
            # Convert the normalized coordinates back to pixel coordinates
            points = [(float(parts[i]) * width, float(parts[i + 1]) * height) for i in range(1, len(parts), 2)]

            shape = {
                "label": class_names[class_id],
                "points": points,
                "group_id": None,
                "shape_type": "polygon",
                "flags": {}
            }
            shapes.append(shape)

    labelme_data = {
        "version": "4.5.6",
        "flags": {},
        "shapes": shapes,
        "imagePath": os.path.basename(img_file),
        "imageData": None,
        "imageHeight": height,
        "imageWidth": width
    }

    with open(output_json, 'w') as f:
        json.dump(labelme_data, f, indent=4)
    print(f"Saved JSON to {output_json}")


def batch_process_yolo_to_labelme(image_folder, label_folder, output_folder, class_names):
    os.makedirs(output_folder, exist_ok=True)

    # All image files
    image_files = glob.glob(os.path.join(image_folder, "*.jpg"))

    for img_file in image_files:
        # Corresponding txt file
        base_name = os.path.basename(img_file).split('.')[0]
        txt_file = os.path.join(label_folder, f"{base_name}.txt")

        # Output JSON file path
        output_json = os.path.join(output_folder, f"{base_name}.json")

        # Convert YOLO format to LabelMe format
        yolo_to_labelme(txt_file, img_file, class_names, output_json)


# Example usage
class_names = ["red"]  # fill in according to your class order
image_folder = r"./fks-seg/images"
label_folder = r"./fks-seg/txt_labels"
output_folder = r"./fks-seg/json_labels"

batch_process_yolo_to_labelme(image_folder, label_folder, output_folder, class_names)
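To review the results as described above, the JSON files need to sit next to the images. A minimal sketch (the review folder name is an assumption, and it assumes labelme is installed):

import glob
import os
import shutil

review_dir = "./fks-seg/labelme_review"  # assumed folder for manual review
os.makedirs(review_dir, exist_ok=True)

# Copy images and their JSON annotations into one folder so labelme can pair them
for img_file in glob.glob("./fks-seg/images/*.jpg"):
    shutil.copy(img_file, review_dir)
for json_file in glob.glob("./fks-seg/json_labels/*.json"):
    shutil.copy(json_file, review_dir)

# Then open the folder in labelme, e.g.:  labelme ./fks-seg/labelme_review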
