Training, Prediction, Validation and Export of a YOLOv8 Model on the VOC2007 Dataset

(Environment setup is omitted here; the model is trained on a GPU.)
Below are the class labels of my model:
names:
0: dog
1: person
2: train
3: sofa
4: chair
5: car
6: pottedplant
7: diningtable
8: horse
9: cat
10: cow
11: bus
12: bicycle
13: aeroplane
14: motorbike
15: tvmonitor
16: bird
17: bottle
18: boat
19: sheep
Folder layout

Annotations: stores the XML annotation files
dataSet_path: stores the image-path list files (one absolute image path per line)
images: stores the images used for training
ImageSets: stores the image-id index files
JPEGImages: the original images (not important here)
labels: holds the label .txt files converted from the XML annotations
split_train_val.py: generates the files inside ImageSets

# coding:utf-8

import os
import random
import argparse

parser = argparse.ArgumentParser()
# Path to the XML files; adjust for your own data. XML annotations are usually kept under Annotations
parser.add_argument('--xml_path', default='Annotations', type=str, help='input xml label path')
# Where the split files are written; point this to ImageSets/Main under your own data
parser.add_argument('--txt_path', default='ImageSets/Main', type=str, help='output txt label path')
opt = parser.parse_args()

trainval_percent = 1.0  # fraction of the data used for train + val; no test split is made here
train_percent = 0.9  # fraction of train + val used for training; adjust as needed
xmlfilepath = opt.xml_path
txtsavepath = opt.txt_path
total_xml = os.listdir(xmlfilepath)
if not os.path.exists(txtsavepath):
    os.makedirs(txtsavepath)

num = len(total_xml)
list_index = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(list_index, tv)
train = random.sample(trainval, tr)

file_trainval = open(txtsavepath + '/trainval.txt', 'w')
file_test = open(txtsavepath + '/test.txt', 'w')
file_train = open(txtsavepath + '/train.txt', 'w')
file_val = open(txtsavepath + '/val.txt', 'w')

for i in list_index:
    name = total_xml[i][:-4] + '\n'
    if i in trainval:
        file_trainval.write(name)
        if i in train:
            file_train.write(name)
        else:
            file_val.write(name)
    else:
        file_test.write(name)

file_trainval.close()
file_train.close()
file_val.close()
file_test.close()
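With the layout above, the script can be run from the VOC2007 directory as follows (the two arguments default to the values defined in argparse and only need overriding if your folders differ):

python split_train_val.py --xml_path Annotations --txt_path ImageSets/Main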

The ImageSets folder

The ImageSets/Main/train.txt file

xml_to_yolo.py: converts the XML files in Annotations into the label files required under labels

# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import os
from os import getcwd

sets = ['train', 'val', 'test']
classes = ["dog", "person", "train", "sofa", "chair", "car", "pottedplant", "diningtable", "horse", "cat",
           "cow", "bus", "bicycle", "aeroplane", "motorbike", "tvmonitor", "bird", "bottle", "boat", "sheep"] # 改成自己的类别
abs_path = os.getcwd()
print(abs_path)


def convert(size, box):
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return x, y, w, h


def convert_annotation(image_id):
    in_file = open('D:/yolov8a/yolov8-ultralytics/data/VOCdevkit/VOC2007/Annotations/%s.xml' % (image_id), encoding='UTF-8')
    out_file = open('D:/yolov8a/yolov8-ultralytics/data/VOCdevkit/VOC2007/labels/%s.txt' % (image_id), 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        # difficult = obj.find('Difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
             float(xmlbox.find('ymax').text))
        b1, b2, b3, b4 = b
        # clip out-of-bounds box coordinates
        if b2 > w:
            b2 = w
        if b4 > h:
            b4 = h
        b = (b1, b2, b3, b4)
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')

wd = getcwd()
for image_set in sets:
    if not os.path.exists('D:/yolov8a/yolov8-ultralytics/data/VOCdevkit/VOC2007/labels/'):
        os.makedirs('D:/yolov8a/yolov8-ultralytics/data/VOCdevkit/VOC2007/labels/')
    image_ids = open('D:/yolov8a/yolov8-ultralytics/data/VOCdevkit/VOC2007/ImageSets/Main/%s.txt' % (image_set)).read().strip().split()

    if not os.path.exists('D:/yolov8a/yolov8-ultralytics/data/VOCdevkit/VOC2007/dataSet_path/'):
        os.makedirs('D:/yolov8a/yolov8-ultralytics/data/VOCdevkit/VOC2007/dataSet_path/')

    list_file = open('dataSet_path/%s.txt' % (image_set), 'w')
    # the list-file path above is relative and does not need changing; the image paths written below are absolute, so adjust them to your own directory
    for image_id in image_ids:
        list_file.write('D:/yolov8a/yolov8-ultralytics/data/VOCdevkit/VOC2007/images/%s.jpg\n' % (image_id))
        convert_annotation(image_id)
    list_file.close()
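As a quick sanity check of convert() (made-up numbers, not taken from VOC2007): for a 500x375 image with a box xmin=48, xmax=240, ymin=195, ymax=371, it returns the normalized center and size that YOLO labels expect:

print(convert((500, 375), (48.0, 240.0, 195.0, 371.0)))
# -> roughly (0.286, 0.752, 0.384, 0.469): x_center, y_center, width, height, each divided by the image size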

Note:

  1. If you do not know the class labels, you can run 查看文件夹下xml类别标签.py to list them
  2. sc.xml.py can delete redundant files; when there are too many image files, training may not run

查看文件夹下xml类别标签.py

import os
import xml.etree.ElementTree as ET

def get_class_labels(xml_file):
    # parse the XML file
    tree = ET.parse(xml_file)
    root = tree.getroot()

    # iterate over all object tags and collect their class labels
    class_labels = []
    for obj in root.iter('object'):
        cls = obj.find('name').text
        class_labels.append(cls)

    return class_labels

def get_all_class_labels_in_folder(folder_path):
    # collect the paths of all XML files in the folder
    xml_files = [f for f in os.listdir(folder_path) if f.endswith('.xml')]

    # go through every XML file and gather its class labels
    all_labels = []
    for xml_file in xml_files:
        xml_file_path = os.path.join(folder_path, xml_file)
        labels = get_class_labels(xml_file_path)
        all_labels.extend(labels)

    # remove duplicate labels while preserving their order of appearance
    unique_labels = []
    label_to_number = {}
    label_counter = 1

    for label in all_labels:
        if label not in label_to_number:
            label_to_number[label] = label_counter
            unique_labels.append(label)
            label_counter += 1

    return label_to_number, unique_labels

# replace this with the path to your own XML folder
xml_folder_path = 'D:/yolov8a/yolov8-ultralytics/data/VOCdevkit/VOC2007/Annotations'

# build the label-to-number mapping and the deduplicated label list for every XML file in the folder
label_to_number, unique_labels = get_all_class_labels_in_folder(xml_folder_path)

# print the class labels and their numeric mapping
print("Class labels and numeric mapping:")
for label in unique_labels:
    number = label_to_number[label]
    print(f"{number}: {label}")


sc.xml.py
import os

def delete_files_outside_range(folder_path, start_file, end_file):
    # collect the paths of all .txt label files in the folder
    xml_files = [f for f in os.listdir(folder_path) if f.endswith('.txt')]

    # sort the file names so they are processed in numeric order
    xml_files.sort()

    # delete every file outside the given range
    for xml_file in xml_files:
        if xml_file < start_file or xml_file > end_file:
            xml_file_path = os.path.join(folder_path, xml_file)
            os.remove(xml_file_path)

# replace this with the path to your own labels folder
xml_folder_path = r'D:\yolov8a\yolov8-ultralytics\data\VOCdevkit\VOC2007\labels'

# define the range of files to keep, from '000001.txt' to '002008.txt'
start_file = '000001.txt'
end_file = '002008.txt'

# delete all label files outside this range
delete_files_outside_range(xml_folder_path, start_file, end_file)

Next is the coco128-seg.yaml file:
train: D:\yolov8a\yolov8-ultralytics\data\VOCdevkit\VOC2007\images
val: D:\yolov8a\yolov8-ultralytics\data\VOCdevkit\VOC2007\images

# Classes
names:
  0: dog
  1: person
  2: train
  3: sofa
  4: chair
  5: car
  6: pottedplant
  7: diningtable
  8: horse
  9: cat
  10: cow
  11: bus
  12: bicycle
  13: aeroplane
  14: motorbike
  15: tvmonitor
  16: bird
  17: bottle
  18: boat
  19: sheep

You need to create this file yourself; names lists the class labels.
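One thing worth knowing about this yaml: Ultralytics derives each label path from the image path by replacing the images directory with labels and the image extension with .txt, which is why the labels folder must sit next to images, roughly like this:

VOC2007/
    images/000001.jpg
    labels/000001.txt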

yolov8n.pt does not need to be downloaded manually; it is downloaded automatically when training starts.
yolov8s-seg.yaml configuration:

nc: 20  # number of classes
depth_multiple: 0.33  # scales module repeats
width_multiple: 0.50  # scales convolution channels

# YOLOv8.0s backbone
backbone:
  # [from, repeats, module, args]
  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
  - [-1, 3, C2f, [128, True]]
  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
  - [-1, 6, C2f, [256, True]]
  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
  - [-1, 6, C2f, [512, True]]
  - [-1, 1, Conv, [1024, 3, 2]]  # 7-P5/32
  - [-1, 3, C2f, [1024, True]]
  - [-1, 1, SPPF, [1024, 5]]  # 9

# YOLOv8.0s head
head:
  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
  - [-1, 3, C2f, [512]]  # 12

  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
  - [-1, 3, C2f, [256]]  # 15 (P3/8-small)

  - [-1, 1, Conv, [256, 3, 2]]
  - [[-1, 12], 1, Concat, [1]]  # cat head P4
  - [-1, 3, C2f, [512]]  # 18 (P4/16-medium)

  - [-1, 1, Conv, [512, 3, 2]]
  - [[-1, 9], 1, Concat, [1]]  # cat head P5
  - [-1, 3, C2f, [1024]]  # 21 (P5/32-large)

  - [[15, 18, 21], 1, Segment, [nc, 32, 256]]  # Segment(P3, P4, P5)

Only one parameter needs changing: set nc to the number of classes in your dataset.

Now the training process can begin.
Training command:

yolo detect train data=D:\yolov8a\yolov8-ultralytics\data\coco128-seg.yaml model=D:\yolov8a\yolov8-ultralytics\weights\yolov8n.pt epochs=10 imgsz=640 device=0

The trained model is saved under the runs/detect/train folder.
(Screenshot: the folder containing the trained model)
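The same run can also be started from Python with the Ultralytics API (a minimal sketch; the paths are placeholders and should point to your own yaml and weights):

from ultralytics import YOLO

model = YOLO('yolov8n.pt')  # pretrained weights, downloaded automatically if missing
model.train(data='data/coco128-seg.yaml', epochs=10, imgsz=640, device=0)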

Model prediction:

yolo predict model=D:\yolov8a\yolov8-ultralytics\runs\detect\train4\weights\best.pt source=<absolute path of the image you want to predict> imgsz=640

The prediction results are likewise saved under the runs/detect/predict folder.
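The Python-API equivalent of the predict command looks roughly like this (the image path is a placeholder):

from ultralytics import YOLO

model = YOLO('runs/detect/train4/weights/best.pt')
results = model.predict(source='path/to/your_image.jpg', imgsz=640, save=True)  # save=True writes the annotated image under runs/detect/predict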

Model validation:

yolo val model=D:\yolov8a\yolov8-ultralytics\runs\detect\train4\weights\best.pt data=D:\yolov8a\yolov8-ultralytics\data\coco128-seg.yaml batch=1 imgsz=640
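The same validation can be run from Python (a sketch with placeholder paths):

from ultralytics import YOLO

model = YOLO('runs/detect/train4/weights/best.pt')
metrics = model.val(data='data/coco128-seg.yaml', batch=1, imgsz=640)
print(metrics.box.map50)  # mAP at IoU 0.5 on the validation split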

Model export:

yolo export model=D:\yolov8a\yolov8-ultralytics\runs\detect\train4\weights\best.pt format=onnx

The exported model has an .onnx extension.
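The export can also be triggered from Python (a sketch; the path is a placeholder):

from ultralytics import YOLO

model = YOLO('runs/detect/train4/weights/best.pt')
model.export(format='onnx')  # writes best.onnx next to the .pt weights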

You can open the exported file in Netron (a web app) to inspect the model structure in detail.

