YoloV8 Object Detection

Overview

A record of the workflow I went through as a beginner learning YoloV8 object detection.

Overall workflow

python: 2.7.18 (note that Ultralytics YOLOv8 itself requires Python 3.8 or newer)

conda:24.1.2

After installing the environment YoloV8 needs, create a folder called myData to hold your own data and keep the project from getting cluttered.

The project structure is as follows:

The images folder holds the dataset to be detected.
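
A minimal sketch of the layout assumed by the scripts in this post; the folder names (img, images, labels, dataSet) are taken from the paths used later, everything else is an assumption:

import os

# assumed project layout under myData, matching the paths used by the scripts below
base = r'D:\work\yolov8-3\yolov8\myData'
for sub in ('img',      # labelme json annotations (and the source images)
            'images',   # images referenced by the generated train/val lists
            'labels',   # YOLO txt label files
            'dataSet'): # train/val/test txt lists produced by create.py
    os.makedirs(os.path.join(base, sub), exist_ok=True)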

Type labelme in the terminal; if it reports that 'labelme' is not recognized as a command, install it first with pip install labelme.

Change the output path so the generated JSON annotation files are saved into the img folder under myData, then open the images in img and start annotating.

Right-click an image and choose 'Create Rectangle'.

Here I create two classes, 'charge' and 'erji'.
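
For reference, each annotated image gets a labelme JSON file; the fields that the conversion script below actually reads look roughly like this (a sketch with made-up values):

# labelme json fields consumed by the conversion script (values are illustrative only)
example_annotation = {
    "imagePath": "1.jpg",
    "imageData": None,            # optional base64-encoded copy of the image
    "imageWidth": 640,
    "imageHeight": 480,
    "shapes": [
        {"label": "charge", "shape_type": "rectangle",
         "points": [[100.0, 120.0], [300.0, 260.0]]},   # two opposite corners
    ],
}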

Next, convert the JSON files to txt format by running the code below (remember to change the paths to your own):

import base64
import random
import shutil
from tqdm import tqdm
import math
import json
import os
import numpy as np
import PIL.Image
import PIL.ImageDraw
import cv2


class ConvertManager(object):
    def __init__(self):
        pass

    def base64_to_numpy(self, img_bs64):
        img_bs64 = base64.b64decode(img_bs64)
        img_array = np.frombuffer(img_bs64, np.uint8)
        cv2_img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
        return cv2_img

    @classmethod
    def load_labels(cls, name_file):
        '''
        Load class names from a file, one name per line.
        :param name_file: path to the names file
        :return: list of class names
        '''
        with open(name_file, 'r') as f:
            lines = f.read().rstrip('\n').split('\n')
        return lines

    def get_class_names_from_all_json(self, json_dir):
        classnames = []
        for file in os.listdir(json_dir):
            if not file.endswith('.json'):
                continue
            with open(os.path.join(json_dir, file), 'r', encoding='utf-8') as f:
                data_dict = json.load(f)
                for shape in data_dict['shapes']:
                    if not shape['label'] in classnames:
                        classnames.append(shape['label'])
        return classnames

    def create_save_dir(self, save_dir):
        images_dir = os.path.join(save_dir, 'images')
        labels_dir = os.path.join(save_dir, 'labels')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
            os.mkdir(images_dir)
            os.mkdir(labels_dir)
        else:
            if not os.path.exists(images_dir):
                os.mkdir(images_dir)
            if not os.path.exists(labels_dir):
                os.mkdir(labels_dir)
        return images_dir + os.sep, labels_dir + os.sep

    def save_list(self, data_list, save_file):
        with open(save_file, 'w') as f:
            f.write('\n'.join(data_list))

    def __rectangle_points_to_polygon(self, points):
        # labelme stores a rectangle as two opposite corners; take min/max so the
        # result is correct regardless of which two corners were clicked
        xmin = min(points[0][0], points[1][0])
        xmax = max(points[0][0], points[1][0])
        ymin = min(points[0][1], points[1][1])
        ymax = max(points[0][1], points[1][1])
        return [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]

    def convert_dataset(self, json_dir, json_list, images_dir, labels_dir, names, save_mode='train'):
        images_dir = os.path.join(images_dir, save_mode) + os.sep
        labels_dir = os.path.join(labels_dir, save_mode) + os.sep
        if not os.path.exists(images_dir):
            os.mkdir(images_dir)
        if not os.path.exists(labels_dir):
            os.mkdir(labels_dir)
        for file in tqdm(json_list):
            with open(os.path.join(json_dir, file), 'r', encoding='utf-8') as f:
                data_dict = json.load(f)
            image_file = os.path.join(json_dir, os.path.basename(data_dict['imagePath']))
            if os.path.exists(image_file):
                shutil.copyfile(image_file, images_dir + os.path.basename(image_file))
            else:
                imageData = data_dict.get('imageData')
                if imageData:
                    # fall back to the base64 image embedded in the json file
                    img = self.base64_to_numpy(imageData)
                    cv2.imwrite(images_dir + file[:-4] + 'png', img)
            # convert to txt
            width = data_dict['imageWidth']
            height = data_dict['imageHeight']
            line_list = []
            for shape in data_dict['shapes']:
                data_list = []
                data_list.append(str(names.index(shape['label'])))
                if shape['shape_type'] == 'rectangle':
                    points = self.__rectangle_points_to_polygon(shape['points'])
                    for point in points:
                        data_list.append(str(point[0] / width))
                        data_list.append(str(point[1] / height))
                elif shape['shape_type'] == 'polygon':
                    points = shape['points']
                    for point in points:
                        data_list.append(str(point[0] / width))
                        data_list.append(str(point[1] / height))
                line_list.append(' '.join(data_list))

            self.save_list(line_list, labels_dir + file[:-4] + "txt")

    def split_train_val_test_dataset(self, file_list, train_ratio=0.9, trainval_ratio=0.9, need_test_dataset=False,
                                     shuffle_list=True):
        if shuffle_list:
            random.shuffle(file_list)
        total_file_count = len(file_list)
        train_list = []
        val_list = []
        test_list = []
        if need_test_dataset:
            trainval_count = int(total_file_count * trainval_ratio)
            trainval_list = file_list[:trainval_count]
            test_list = file_list[trainval_count:]
            train_count = int(train_ratio * len(trainval_list))
            train_list = trainval_list[:train_count]
            val_list = trainval_list[train_count:]
        else:
            train_count = int(train_ratio * total_file_count)
            train_list = file_list[:train_count]
            val_list = file_list[train_count:]
        return train_list, val_list, test_list

    def start(self, json_dir, save_dir, names=None, train_ratio=0.9):
        images_dir, labels_dir = self.create_save_dir(save_dir)
        if names is None or len(names) == 0:
            print('class names will load from all json file')
            names = self.get_class_names_from_all_json(json_dir)
        print('find {} class names :'.format(len(names)), names)
        if len(names) == 0:
            return

        self.save_list(names, os.path.join(save_dir, 'labels.txt'))
        print('start convert')
        all_json_list = []
        for file in os.listdir(json_dir):
            if not file.endswith('.json'):
                continue
            all_json_list.append(file)
        train_list, val_list, test_list = self.split_train_val_test_dataset(all_json_list, train_ratio)
        self.convert_dataset(json_dir, train_list, images_dir, labels_dir, names, 'train')
        self.convert_dataset(json_dir, val_list, images_dir, labels_dir, names, 'val')


if __name__ == '__main__':
    cm = ConvertManager()
    cm.start(r'D:\work\yolov8-3\yolov8\myData\img', r'D:\work\yolov8-3\yolov8\myData\test')
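
Each converted image ends up with a txt file under labels/train or labels/val. Every line starts with the class index (following the order saved in labels.txt) and is followed by the normalized x y coordinates of the polygon points; a rectangle becomes its four corners. A hypothetical line for class 0 (the coordinate values are made up):

0 0.3125 0.4167 0.5875 0.4167 0.5875 0.7708 0.3125 0.7708

Note that this is the polygon (segment-style) label layout rather than the usual five-value "class cx cy w h" box layout; Ultralytics converts such polygons to boxes when loading the labels, so detection training works with it as well.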

Copy the generated training-set and validation-set label txt files into the labels folder (the corresponding images should sit in myData/images, since that is where create.py points its image lists).

Then run create.py. It creates the training and validation text files (lists of image paths, written to dataSet by default via the --txt_path argument) and splits the dataset into a training set and a validation set:
# coding:utf-8

import os
import random
import argparse
# Create train/val text files for the dataset, splitting it into a training set and a validation set.
parser = argparse.ArgumentParser()
# Path to the label files; adjust for your own data (the argument keeps its original 'xml' name but here it points at the labels folder)
parser.add_argument('--xml_path', default='labels', type=str, help='input xml label path')
# Output folder for the train/val/test txt lists
parser.add_argument('--txt_path', default='dataSet', type=str, help='output txt label path')
opt = parser.parse_args()

trainval_percent = 1.0
train_percent = 0.9
xmlfilepath = opt.xml_path
txtsavepath = opt.txt_path
total_xml = os.listdir(xmlfilepath)
if not os.path.exists(txtsavepath):
    os.makedirs(txtsavepath)

num = len(total_xml)
list_index = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(list_index, tv)
train = random.sample(trainval, tr)

file_trainval = open(txtsavepath + '/trainval.txt', 'w')
file_test = open(txtsavepath + '/test.txt', 'w')
file_train = open(txtsavepath + '/train.txt', 'w')
file_val = open(txtsavepath + '/val.txt', 'w')

for i in list_index:
    name = 'D:/work/yolov8-3/yolov8/myData/images/' + total_xml[i][:-4] + '.jpg' + '\n'
    if i in trainval:
        file_trainval.write(name)
        if i in train:
            file_train.write(name)
        else:
            file_val.write(name)
    else:
        file_test.write(name)

file_trainval.close()
file_train.close()
file_val.close()
file_test.close()

Next, write test.yaml. Make sure the order of names matches the class indices used in the label txt files (the conversion script prints the discovered class names and saves them to labels.txt in that order):

train: 'D:\work\yolov8-3\yolov8\myData\dataSet\train.txt'
val: 'D:\work\yolov8-3\yolov8\myData\dataSet\val.txt'
nc: 2
names: ['erji','charge']

Write the Python training code:

from ultralytics import YOLO
from ultralytics.utils.torch_utils import select_device

# Load a model
# model = YOLO(r'E:\code\yolov8\ultralytics\cfg\models\v8\yolov8.yaml')  # build a new model from YAML
model = YOLO(r'D:\work\yolov8-3\yolov8\ultralytics\cfg\models\v8\yolov8.yaml')  # build a new model from YAML
# model = YOLO(r'E:\code\yolov8\yolov8n.pt')  # load a pretrained model (recommended for training)
model = YOLO(r'D:\work\yolov8-3\yolov8\yolov8n.pt')  # load a pretrained model (recommended for training)
#model = YOLO('yolov8n-seg.yaml').load('yolov8n.pt')  # build from YAML and transfer weights

# Train the model
results = model.train(data=r'D:\work\yolov8-3\yolov8\myData\test.yaml', epochs=100, imgsz=640)

Remember to change the paths to your own before running the training.

You can set a fairly large number of epochs for YOLOv8; it uses early stopping, so training stops automatically once no better model has been found for a while.
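
If you want finer control over this early stopping, the patience argument of model.train sets how many epochs without improvement are tolerated before training stops (a minimal sketch, reusing the same paths as above):

from ultralytics import YOLO

model = YOLO(r'D:\work\yolov8-3\yolov8\yolov8n.pt')
# patience=30: stop if no improvement is seen for 30 consecutive epochs
results = model.train(data=r'D:\work\yolov8-3\yolov8\myData\test.yaml',
                      epochs=300, imgsz=640, patience=30)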

After training finishes, copy the trained weights from the runs output folder into the myData folder (this is just how I set up my own paths, to avoid editing them repeatedly), then run the prediction script below; the prediction results end up under the runs folder.
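
The best checkpoint is typically written to runs\detect\train\weights\best.pt (the run folder may carry a suffix such as train2). A small sketch of the copy step, assuming that default location; after copying, the prediction script below runs as-is:

import shutil

# assumed default location of the best checkpoint; adjust the run folder name if yours differs
src = r'D:\work\yolov8-3\yolov8\runs\detect\train\weights\best.pt'
dst = r'D:\work\yolov8-3\yolov8\myData\best.pt'
shutil.copyfile(src, dst)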

from ultralytics import YOLO


model = YOLO(r"D:\work\yolov8-3\yolov8\myData\best.pt")
source = r"D:\work\yolov8-3\yolov8\myData\images"
# source = r"E:\code\yolov8\myData\test\1.jpg"
results = model.predict(source=source, save=True)  # run prediction and save the annotated results
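
Besides the images saved by save=True, the returned results can also be inspected in code; each result exposes the detected boxes with their class ids and confidences (a minimal sketch based on the current Ultralytics results API):

for r in results:
    for box in r.boxes:
        cls_id = int(box.cls)                    # class index into model.names
        conf = float(box.conf)                   # confidence score
        x1, y1, x2, y2 = box.xyxy[0].tolist()    # box corners in pixels
        print(model.names[cls_id], round(conf, 2), (x1, y1, x2, y2))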



Common errors

If the trained weights come out as all zeros (a known AMP-related issue on some GPUs), edit the default yaml config file and change the amp setting from True to False.
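
Alternatively, mixed precision can be turned off for a single run without editing the config file, since amp is also accepted as a train argument (a minimal sketch; model is the YOLO object from the training script above):

results = model.train(data=r'D:\work\yolov8-3\yolov8\myData\test.yaml',
                      epochs=100, imgsz=640, amp=False)  # disable automatic mixed precision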

Most other problems are path problems; in particular, note that the paths in create.py use forward slashes (/).

Summary

This write-up is simply a record of my own practice.
