Training YOLOv10 on a rented server with two datasets (the COCO dataset and the ExDark dataset)

1. Environment Setup

Conda environment

# 1. Create a Python 3.9 conda environment
conda create -n yolov10 python=3.9
# 2. Activate the newly created Python 3.9 environment
conda activate yolov10

YOLOv10 dependencies

# 1. Download the YOLOv10 source code
git clone https://github.com/THU-MIG/yolov10.git
# 2. Change into the yolov10 source root directory and install the dependencies
# Note: pip automatically picks a PyTorch build matching whether a GPU is present,
# so there is no need to choose the PyTorch/CUDA versions yourself, which is very convenient
pip install -r requirements.txt -i https://pypi.doubanio.com/simple
# 3. Run the following command so that the yolo command (and related CLI commands) becomes available
pip install -e .
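
After installation, a minimal sanity check (a small sketch; the exact PyTorch/CUDA versions depend on what pip resolved for your machine) can confirm that the packages import and that PyTorch sees the GPU:

# check_env.py -- quick environment check (helper script, not part of the repo)
import torch
import ultralytics

print("torch:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
print("ultralytics:", ultralytics.__version__)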

2. Datasets

Downloading the COCO dataset

# Training set images

wget http://images.cocodataset.org/zips/train2017.zip -O train2017.zip

# Validation set images

wget http://images.cocodataset.org/zips/val2017.zip -O val2017.zip

# Test set images

wget http://images.cocodataset.org/zips/test2017.zip -O test2017.zip

unzip train2017.zip
unzip val2017.zip
unzip test2017.zip

Downloading and converting the COCO labels

wget https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip
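
The later conversion and merge steps assume the extracted images and labels sit under a single COCO root, roughly like the layout below (an assumed arrangement; move the unzipped folders into place if they ended up elsewhere):

coco/
├── images/
│   ├── train2017/
│   ├── val2017/
│   └── test2017/
└── labels/
    ├── train2017/
    └── val2017/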

To convert the COCO label format into YOLO txt files, create json2txt.py:

# Convert a COCO-format dataset into a YOLO-format dataset
# --json_path  path of the input COCO json annotation file
# --save_path  output folder for the label files (default: ./labels under the current directory)

import os
import json
from collections import defaultdict
from tqdm import tqdm
import argparse

parser = argparse.ArgumentParser()
# Change this to the location of your own json file
parser.add_argument('--json_path', default='/root/autodl-tmp/coco/data/annotations/instances_train2017.json', type=str, help="input: coco format(json)")
# Directory where the .txt label files are saved
parser.add_argument('--save_path', default='/root/autodl-tmp/coco/label', type=str, help="specify where to save the output dir of labels")
arg = parser.parse_args()

def convert(size, box):
    # Convert a COCO bbox (x_min, y_min, w, h) in pixels into a normalized
    # YOLO box (x_center, y_center, w, h); round() limits the decimal places
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = box[0] + box[2] / 2.0
    y = box[1] + box[3] / 2.0
    w = box[2]
    h = box[3]
    x = round(x * dw, 6)
    w = round(w * dw, 6)
    y = round(y * dh, 6)
    h = round(h * dh, 6)
    return (x, y, w, h)

if __name__ == '__main__':
    json_file = arg.json_path          # COCO object-instance annotation file
    ana_txt_save_path = arg.save_path  # output directory

    data = json.load(open(json_file, 'r'))
    if not os.path.exists(ana_txt_save_path):
        os.makedirs(ana_txt_save_path)

    id_map = {}  # COCO category ids are not contiguous, so remap them to 0..N-1
    with open(os.path.join(ana_txt_save_path, 'classes.txt'), 'w') as f:
        # write classes.txt
        for i, category in enumerate(data['categories']):
            f.write(f"{category['name']}\n")
            id_map[category['id']] = i
    # print(id_map)

    # Group the annotations by image id once, instead of scanning the whole
    # annotation list again for every image
    anns_by_img = defaultdict(list)
    for ann in data['annotations']:
        anns_by_img[ann['image_id']].append(ann)

    # Change this if the image list should be written somewhere else
    list_file = open(os.path.join(ana_txt_save_path, 'train2017.txt'), 'w')
    for img in tqdm(data['images']):
        filename = img["file_name"]
        img_width = img["width"]
        img_height = img["height"]
        img_id = img["id"]
        head, tail = os.path.splitext(filename)
        ana_txt_name = head + ".txt"  # label file name, matches the image name
        f_txt = open(os.path.join(ana_txt_save_path, ana_txt_name), 'w')
        for ann in anns_by_img[img_id]:
            box = convert((img_width, img_height), ann["bbox"])
            f_txt.write("%s %s %s %s %s\n" % (id_map[ann["category_id"]], box[0], box[1], box[2], box[3]))
        f_txt.close()
        # write the relative image path into train2017.txt (use val2017 when converting the validation set)
        list_file.write('./images/train2017/%s.jpg\n' % (head))
    list_file.close()
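
The script is run once per annotation file; with the paths assumed above, a possible invocation for the training annotations is shown below (note that the image-list name train2017.txt and the ./images/train2017/ prefix are hard-coded in the script, so edit them before converting the val annotations):

python json2txt.py --json_path /root/autodl-tmp/coco/data/annotations/instances_train2017.json --save_path /root/autodl-tmp/coco/label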

Downloading the ExDark dataset

AI Studio (PaddlePaddle) download: "ExDark_数据集" on 飞桨 AI Studio 星河社区

Baidu Netdisk link: https://pan.baidu.com/s/1hYT2qe5TaI_x59XQf0IuUg
Extraction code: 8888

unzip ExDark.zip

Converting the ExDark labels and splitting them into training, validation, and test sets

Create exdark2yolo.py, which converts the annotations and remaps each class to its index in the COCO label set:

import os
from PIL import Image
import argparse
import shutil

# ExDark class names and their new (COCO) class indices
labels = {
    'Bicycle': 1,
    'Boat': 8,
    'Bottle': 39,
    'Bus': 5,
    'Car': 2,
    'Cat': 15,
    'Chair': 56,
    'Cup': 41,
    'Dog': 16,
    'Motorbike': 3,
    'People': 0,
    'Table': 60
}

def ExDark2Yolo(txts_dir: str, imgs_dir: str, ratio: str, version: int, output_dir: str):
    ratios = ratio.split(':')
    ratio_train, ratio_test, ratio_val = float(ratios[0]), float(ratios[1]), float(ratios[2])
    ratio_sum = ratio_train + ratio_test + ratio_val
    dataset_perc = {'train': ratio_train / ratio_sum, 'test': ratio_test / ratio_sum, 'val': ratio_val / ratio_sum}

    for t in dataset_perc:
        os.makedirs('/'.join([output_dir, 'images', t]), exist_ok=True)
        os.makedirs('/'.join([output_dir, 'labels', t]), exist_ok=True)

    for label in labels:
        print('Processing {}...'.format(label))
        filenames = os.listdir('/'.join([txts_dir, label]))
        cur_idx = 0
        files_num = len(filenames)

        for filename in filenames:
            cur_idx += 1
            # ExDark annotation files are named "<image name>.<ext>.txt",
            # e.g. "2015_00001.png.txt" annotates the image "2015_00001.png"
            name_split = filename.split('.')
            img_name = '.'.join(name_split[:-1])  # image file name, e.g. "2015_00001.png"
            stem = '.'.join(name_split[:-2])      # image stem, e.g. "2015_00001"
            if cur_idx < dataset_perc.get('train') * files_num:
                set_type = 'train'
            elif cur_idx < (dataset_perc.get('train') + dataset_perc.get('test')) * files_num:
                set_type = 'test'
            else:
                set_type = 'val'
            # YOLO expects the label file to share the image stem: "<stem>.txt"
            output_label_path = '/'.join([output_dir, 'labels', set_type, stem + '.txt'])
            yolo_output_file = open(output_label_path, 'a')

            img_path = '/'.join([imgs_dir, label, img_name])
            try:
                img = Image.open(img_path)
            except FileNotFoundError:
                # some images are stored with an upper-case extension
                img_path = '/'.join([imgs_dir, label, stem + '.' + name_split[-2].upper()])
                img = Image.open(img_path)

            output_img_path = '/'.join([output_dir, 'images', set_type])
            shutil.copy(img_path, output_img_path)

            width, height = img.size
            txt = open('/'.join([txts_dir, label, filename]), 'r')
            txt.readline()  # ignore first line
            line = txt.readline()

            while line != '':
                datas = line.strip().split()
                class_idx = labels[datas[0]]  # map the ExDark class name to its COCO class index
                x0, y0, w0, h0 = int(datas[1]), int(datas[2]), int(datas[3]), int(datas[4])
                if version == 5:
                    x = (x0 + w0 / 2) / width
                    y = (y0 + h0 / 2) / height
                elif version == 3:
                    x = x0 / width
                    y = y0 / height
                else:
                    print("Version of YOLO error.")
                    return
                w = w0 / width
                h = h0 / height

                print(f"Original class index: {labels[datas[0]]}, New class index: {class_idx}")  # 调试信息

                yolo_output_file.write(' '.join([str(class_idx),
                                                 format(x, '.6f'),
                                                 format(y, '.6f'),
                                                 format(w, '.6f'),
                                                 format(h, '.6f'),
                                                 ]) + '\n')
                line = txt.readline()

            yolo_output_file.close()

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('anndir', type=str, default='/root/autodl-tmp/ExDark/Annotations', help="ExDark annotations directory.")
    parser.add_argument('imgdir', type=str, default='/root/autodl-tmp/ExDark/images', help="ExDark images directory.")
    parser.add_argument('--ratio', type=str, default='8:1:1', help="Ratio between train/test/val, default 8:1:1.")
    parser.add_argument('--version', type=int, choices=[3, 5], default=5, help="Version of YOLO (3 or 5), default 5.")
    parser.add_argument('--output-dir', type=str, default="output", help="Images and converted YOLO annotations output directory.")
    args = parser.parse_args()
    ExDark2Yolo(args.anndir, args.imgdir, args.ratio, args.version, args.output_dir)

Run the script to convert the ExDark dataset into YOLO format:

python exdark2yolo.py /root/autodl-tmp/ExDark/Annotations /root/autodl-tmp/ExDark/images --output-dir /root/autodl-tmp/ExDark/output --version 5 --ratio 8:1:1

  • /root/autodl-tmp/ExDark/Annotations: directory containing the ExDark annotation files.
  • /root/autodl-tmp/ExDark/images: directory containing the ExDark image files.
  • --output-dir /root/autodl-tmp/ExDark/output: output directory for the converted YOLO-format files.
  • --version 5: YOLO label format version (YOLOv5-style center coordinates are used here).
  • --ratio 8:1:1: ratio between the training, test, and validation splits.
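
As a quick sanity check of the split (a small sketch, assuming the --output-dir used above), the number of files that landed in each subset can be counted:

# count_split.py -- count images and labels per subset (helper script, not part of the tutorial)
import os

root = '/root/autodl-tmp/ExDark/output'  # same as --output-dir above
for subset in ('train', 'test', 'val'):
    imgs = os.listdir(os.path.join(root, 'images', subset))
    lbls = os.listdir(os.path.join(root, 'labels', subset))
    print(f"{subset}: {len(imgs)} images, {len(lbls)} label files")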

Merging the COCO dataset and the ExDark dataset

Create merge_datasets.py:

import os
import shutil
import argparse

def merge_datasets(dataset1_img_dirs, dataset1_label_dirs,
                   dataset2_img_dirs, dataset2_label_dirs,
                   output_img_dirs, output_label_dirs):
    # the 'test' subfolder is not merged
    for subfolder in ['train', 'val']:
        dataset1_img_dir = dataset1_img_dirs.get(subfolder, None)
        dataset1_label_dir = dataset1_label_dirs.get(subfolder, None)
        dataset2_img_dir = dataset2_img_dirs.get(subfolder, None)
        dataset2_label_dir = dataset2_label_dirs.get(subfolder, None)
        output_img_dir = output_img_dirs.get(subfolder, None)
        output_label_dir = output_label_dirs.get(subfolder, None)

        if not dataset1_img_dir or not dataset1_label_dir or not dataset2_img_dir or not dataset2_label_dir:
            print(f"Warning: Missing directories for {subfolder}. Skipping...")
            continue

        os.makedirs(output_img_dir, exist_ok=True)
        os.makedirs(output_label_dir, exist_ok=True)

        def copy_files(img_dir, label_dir, dst_img_dir, dst_label_dir):
            img_files = [f for f in os.listdir(img_dir) if os.path.isfile(os.path.join(img_dir, f))]
            for img_file in img_files:
                img_src_path = os.path.join(img_dir, img_file)
                img_dst_path = os.path.join(dst_img_dir, img_file)
                shutil.copy(img_src_path, img_dst_path)

            label_files = [f for f in os.listdir(label_dir) if os.path.isfile(os.path.join(label_dir, f))]
            for label_file in label_files:
                label_src_path = os.path.join(label_dir, label_file)
                label_dst_path = os.path.join(dst_label_dir, label_file)
                shutil.copy(label_src_path, label_dst_path)

        copy_files(dataset1_img_dir, dataset1_label_dir, output_img_dir, output_label_dir)
        copy_files(dataset2_img_dir, dataset2_label_dir, output_img_dir, output_label_dir)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset1-img-dir', type=str, required=True, help="First dataset images directory (root)")
    parser.add_argument('--dataset1-label-dir', type=str, required=True, help="First dataset labels directory (root)")
    parser.add_argument('--dataset2-img-dir', type=str, required=True, help="Second dataset images directory (root)")
    parser.add_argument('--dataset2-label-dir', type=str, required=True, help="Second dataset labels directory (root)")
    parser.add_argument('--output-img-dir', type=str, required=True, help="Output directory for merged images (root)")
    parser.add_argument('--output-label-dir', type=str, required=True, help="Output directory for merged labels (root)")
    
    args = parser.parse_args()

    dataset1_img_dirs = {
        'train': os.path.join(args.dataset1_img_dir, 'train2017'),
        'val': os.path.join(args.dataset1_img_dir, 'val2017')
    }
    dataset1_label_dirs = {
        'train': os.path.join(args.dataset1_label_dir, 'train2017'),
        'val': os.path.join(args.dataset1_label_dir, 'val2017')
    }
    dataset2_img_dirs = {
        'train': os.path.join(args.dataset2_img_dir, 'train'),
        'val': os.path.join(args.dataset2_img_dir, 'val')
    }
    dataset2_label_dirs = {
        'train': os.path.join(args.dataset2_label_dir, 'train'),
        'val': os.path.join(args.dataset2_label_dir, 'val')
    }
    output_img_dirs = {
        'train': os.path.join(args.output_img_dir, 'train'),
        'val': os.path.join(args.output_img_dir, 'val')
    }
    output_label_dirs = {
        'train': os.path.join(args.output_label_dir, 'train'),
        'val': os.path.join(args.output_label_dir, 'val')
    }

    merge_datasets(dataset1_img_dirs, dataset1_label_dirs,
                   dataset2_img_dirs, dataset2_label_dirs,
                   output_img_dirs, output_label_dirs)

python /root/autodl-tmp/merge_datasets.py \
    --dataset1-img-dir /root/autodl-tmp/coco/images \
    --dataset1-label-dir /root/autodl-tmp/coco/labels \
    --dataset2-img-dir /root/autodl-tmp/ExDark/output/images \
    --dataset2-label-dir /root/autodl-tmp/ExDark/output/labels \
    --output-img-dir /root/autodl-tmp/merge/images \
    --output-label-dir /root/autodl-tmp/merge/labels
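
Before training, it is worth verifying that every merged image has a matching label file, since Ultralytics pairs each image with a label of the same file stem under labels/. A minimal check, assuming the merge paths above:

# check_pairs.py -- verify image/label pairing in the merged dataset (helper script)
import os

root = '/root/autodl-tmp/merge'
for subset in ('train', 'val'):
    img_dir = os.path.join(root, 'images', subset)
    lbl_dir = os.path.join(root, 'labels', subset)
    missing = 0
    for img in os.listdir(img_dir):
        stem = os.path.splitext(img)[0]
        if not os.path.isfile(os.path.join(lbl_dir, stem + '.txt')):
            missing += 1
    print(f"{subset}: {len(os.listdir(img_dir))} images, {missing} without a label file")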

3. Model Training

Create a new dataset yaml file

Under yolov10/ultralytics/cfg/datasets, create a coco_ExDark.yaml file and adjust the paths as needed:

# Ultralytics YOLO 🚀, AGPL-3.0 license
# Merged dataset: COCO 2017 (https://cocodataset.org) + ExDark
# Documentation: https://docs.ultralytics.com/datasets/detect/coco/
# Example usage: yolo train data=coco_ExDark.yaml

# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: /root/autodl-tmp/merge # dataset root dir
train: images/train # train images (relative to 'path')
val: images/val # val images (relative to 'path')
test: /root/autodl-tmp/ExDark/output/images/test # test images (ExDark test split, absolute path)

# Labels: Ultralytics locates the labels automatically by replacing 'images' with
# 'labels' in the image paths (images/train -> labels/train, images/val -> labels/val),
# so no separate labels entry is needed here.


# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush

Training script train.py

# coding:utf-8
from ultralytics import YOLOv10

# yolov10s
# model configuration file
yolov10s_model_yaml_path = "ultralytics/cfg/models/v10/yolov10s.yaml"
# dataset configuration file (the coco_ExDark.yaml created above)
yolov10s_data_yaml_path = 'ultralytics/cfg/datasets/coco_ExDark.yaml'
# pretrained weights
yolov10s_pre_model_name = 'model/yolov10s.pt'


if __name__ == '__main__':
    # load the yolov10s pretrained weights into the model defined by the yaml
    model = YOLOv10(yolov10s_model_yaml_path).load(yolov10s_pre_model_name)
    # train the yolov10s model
    results = model.train(data=yolov10s_data_yaml_path, epochs=100, batch=8, name='train_v10')
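
Training is launched with python train.py. Once it finishes, the weights are written under the run directory (by default Ultralytics saves them as runs/detect/train_v10/weights/best.pt for the run name used above; adjust the path if yours differs), and a short sketch like the following can be used to evaluate and try the model:

# coding:utf-8
# Evaluate and test the trained model (a sketch; the weights and image paths are assumptions)
from ultralytics import YOLOv10

model = YOLOv10('runs/detect/train_v10/weights/best.pt')
# validate on the val split defined in the dataset yaml
metrics = model.val(data='ultralytics/cfg/datasets/coco_ExDark.yaml')
# run inference on the low-light test images and save the visualizations
results = model.predict('/root/autodl-tmp/ExDark/output/images/test', save=True)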
