Training YOLOv8 on the COCO2017 dataset
Install YOLOv8
pip install ultralytics -i http://mirrors.aliyun.com/pypi/simple/
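To confirm the install worked, the following command should print the installed package version:
python -c "import ultralytics; print(ultralytics.__version__)"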
Download the COCO dataset
The test set is optional for training; only the files from the first three links below are needed.
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
wget http://images.cocodataset.org/zips/test2017.zip
wget http://images.cocodataset.org/annotations/image_info_test2017.zip
Create the directory layout
The following steps are critical and the layout must match exactly.
First, unzip the downloaded archives.
1. Create a data folder.
2. Inside data, create the images and labels folders.
3. Move all images from the extracted train2017 and val2017 folders into images.
4. Move the extracted annotations directory into data (a Python sketch of these steps follows the directory tree below).
The final directory tree should look like this:
├─data
│ ├─annotations
│ ├─images
│ └─labels
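A minimal Python sketch of steps 1 to 4, assuming the three required zip files sit in the current directory (the one that will contain data); adjust the paths if your downloads live elsewhere:
import os
import shutil
import zipfile

# 1-2. create the data/images and data/labels folders
os.makedirs('data/images', exist_ok=True)
os.makedirs('data/labels', exist_ok=True)

# unzip train2017.zip, val2017.zip and annotations_trainval2017.zip into the current directory
for z in ('train2017.zip', 'val2017.zip', 'annotations_trainval2017.zip'):
    with zipfile.ZipFile(z) as zf:
        zf.extractall('.')

# 3. move every extracted image into data/images
for folder in ('train2017', 'val2017'):
    for name in os.listdir(folder):
        shutil.move(os.path.join(folder, name), 'data/images')

# 4. move the annotations directory into data
shutil.move('annotations', 'data')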
Create the conversion script
Create the following Python file in the data folder and run it.
# -*- encoding: utf-8 -*-
'''
File    : cocotoyolo.py
Time    : 2023/04/25 16:53:42
Author  : 千秋
Version : 1.0
Contact : spirit@qq.com
'''
# Convert a COCO-format dataset into a YOLO-format dataset.
# --json_path  path of the input json file
# --save_path  name of the output folder, defaults to labels under the current directory
#              (in this version the paths are hardcoded in __main__ below)
import os
import json
from tqdm import tqdm


def convert(size, box):
    # COCO boxes are [x_min, y_min, width, height] in pixels;
    # YOLO labels use [x_center, y_center, width, height] normalized by image size.
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = box[0] + box[2] / 2.0
    y = box[1] + box[3] / 2.0
    w = box[2]
    h = box[3]
    # round() controls the number of decimal places written to the label files
    x = round(x * dw, 6)
    w = round(w * dw, 6)
    y = round(y * dh, 6)
    h = round(h * dh, 6)
    return (x, y, w, h)
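# A quick worked example of the conversion (added for illustration, not part of the
# original script): for a 640x480 image and a COCO box [x_min=100, y_min=200, w=50, h=80],
# convert((640, 480), [100, 200, 50, 80]) returns approximately
# (0.195312, 0.5, 0.078125, 0.166667): center-x = (100 + 50/2) / 640,
# center-y = (200 + 80/2) / 480, w = 50 / 640, h = 80 / 480.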
if __name__ == '__main__':
    # Replace these paths with the location of your own json files
    root = "./"
    json_trainfile = root + 'annotations/instances_train2017.json'  # COCO object-instance annotations
    json_valfile = root + 'annotations/instances_val2017.json'      # COCO object-instance annotations
    ana_txt_save_path = root + 'labels/'  # where the label files are saved

    traindata = json.load(open(json_trainfile, 'r'))
    valdata = json.load(open(json_valfile, 'r'))

    # Remap the class ids and save the class file
    if not os.path.exists(ana_txt_save_path):
        os.makedirs(ana_txt_save_path)

    id_map = {}  # COCO category ids are not contiguous, so remap them before writing
    with open(os.path.join(root, 'classes.txt'), 'w') as f:
        # write classes.txt
        for i, category in enumerate(traindata['categories']):
            f.write(f"{category['name']}\n")
            id_map[category['id']] = i
    '''
    Write the train label files and train2017.txt
    '''
    # print(id_map)
    # Change this path if the image-list file should be written somewhere else.
    list_file = open(os.path.join(root, 'train2017.txt'), 'w')
    for img in tqdm(traindata['images']):
        filename = img["file_name"]
        img_width = img["width"]
        img_height = img["height"]
        img_id = img["id"]
        head, tail = os.path.splitext(filename)
        ana_txt_name = head + ".txt"  # label file name, matching the jpg name
        f_txt = open(os.path.join(ana_txt_save_path, ana_txt_name), 'w')
        for ann in traindata['annotations']:
            if ann['image_id'] == img_id:
                box = convert((img_width, img_height), ann["bbox"])
                f_txt.write("%s %s %s %s %s\n" % (id_map[ann["category_id"]], box[0], box[1], box[2], box[3]))
        f_txt.close()
        # write the image's relative path into train2017.txt
        list_file.write('data/images/%s.jpg\n' % (head))
    list_file.close()
    '''
    Write the val label files and val2017.txt
    '''
    # print(id_map)
    # Change this path if the image-list file should be written somewhere else.
    list_file = open(os.path.join(root, 'val2017.txt'), 'w')
    for img in tqdm(valdata['images']):
        filename = img["file_name"]
        img_width = img["width"]
        img_height = img["height"]
        img_id = img["id"]
        head, tail = os.path.splitext(filename)
        ana_txt_name = head + ".txt"  # label file name, matching the jpg name
        f_txt = open(os.path.join(ana_txt_save_path, ana_txt_name), 'w')
        for ann in valdata['annotations']:
            if ann['image_id'] == img_id:
                box = convert((img_width, img_height), ann["bbox"])
                f_txt.write("%s %s %s %s %s\n" % (id_map[ann["category_id"]], box[0], box[1], box[2], box[3]))
        f_txt.close()
        # write the image's relative path into val2017.txt
        list_file.write('data/images/%s.jpg\n' % (head))
    list_file.close()
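The nested loop above rescans every annotation for every image, which is roughly O(images × annotations) and can be extremely slow on the full train2017 set (about 118k images). A minimal sketch of a faster variant that pre-groups annotations by image_id (the helper name group_by_image is my own, not part of the original script):
from collections import defaultdict

def group_by_image(coco_data):
    # Build an image_id -> [annotation, ...] index once, so each image
    # only touches its own annotations instead of the whole list.
    grouped = defaultdict(list)
    for ann in coco_data['annotations']:
        grouped[ann['image_id']].append(ann)
    return grouped

# Usage inside the loops above, e.g. for the train split:
#   anns_by_img = group_by_image(traindata)
#   for img in tqdm(traindata['images']):
#       for ann in anns_by_img.get(img['id'], []):
#           box = convert((img['width'], img['height']), ann['bbox'])
#           ...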
Create the yaml file
Create my.yaml in the data folder:
path: E:\\Backup\\Desktop\\yolov8\\data # change this to your own data path
train: train2017.txt # train images (relative to 'path') 118287 images
val: val2017.txt # val images (relative to 'path') 5000 images
# Classes
names:
0: person
1: bicycle
2: car
3: motorcycle
4: airplane
5: bus
6: train
7: truck
8: boat
9: traffic light
10: fire hydrant
11: stop sign
12: parking meter
13: bench
14: bird
15: cat
16: dog
17: horse
18: sheep
19: cow
20: elephant
21: bear
22: zebra
23: giraffe
24: backpack
25: umbrella
26: handbag
27: tie
28: suitcase
29: frisbee
30: skis
31: snowboard
32: sports ball
33: kite
34: baseball bat
35: baseball glove
36: skateboard
37: surfboard
38: tennis racket
39: bottle
40: wine glass
41: cup
42: fork
43: knife
44: spoon
45: bowl
46: banana
47: apple
48: sandwich
49: orange
50: broccoli
51: carrot
52: hot dog
53: pizza
54: donut
55: cake
56: chair
57: couch
58: potted plant
59: bed
60: dining table
61: toilet
62: tv
63: laptop
64: mouse
65: remote
66: keyboard
67: cell phone
68: microwave
69: oven
70: toaster
71: sink
72: refrigerator
73: book
74: clock
75: vase
76: scissors
77: teddy bear
78: hair drier
79: toothbrush
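Before training, it is worth a quick check that every image listed in train2017.txt has a matching label file. A minimal sketch, assuming it is run from the directory that contains data (the paths below are assumptions based on the layout built above):
import os

# Count entries in train2017.txt that have no matching label file in data/labels
with open('data/train2017.txt') as f:
    stems = [os.path.splitext(os.path.basename(line.strip()))[0] for line in f if line.strip()]

missing = [s for s in stems if not os.path.exists(os.path.join('data/labels', s + '.txt'))]
print(f"{len(stems)} images listed, {len(missing)} without a label file")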
Create the training script
Create train.py at the same level as the data folder. Running train.py trains the model and then exports an ONNX model. Note that the training batch size should be a multiple of 16 so that the exported model keeps an input batch of 1.
from ultralytics import YOLO

if __name__ == '__main__':
    # Create a new YOLO model from scratch
    model = YOLO('yolov8s.yaml')
    # Load a pretrained YOLO model (recommended for training); this replaces the model created above
    model = YOLO('yolov8s.pt')
    # Train the model on the COCO2017 dataset described by data/my.yaml for 30 epochs
    results = model.train(data='data/my.yaml', epochs=30, batch=16, workers=1, imgsz=320)
    # Evaluate the model's performance on the validation set
    results = model.val()
    # Export the model to ONNX format
    success = model.export(format='onnx', opset=12, imgsz=320)
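Once training finishes, the trained weights can be used for inference with the same API. A minimal sketch; the checkpoint path below is the usual Ultralytics default output location and the image name is only an example, so adjust both to your own run:
from ultralytics import YOLO

# Load the best checkpoint produced by the training run above (default Ultralytics output path)
model = YOLO('runs/detect/train/weights/best.pt')

# Run inference on one image and print the predicted boxes and class ids
results = model.predict('data/images/000000000139.jpg', imgsz=320)
for r in results:
    print(r.boxes.xyxy, r.boxes.cls)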