PCB Defect Detection with YOLOv5-6.0 (Python/C++ Deployment)

Source code download: https://gitee.com/zhankun3280/my_yolov5-6.0

1. Dependencies

  • ubuntu 18.04
  • python 3.6.9
  • opencv 4.5.0
  • pytorch 1.9.0
  • torchvision 0.10.0

2. Preparing the Dataset

Link: https://pan.baidu.com/s/1TqCX5nRPXIZ1jbyaye3e3A  Password: 9fr6

3. Preparing the Training Data

(1) Unzip the dataset and place it under the yolov5-6.0 root directory.
(2) Delete the contents of the ImageSets folder and create a new Main folder under /ImageSets.
(3) Split the dataset by running test.py (make sure the paths are correct). Four txt files will be generated under /ImageSets/Main: trainval.txt, test.txt, train.txt and val.txt.

# coding:utf-8

import os
import random
import argparse

parser = argparse.ArgumentParser()
# path to the xml label files; adjust for your own data (xml files are usually stored under Annotations)
parser.add_argument('--xml_path', default='./Annotations', type=str, help='input xml label path')
# output path for the dataset split files; point this to ImageSets/Main under your own data
parser.add_argument('--txt_path', default='./ImageSets/Main', type=str, help='output txt label path')
opt = parser.parse_args()

trainval_percent = 1.0  # fraction of all samples assigned to trainval (the remainder becomes test)
train_percent = 0.9     # fraction of trainval assigned to train (the remainder becomes val)
xmlfilepath = opt.xml_path
txtsavepath = opt.txt_path
total_xml = os.listdir(xmlfilepath)
if not os.path.exists(txtsavepath):
    os.makedirs(txtsavepath)

num = len(total_xml)
list_index = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(list_index, tv)
train = random.sample(trainval, tr)

file_trainval = open(txtsavepath + '/trainval.txt', 'w')
file_test = open(txtsavepath + '/test.txt', 'w')
file_train = open(txtsavepath + '/train.txt', 'w')
file_val = open(txtsavepath + '/val.txt', 'w')


for i in list_index:
    name = total_xml[i][:-4] + '\n'
    if i in trainval:
        file_trainval.write(name)
        if i in train:
            file_train.write(name)
        else:
            file_val.write(name)
    else:
        file_test.write(name)


file_trainval.close()
file_train.close()
file_val.close()
file_test.close()

(4) Create a labels folder under the data folder, then create a my_labels.py file in the yolov5-6.0 folder and run it (mind the paths and the dataset classes). Three txt files will be generated under the data folder: train.txt, val.txt and test.txt.

# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import os
from os import getcwd

sets = ['train', 'val', 'test']
classes = ['missing_hole','mouse_bite', 'open_circuit', 'short', 'spur', 'spurious_copper']  # classes of your own dataset


abs_path = os.getcwd()
print(abs_path)

def convert(size, box):
    # convert a VOC box (xmin, xmax, ymin, ymax) into a
    # normalized YOLO box (x_center, y_center, width, height)
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return x, y, w, h

def convert_annotation(image_id):
    in_file = open ('/home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/Annotations/%s.xml' % (image_id), encoding='UTF-8')
    out_file = open('/home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/labels/%s.txt' % (image_id), 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    for obj in root.iter('object'):
        # note: an ElementTree element with no children is falsy, so test against None explicitly
        if obj.find('difficult') is not None:
            difficult = float(obj.find('difficult').text)
        else:
            difficult = 0
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
             float(xmlbox.find('ymax').text))
        b1, b2, b3, b4 = b
        # clip annotations that extend beyond the image boundary
        if b2 > w:
            b2 = w
        if b4 > h:
            b4 = h
        b = (b1, b2, b3, b4)
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')

wd = getcwd()
for image_set in sets:
    if not os.path.exists('/home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/labels/'):
        os.makedirs('/home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/labels/')
    image_ids = open('/home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/ImageSets/Main/%s.txt' % (image_set)).read().strip().split()
    list_file = open('/home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/%s.txt' % (image_set), 'w')
    for image_id in image_ids:
        list_file.write(abs_path +  '/data/images/%s.jpg\n' % (image_id))
        convert_annotation(image_id)
    list_file.close()
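
As a quick sanity check of the conversion (a minimal sketch; the labels directory and class count below mirror the example above and may need adjusting), you can verify that every generated label line has a valid class id and normalized coordinates within [0, 1]:

# check_labels.py -- spot-check the generated YOLO label files (sketch)
import glob
import os

labels_dir = './data/labels'   # adjust to your labels directory
num_classes = 6                # missing_hole ... spurious_copper

problems = 0
for path in glob.glob(os.path.join(labels_dir, '*.txt')):
    with open(path) as f:
        for line_no, line in enumerate(f, 1):
            parts = line.split()
            if len(parts) != 5:
                print(f'{path}:{line_no} unexpected field count {len(parts)}')
                problems += 1
                continue
            cls_id = int(parts[0])
            coords = [float(v) for v in parts[1:]]
            if not 0 <= cls_id < num_classes or any(v < 0 or v > 1 for v in coords):
                print(f'{path}:{line_no} out-of-range value: {line.strip()}')
                problems += 1
print('done, problems found:', problems)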

(5) Create a my_pcb.yaml file under the models folder (remember to change the number of classes, nc).

# YOLOv5 🚀 by Ultralytics, GPL-3.0 license

# Parameters
nc: 6  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]

(6) Create my_data.yaml under the data folder (mind the train/val paths, nc, and the class names in names).

# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# PCB defect dataset (6 classes)
# Example usage: python train.py --data data/my_data.yaml


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
#path: /home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/images  # dataset root dir
train: /home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/train.txt  # train images
val: /home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/val.txt  # val images
#test: /home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/yolov5-6.0/data/test.txt  # test images (optional)

# Classes
nc: 6  # number of classes
names: ['missing_hole','mouse_bite', 'open_circuit', 'short', 'spur', 'spurious_copper']  # class names

(7) Prepare the pretrained weights file yolov5s.pt and place it under the weights folder (a download command is sketched after this list).
(8) Start training by running the training command from the yolov5-6.0 root directory.
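
If you do not already have the pretrained weights locally, they can be fetched from the Ultralytics yolov5 v6.0 release (the URL below is the standard v6.0 release asset; verify it if the release layout has changed):

wget https://github.com/ultralytics/yolov5/releases/download/v6.0/yolov5s.pt -P weights/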

4. Training Command

python3 train.py --data data/my_data.yaml --cfg models/my_pcb.yaml --weights weights/yolov5s.pt --batch-size 8 --epochs 100

After training finishes, the trained pt weight files can be found under the runs/train folder.
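
Optionally, the trained weights can be evaluated on the validation split with the val.py script shipped with yolov5-6.0 (exp4 below is just this run's example directory; use whatever directory your training produced):

python3 val.py --data data/my_data.yaml --weights ./runs/train/exp4/weights/best.pt --img 640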

5. Running Detection with Python 3 (pt weights)

python3 detect.py --weights ./runs/train/exp4/weights/best.pt --source ./data/images/05_spur_07.jpg 

6. Converting the pt Weights to ONNX

# install the onnx export dependencies
pip install -U coremltools onnx scikit-learn==0.19.2
# export (with dynamic input shapes)
python3 export.py --weights ./runs/train/exp4/weights/best.pt --include onnx --dynamic
# export (fixed input shape)
python3 export.py --weights ./runs/train/exp4/weights/best.pt --include onnx

Adjust the parameters in export.py accordingly. If you export from the command line, add --opset 12 and do not pass the --dynamic flag: OpenCV's support for dynamic input shapes is limited, and it reportedly requires OpenCV 4.5.3 or later built with TBB support.
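
Putting that together, the export command actually used for the OpenCV deployment below is (same example paths as above):

python3 export.py --weights ./runs/train/exp4/weights/best.pt --include onnx --opset 12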

7. Running Detection with Python 3 (ONNX weights)

python3 detect.py --weights ./runs/train/exp4/weights/best.onnx --source ./data/images/05_mouse_bite_08.jpg
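
Before moving on to C++, it is worth confirming that the exported model also loads under OpenCV's DNN module, since that is the backend used in Section 8. A minimal Python sketch (the image path is only an example; for a 640x640 input and 6 classes the output shape should be (1, 25200, 11)):

# verify_onnx_opencv.py -- quick check that the exported ONNX model runs under cv2.dnn (sketch)
import cv2

net = cv2.dnn.readNetFromONNX('./runs/train/exp4/weights/best.onnx')

img = cv2.imread('./data/images/05_mouse_bite_08.jpg')
blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (640, 640), swapRB=True, crop=False)
net.setInput(blob)
out = net.forward()
print('output shape:', out.shape)  # expected (1, 25200, 5 + nc) = (1, 25200, 11)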

Reference: https://blog.csdn.net/magic_ll/article/details/121352907

Reference: https://blog.csdn.net/qq_34124780/article/details/121079317

8. C++ Deployment of yolov5-6.0

(1) Copy the converted best.onnx weight file into the yolov5-opencv-dnn-cpp-main folder.
(2) Modify the yolo.h file (change the class names to those of your own dataset).

#pragma once
#include<iostream>
#include<opencv2/opencv.hpp>

#define YOLO_P6 false // whether to use the P6 model

struct Output {
	int id;             // class id of the detection
	float confidence;   // detection confidence
	cv::Rect box;       // bounding box
};

class Yolo {
public:
	Yolo() {
	}
	~Yolo() {}
	bool readModel(cv::dnn::Net& net, std::string& netPath, bool isCuda);
	bool Detect(cv::Mat& SrcImg, cv::dnn::Net& net, std::vector<Output>& output);
	void drawPred(cv::Mat& img, std::vector<Output> result, std::vector<cv::Scalar> color);

private:
#if(defined YOLO_P6 && YOLO_P6==true)
	const float netAnchors[4][6] = { { 19,27, 44,40, 38,94 },{ 96,68, 86,152, 180,137 },{ 140,301, 303,264, 238,542 },{ 436,615, 739,380, 925,792 } };

	const int netWidth = 1280;  // ONNX model input width (1280)
	const int netHeight = 1280; // ONNX model input height (1280)

	const int strideSize = 4;  //stride size
#else
	const float netAnchors[3][6] = { { 10,13, 16,30, 33,23 },{ 30,61, 62,45, 59,119 },{ 116,90, 156,198, 373,326 } };
	
	const int netWidth = 640;   // ONNX model input width
	const int netHeight = 640;  // ONNX model input height
	
	const int strideSize = 3;   //stride size
#endif // YOLO_P6

	const float netStride[4] = { 8, 16.0,32,64 };

	float boxThreshold = 0.25;
	float classThreshold = 0.25;

	float nmsThreshold = 0.45;
	float nmsScoreThreshold = boxThreshold * classThreshold;
# if 0
	std::vector<std::string> className = { "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
		"fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
		"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
		"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
		"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
		"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
		"potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
		"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
		"hair drier", "toothbrush" };
# endif

	std::vector<std::string> className = { "missing_hole", "mouse_bite", "open_circuit", "short", "spur", "spurious_copper"};
//	std::vector<std::string> className = { "plane", "car"};
};

(3) Modify the main.cpp file (change the weight file path and the image to be detected to your own).

//#include "stdafx.h"
#include "yolo.h"
#include <iostream>
#include<opencv2/opencv.hpp>
#include<math.h>

using namespace std;
using namespace cv;
using namespace dnn;

int main()
{
	string img_path = "/home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/image/01_short_02.jpg";
	string model_path = "/home/lyp/Downloads/yolo/yolov5-opencv-dnn-cpp-main/best.onnx";
	//int num_devices = cv::cuda::getCudaEnabledDeviceCount();
	//if (num_devices <= 0) {
		//cerr << "There is no cuda." << endl;
		//return -1;
	//}
	//else {
		//cout << num_devices << endl;
	//}

	Yolo test;
	Net net;
	if (test.readModel(net, model_path, false)) {
		cout << "read net ok!" << endl;
	}
	else {
		return -1;
	}

	// generate random colors for drawing
	vector<Scalar> color;
	srand(time(0));
	for (int i = 0; i < 80; i++) {
		int b = rand() % 256;
		int g = rand() % 256;
		int r = rand() % 256;
		color.push_back(Scalar(b, g, r));
	}
	vector<Output> result;
	Mat img = imread(img_path);

	if (test.Detect(img, net, result)) {
		test.drawPred(img, result, color);

	}
	else {
		cout << "Detect Failed!"<<endl;
	}

	system("pause");
    return 0;
}

(4) Create a build folder under the yolov5-opencv-dnn-cpp-main folder, then compile inside the build folder.

mkdir build
cd build
cmake ..
make

./main
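
The yolov5-opencv-dnn-cpp-main repository should already provide its own CMakeLists.txt; in case it needs adjusting for this setup, a minimal sketch could look like the following (assuming a system OpenCV >= 4.5 installation and that the sources are main.cpp plus a yolo.cpp implementing the declarations in yolo.h):

cmake_minimum_required(VERSION 3.10)
project(yolov5_opencv_dnn)

set(CMAKE_CXX_STANDARD 11)

# locate the installed OpenCV (>= 4.5 recommended for ONNX support)
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})

add_executable(main main.cpp yolo.cpp)
target_link_libraries(main ${OpenCV_LIBS})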

Reference: https://blog.csdn.net/weixin_43199832/article/details/122323192
