【OpenCV进阶】YOLOV4模型-图像和视频检测

📢:如果你也对机器人、人工智能感兴趣,看来我们志同道合✨
📢:不妨浏览一下我的博客主页【https://blog.csdn.net/weixin_51244852
📢:文章若有幸对你有帮助,可点赞 👍 收藏 ⭐不迷路🙉
📢:内容若有错误,敬请留言 📝指正!原创文,转载请注明出处


前言

1.openvino概述

计算机视觉模型加速推断的开源框架
参考博客:OpenVINO之一:OpenVINO概述
补充:CPU加速要求酷睿i5及以上,可在CPU上获得十倍以上的推断加速。

2.YOLO概述

参考博客:死磕YOLO系列,YOLOv1 的大脑、躯干和手脚
2020年7月18日,OpenCV官网发布了OpenCV的最新版本OpenCV4.4.0,其中支持了YOLOv4。因此4.4版本之前都是不支持的。

一、图像检测

1.全部代码

#include<opencv2\opencv.hpp>
#include<opencv2\dnn.hpp>
#include<fstream>
#include<iostream>

using namespace std;
using namespace cv;
using namespace cv::dnn;

int main()
{
	//---------------------------------------Load class names---------------------------------------
	// One class name per line; the detector's class id is an index into this vector.
	ifstream classNamesFile("D:/opencv-4.4.0/models/yolov4/coco.names");
	vector<string> classNamesVec;
	if (classNamesFile.is_open())
	{
		string className;
		while (getline(classNamesFile, className))
			classNamesVec.push_back(className);
	}
	for (size_t i = 0; i < classNamesVec.size(); i++) {
		cout << i + 1 << "\t" << classNamesVec[i] << endl;
	}

	//---------------------------------------Model setup---------------------------------------
	String cfg = "D:/opencv-4.4.0/models/yolov4/yolov4.cfg";
	String weight = "D:/opencv-4.4.0/models/yolov4/yolov4.weights";

	// Load the Darknet-format network (requires OpenCV >= 4.4 for YOLOv4 layers).
	Net net = readNetFromDarknet(cfg, weight);

	// Read the input image; bail out early instead of crashing inside blobFromImage
	// when the file is missing (imread returns an empty Mat, it does not throw).
	Mat frame = imread("D:/images/dog.jpg");
	if (frame.empty()) {
		cout << "could not read input image" << endl;
		return -1;
	}
	imshow("【输入图像】", frame);

	// Darknet-trained YOLO expects RGB input, but imread loads BGR, so swapRB must
	// be true (the original code omitted it, silently degrading detection quality;
	// the video version of this same pipeline already passes true).
	Mat inputBlob = blobFromImage(frame, 1.0 / 255, Size(608, 608), Scalar(), true, false);
	net.setInput(inputBlob);

	// Names of the unconnected output layers (the three YOLO heads).
	vector<String> outNames = net.getUnconnectedOutLayersNames();
	for (size_t i = 0; i < outNames.size(); i++) {
		cout << "output layer name : " << outNames[i] << endl;
	}
	vector<Mat> outs;
	net.forward(outs, outNames);

	//---------------------------------------Detection---------------------------------------
	// Each output row is [cx, cy, w, h, objectness, class scores...],
	// with box coordinates normalized to [0,1] relative to the input frame.
	const float confidenceThreshold = 0.2f;
	vector<Rect> boxes;
	vector<int> classIds;
	vector<float> confidences;

	for (size_t i = 0; i < outs.size(); ++i) {
		float* data = (float*)outs[i].data;
		for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols) {
			Mat scores = outs[i].row(j).colRange(5, outs[i].cols);
			Point classIdPoint;
			double confidence;
			minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
			if (confidence > confidenceThreshold) {
				int centerX = (int)(data[0] * frame.cols);
				int centerY = (int)(data[1] * frame.rows);
				int width = (int)(data[2] * frame.cols);
				int height = (int)(data[3] * frame.rows);
				int left = centerX - width / 2;
				int top = centerY - height / 2;

				classIds.push_back(classIdPoint.x);
				confidences.push_back((float)confidence);
				boxes.push_back(Rect(left, top, width, height));
			}
		}
	}

	// Non-maximum suppression: score threshold 0.3, IoU threshold 0.2.
	vector<int> indices;
	NMSBoxes(boxes, confidences, 0.3f, 0.2f, indices);

	//---------------------------------------Visualization---------------------------------------
	for (size_t i = 0; i < indices.size(); ++i) {
		int idx = indices[i];
		// Guard against a missing/short names file: indexing past the end of
		// classNamesVec is undefined behavior.
		String className = (classIds[idx] < (int)classNamesVec.size())
			? String(classNamesVec[classIds[idx]])
			: format("id:%d", classIds[idx]);
		Size labelSize = getTextSize(className, FONT_HERSHEY_SIMPLEX, 0.5, 1, 0);
		Rect box = boxes[idx];
		// Filled background strip above the box for the label text.
		Rect textBox = Rect(Point(box.x - 1, box.y),
			Point(box.x + labelSize.width, box.y - labelSize.height));
		// Deterministic pseudo-colors derived from the detection index.
		Scalar rectColor = Scalar(idx * 11 % 256, idx * 22 % 256, idx * 33 % 256);
		Scalar textColor = Scalar(255 - idx * 11 % 256, 255 - idx * 22 % 256, 255 - idx * 33 % 256);
		rectangle(frame, box, rectColor, 2, 8, 0);
		rectangle(frame, textBox, rectColor, -1, 8, 0);
		putText(frame, className, Point(box.x, box.y - 2), FONT_HERSHEY_SIMPLEX, 0.5, textColor, 1, 8);
	}

	imshow("【OpenCV-YOLO】", frame);
	waitKey(0);
	return 0;
}

2.效果演示

在这里插入图片描述
coco.names文档内容就是所有类别。
在这里插入图片描述

二、视频检测

运行环境:win10;opencv4.4;CPU:i5-9600KF。YOLOv4在实时检测方面帧率大概在2-4帧/秒。

1.全部代码

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <cstdlib>

using namespace std;
using namespace cv;
using namespace cv::dnn;
void image_detection();

String yolo_cfg = "D:/opencv-4.4.0/models/yolov4/yolov4.cfg";
String yolo_model = "D:/opencv-4.4.0/models/yolov4/yolov4.weights";

int main()
{
	// Load the Darknet-format YOLOv4 network (requires OpenCV >= 4.4).
	Net net = readNetFromDarknet(yolo_cfg, yolo_model);

	// Class names, one per line; a detection's class id indexes this vector.
	vector<string> classNamesVec;
	ifstream classNamesFile("D:/opencv-4.4.0/models/yolov4/coco.names");
	if (classNamesFile.is_open())
	{
		string className;
		while (std::getline(classNamesFile, className))
			classNamesVec.push_back(className);
	}

	//net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
	net.setPreferableTarget(DNN_TARGET_CPU);
	net.setPreferableBackend(DNN_BACKEND_OPENCV);

	std::vector<String> outNames = net.getUnconnectedOutLayersNames();
	for (size_t i = 0; i < outNames.size(); i++) {
		printf("output layer name : %s\n", outNames[i].c_str());
	}

	VideoCapture capture(0);
	// Fail fast when the camera cannot be opened instead of silently exiting
	// (capture.read() would just return false and skip the loop).
	if (!capture.isOpened()) {
		printf("could not open camera 0\n");
		return -1;
	}
	Mat frame;
	while (capture.read(frame))
	{
		// Rotate + mirror the camera frame before inference.
		transpose(frame, frame);
		flip(frame, frame, 1);
		// swapRB=true: Darknet models expect RGB while OpenCV frames are BGR.
		Mat inputBlob = blobFromImage(frame, 1 / 255.F, Size(416, 416), Scalar(), true, false);
		net.setInput(inputBlob);

		// Run inference, then overlay FPS and per-frame latency.
		std::vector<Mat> outs;
		net.forward(outs, outNames);
		vector<double> layersTimings;
		double freq = getTickFrequency() / 1000;
		double time = net.getPerfProfile(layersTimings) / freq;
		ostringstream ss;
		// Original printed "FPS25.3..." with no separator; add the colon.
		ss << "FPS:" << 1000 / time << ";time:" << time << "ms";
		putText(frame, ss.str(), Point(20, 20), FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255), 2, 8);

		// Collect candidate boxes: each output row is
		// [cx, cy, w, h, objectness, class scores...] in normalized coordinates.
		vector<Rect> boxes;
		vector<int> classIds;
		vector<float> confidences;
		for (size_t i = 0; i < outs.size(); ++i)
		{
			float* data = (float*)outs[i].data;
			for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols)
			{
				Mat scores = outs[i].row(j).colRange(5, outs[i].cols);
				Point classIdPoint;
				double confidence;
				minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
				if (confidence > 0.5)
				{
					int centerX = (int)(data[0] * frame.cols);
					int centerY = (int)(data[1] * frame.rows);
					int width = (int)(data[2] * frame.cols);
					int height = (int)(data[3] * frame.rows);
					int left = centerX - width / 2;
					int top = centerY - height / 2;

					classIds.push_back(classIdPoint.x);
					confidences.push_back((float)confidence);
					boxes.push_back(Rect(left, top, width, height));
				}
			}
		}

		// Non-maximum suppression: score threshold 0.5, IoU threshold 0.2.
		vector<int> indices;
		NMSBoxes(boxes, confidences, 0.5, 0.2, indices);
		for (size_t i = 0; i < indices.size(); ++i)
		{
			int idx = indices[i];
			Rect box = boxes[idx];
			// Guard: if coco.names failed to load, indexing classNamesVec with
			// the raw class id is out-of-range undefined behavior.
			String className = (classIds[idx] < (int)classNamesVec.size())
				? String(classNamesVec[classIds[idx]])
				: format("id:%d", classIds[idx]);
			putText(frame, format("%s:%.2f", className.c_str(), confidences[idx]), box.tl(), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(255, 0, 0), 2, 8);
			rectangle(frame, box, Scalar(0, 0, 255), 2, 8, 0);
		}

		imshow("YOLOv4", frame);
		char c = waitKey(5);
		if (c == 27) { // exit on ESC
			break;
		}
	}
	capture.release(); // release the camera
	waitKey(0);
	return 0;
}

2.效果展示

在这里插入图片描述

三、自训练模型检测

文字教程:
基于Opencv4.4的YOLOv4目标检测
Windows10系统下YOLOv4—Darknet训练过程
训练检测足球的模型:
自训练足球检测模型,训练完数据集后会根据迭代次数生成多个权重文件(.weight),yolov4-voc-ball_3000.weights就是迭代了3000次之后生成的。
而配置文件(.cfg)在训练数据的时候需要设置好参数进行使用,在训练完成后进行测试时,也要用到该文件,并且需要更改里面的少部分参数,如下图其中3、4行代码里面的参数表示在测试的时候需要调整的,而6、7是训练时用到的。
此外,opencv能支持YOLO,实际上就是增加了一个跑YOLO的接口,基于darknet训练出来的YOLO权重文件不仅可以用于opencv,凡是支持YOLO的端口,都可以通过加载权重文件和配置文件实现自定义对象检测。
在这里插入图片描述
其次,在全部代码中需要更改配置文件权重文件以及类别名的路径。
在这里插入图片描述

全部代码

视频检测

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <cstdlib>

using namespace std;
using namespace cv;
using namespace cv::dnn;
void image_detection();

String yolo_cfg = "D:/opencv-4.4.0/models/yolov4/yolov4-voc-ball-test.cfg";
String yolo_model = "D:/opencv-4.4.0/models/yolov4/yolov4-voc-ball_3000.weights";

int main()
{
	// Load the custom-trained (ball detector) Darknet network.
	Net net = readNetFromDarknet(yolo_cfg, yolo_model);

	// Class names for the custom model, one per line.
	vector<string> classNamesVec;
	ifstream classNamesFile("D:/opencv-4.4.0/models/yolov4/ball.names");
	if (classNamesFile.is_open())
	{
		string className;
		while (std::getline(classNamesFile, className))
			classNamesVec.push_back(className);
	}

	//net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
	net.setPreferableTarget(DNN_TARGET_CPU);
	net.setPreferableBackend(DNN_BACKEND_OPENCV);

	std::vector<String> outNames = net.getUnconnectedOutLayersNames();
	for (size_t i = 0; i < outNames.size(); i++) {
		printf("output layer name : %s\n", outNames[i].c_str());
	}

	VideoCapture capture(0);
	// Fail fast when the camera cannot be opened instead of silently exiting
	// (capture.read() would just return false and skip the loop).
	if (!capture.isOpened()) {
		printf("could not open camera 0\n");
		return -1;
	}
	Mat frame;
	while (capture.read(frame))
	{
		// Mirror the camera frame before inference.
		flip(frame, frame, 1);
		// swapRB=true: Darknet models expect RGB while OpenCV frames are BGR.
		Mat inputBlob = blobFromImage(frame, 1 / 255.F, Size(416, 416), Scalar(), true, false);
		net.setInput(inputBlob);

		// Run inference, then overlay FPS and per-frame latency.
		std::vector<Mat> outs;
		net.forward(outs, outNames);
		vector<double> layersTimings;
		double freq = getTickFrequency() / 1000;
		double time = net.getPerfProfile(layersTimings) / freq;
		ostringstream ss;
		// Original printed "FPS25.3..." with no separator; add the colon.
		ss << "FPS:" << 1000 / time << ";time:" << time << "ms";
		putText(frame, ss.str(), Point(20, 20), FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255), 2, 8);

		// Collect candidate boxes: each output row is
		// [cx, cy, w, h, objectness, class scores...] in normalized coordinates.
		vector<Rect> boxes;
		vector<int> classIds;
		vector<float> confidences;
		for (size_t i = 0; i < outs.size(); ++i)
		{
			float* data = (float*)outs[i].data;
			for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols)
			{
				Mat scores = outs[i].row(j).colRange(5, outs[i].cols);
				Point classIdPoint;
				double confidence;
				minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
				if (confidence > 0.5)
				{
					int centerX = (int)(data[0] * frame.cols);
					int centerY = (int)(data[1] * frame.rows);
					int width = (int)(data[2] * frame.cols);
					int height = (int)(data[3] * frame.rows);
					int left = centerX - width / 2;
					int top = centerY - height / 2;

					classIds.push_back(classIdPoint.x);
					confidences.push_back((float)confidence);
					boxes.push_back(Rect(left, top, width, height));
				}
			}
		}

		// Non-maximum suppression: score threshold 0.5, IoU threshold 0.2.
		vector<int> indices;
		NMSBoxes(boxes, confidences, 0.5, 0.2, indices);
		for (size_t i = 0; i < indices.size(); ++i)
		{
			int idx = indices[i];
			Rect box = boxes[idx];
			// Guard: if ball.names failed to load, indexing classNamesVec with
			// the raw class id is out-of-range undefined behavior.
			String className = (classIds[idx] < (int)classNamesVec.size())
				? String(classNamesVec[classIds[idx]])
				: format("id:%d", classIds[idx]);
			putText(frame, format("%s:%.2f", className.c_str(), confidences[idx]), box.tl(), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(255, 0, 0), 2, 8);
			rectangle(frame, box, Scalar(0, 0, 255), 2, 8, 0);
		}

		imshow("YOLOv4", frame);
		char c = waitKey(5);
		if (c == 27) { // exit on ESC
			break;
		}
	}
	capture.release(); // release the camera
	waitKey(0);
	return 0;
}

在这里插入图片描述
在这里插入图片描述

  • 2
    点赞
  • 7
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

嵌小超

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值