YOLOv8 Inference with TensorRT 10.8
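
The detector below consumes an already serialized engine file (yolov8.engine). If you start from an exported YOLOv8/YOLO11/YOLO12 ONNX model, the engine can be built once up front, either with the trtexec tool that ships with TensorRT or programmatically. The following is a minimal sketch of the programmatic route, assuming TensorRT 10.x; the file names yolov8.onnx and yolov8.engine are placeholders for your own model. It uses the same NvOnnxParser header that the detector header below already includes.

// build_engine.cpp - minimal sketch: parse an ONNX model and serialize a TensorRT engine
#include <fstream>
#include <iostream>
#include <memory>
#include "NvInfer.h"
#include "NvOnnxParser.h"
using namespace nvinfer1;

class BuildLogger : public ILogger {
	void log(Severity severity, const char* msg) noexcept override {
		if (severity <= Severity::kWARNING) std::cout << msg << std::endl;
	}
};

int main() {
	BuildLogger logger;
	std::unique_ptr<IBuilder> builder(createInferBuilder(logger));
	std::unique_ptr<INetworkDefinition> network(builder->createNetworkV2(0));
	std::unique_ptr<nvonnxparser::IParser> parser(nvonnxparser::createParser(*network, logger));
	if (!parser->parseFromFile("yolov8.onnx", static_cast<int>(ILogger::Severity::kWARNING))) {
		std::cerr << "failed to parse the ONNX model" << std::endl;
		return -1;
	}
	std::unique_ptr<IBuilderConfig> config(builder->createBuilderConfig());
	if (builder->platformHasFastFp16()) {
		config->setFlag(BuilderFlag::kFP16); // optional: FP16 precision if the GPU supports it
	}
	std::unique_ptr<IHostMemory> plan(builder->buildSerializedNetwork(*network, *config));
	std::ofstream out("yolov8.engine", std::ios::binary);
	out.write(reinterpret_cast<const char*>(plan->data()), plan->size());
	std::cout << "engine saved to yolov8.engine" << std::endl;
	return 0;
}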

yolov8_yolo11_yolo12_det.h

#pragma once
#include <fstream>
#include <iostream>
#include <sstream>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "NvOnnxParser.h"
struct DetectResult {
	int classId;
	float score;
	cv::Rect box;
};
using namespace nvinfer1;
using namespace nvonnxparser;
using namespace cv;

class YOLOv81112TRTDetector {
public:
	void initConfig(std::string enginefile, float confidence_threshold, float score_threshold);
	void detect(cv::Mat& frame, std::vector<DetectResult>& results);
	~YOLOv81112TRTDetector();
private:
	float confidence_threshold = 0.4;
	float score_threshold = 0.25;
	int input_w;
	int input_h;
	int output_h;
	int output_w;
	ICudaEngine* engine{ nullptr };
	IExecutionContext* context{ nullptr };
	IRuntime* runtime{ nullptr };
	void* buffers[2] = { NULL, NULL };
	std::vector<float> prob;
	cudaStream_t stream;
};

yolov8_yolo11_yolo12_det.cpp

#include "yolov8_yolo11_yolo12_det.h"
class Logger : public ILogger
{
	void log(Severity severity, const char* msg) noexcept override
	{
		// suppress info-level messages
		if (severity != Severity::kINFO)
			std::cout << msg << std::endl;
	}
} gLogger;
YOLOv81112TRTDetector::~YOLOv81112TRTDetector() {
	cudaStreamSynchronize(stream);
	cudaStreamDestroy(stream);
	// free the device buffers allocated in initConfig
	if (buffers[0]) cudaFree(buffers[0]);
	if (buffers[1]) cudaFree(buffers[1]);
	// TensorRT 10 objects are released with delete
	delete context;
	delete engine;
	delete runtime;
	std::cout << "destroy instance done!" << std::endl;
}
void YOLOv81112TRTDetector::initConfig(std::string enginepath, float conf, float scored) {
	this->confidence_threshold = conf;
	this->score_threshold = scored;
	std::ifstream file(enginepath, std::ios::binary);
	char* trtModelStream = NULL;
	int size = 0;
	if (file.good()) {
		file.seekg(0, file.end);
		size = file.tellg();
		file.seekg(0, file.beg);
		trtModelStream = new char[size];
		assert(trtModelStream);
		file.read(trtModelStream, size);
		file.close();
	}
	this->runtime = createInferRuntime(gLogger);
	assert(this->runtime != nullptr);
	this->engine = this->runtime->deserializeCudaEngine(trtModelStream, size);
	assert(this->engine != nullptr);
	this->context = engine->createExecutionContext();
	assert(this->context != nullptr);
	delete[] trtModelStream;
	auto inputDims = this->engine->getTensorShape("images");
	auto outDims = this->engine->getTensorShape("output0");
	this->input_h = inputDims.d[2];
	this->input_w = inputDims.d[3];
	printf("inputH : %d, inputW: %d \n", this->input_h, this->input_w);
	this->output_h = outDims.d[1];
	this->output_w = outDims.d[2];
	std::cout << "out data format: " << this->output_h << "x" << this->output_w << std::endl;
	cudaMalloc(&buffers[0], this->input_h * input_w * 3 * sizeof(float));
	cudaMalloc(&buffers[1], this->output_h * this->output_w * sizeof(float));
	this->prob.resize(this->output_h * this->output_w);
	cudaStreamCreate(&stream);
}
void YOLOv81112TRTDetector::detect(cv::Mat& frame, std::vector<DetectResult>& results) {
	int64 start = cv::getTickCount();
	int w = frame.cols;
	int h = frame.rows;
	int _max = std::max(h, w);
	cv::Mat image = cv::Mat::zeros(cv::Size(_max, _max), CV_8UC3);
	cv::Rect roi(0, 0, w, h);
	frame.copyTo(image(roi));
	float x_factor = image.cols / static_cast<float>(this->input_w);
	float y_factor = image.rows / static_cast<float>(this->input_h);
	cv::Mat blob = cv::dnn::blobFromImage(image, 1 / 255.0, cv::Size(this->input_w, this->input_h), cv::Scalar(0, 0, 0), true, false);
	// copy input to the device, run inference, copy the output back, and wait for the copies to finish
	cudaMemcpyAsync(buffers[0], blob.ptr<float>(), 3 * this->input_h * this->input_w * sizeof(float), cudaMemcpyHostToDevice, stream);
	cudaStreamSynchronize(stream);
	context->executeV2(buffers);
	cudaMemcpyAsync(prob.data(), buffers[1], this->output_h * this->output_w * sizeof(float), cudaMemcpyDeviceToHost, stream);
	cudaStreamSynchronize(stream);
	std::vector<cv::Rect> boxes;
	std::vector<int> classIds;
	std::vector<float> confidences;
	// output0 layout is 84 x 8400: rows 0-3 are cx, cy, w, h; rows 4-83 are the 80 class scores
	cv::Mat dout(this->output_h, this->output_w, CV_32F, (float*)prob.data());
	cv::Mat det_output = dout.t();
	for (int i = 0; i < det_output.rows; i++) {
		cv::Mat classes_scores = det_output.row(i).colRange(4, this->output_h);
		cv::Point classIdPoint;
		double score;
		minMaxLoc(classes_scores, 0, &score, 0, &classIdPoint);
		if (score > this->score_threshold)
		{
			float cx = det_output.at<float>(i, 0);
			float cy = det_output.at<float>(i, 1);
			float ow = det_output.at<float>(i, 2);
			float oh = det_output.at<float>(i, 3);
			int x = static_cast<int>((cx - 0.5 * ow) * x_factor);
			int y = static_cast<int>((cy - 0.5 * oh) * y_factor);
			int width = static_cast<int>(ow * x_factor);
			int height = static_cast<int>(oh * y_factor);
			cv::Rect box;
			box.x = x;
			box.y = y;
			box.width = width;
			box.height = height;
			boxes.push_back(box);
			classIds.push_back(classIdPoint.x);
			confidences.push_back(score);
		}
	}
	std::vector<int> indexes;
	cv::dnn::NMSBoxes(boxes, confidences, this->score_threshold, 0.45f, indexes); // 0.45f is the NMS IoU threshold
	for (size_t i = 0; i < indexes.size(); i++) {
		DetectResult dr;
		int index = indexes[i];
		int idx = classIds[index];
		dr.box = boxes[index];
		dr.classId = idx;
		dr.score = confidences[index];
		cv::rectangle(frame, boxes[index], cv::Scalar(0, 0, 255), 1, 8);
		cv::rectangle(frame, cv::Point(boxes[index].tl().x, boxes[index].tl().y - 20),
			cv::Point(boxes[index].br().x, boxes[index].tl().y), cv::Scalar(0, 255, 255), -1);
		results.push_back(dr);
	}
	float t = (cv::getTickCount() - start) / static_cast<float>(cv::getTickFrequency());
	putText(frame, cv::format("FPS: %.2f", 1.0 / t), cv::Point(20, 40), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(0, 255, 0), 2, 8);
}
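
The detect() implementation above drives the engine with the synchronous executeV2() call. In TensorRT 10 the asynchronous alternative is enqueueV3(), which runs on an explicit CUDA stream and requires each I/O buffer to be registered by tensor name beforehand. A minimal sketch of swapping it in for the executeV2 line, assuming the same "images"/"output0" tensor names used in initConfig:

	// asynchronous alternative to context->executeV2(buffers) (TensorRT 10, sketch only)
	context->setTensorAddress("images", buffers[0]);   // bind input buffer by tensor name
	context->setTensorAddress("output0", buffers[1]);  // bind output buffer by tensor name
	context->enqueueV3(stream);                        // enqueue inference on the same CUDA stream
	// keep the surrounding cudaMemcpyAsync / cudaStreamSynchronize calls unchanged

Because the copies and the inference then share one stream, the synchronization before the inference call becomes unnecessary and only the final cudaStreamSynchronize before reading prob is required.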

main.cpp

#include "yolov8_yolo11_yolo12_det.h"
#include <iostream>
#include <fstream>
std::string label_map = "coco.names";
int main(int argc, char** argv) {
	std::vector<std::string> classNames;
	std::ifstream fp(label_map);
	std::string name;
	while (std::getline(fp, name)) {
		if (!name.empty()) {
			classNames.push_back(name);
		}
	}
	fp.close();
	std::shared_ptr<YOLOv81112TRTDetector> detector(new YOLOv81112TRTDetector());
	detector->initConfig("yolov8.engine", 0.4, 0.25f);
	cv::VideoCapture capture("people.mp4");
	cv::Mat frame;
	std::vector<DetectResult> results;
	while (true) {
		if (!capture.read(frame) || frame.empty()) {
			break;
		}
		detector->detect(frame, results);
		for (DetectResult dr : results) {
			cv::Rect box = dr.box;
			cv::putText(frame, classNames[dr.classId], cv::Point(box.tl().x, box.tl().y - 10), cv::FONT_HERSHEY_SIMPLEX, .5, cv::Scalar(0, 0, 0));
		}
		cv::imshow("YOLOv8对象检测 + TensorRT10.8 - OpenCV", frame);
		char c = cv::waitKey(1);
		if (c == 27) { // ESC 退出
			break;
		}
		// reset for next frame
		results.clear();
	}
	return 0;
}

CMakeLists.txt

cmake_minimum_required(VERSION 3.18)
project(yolov8) 	
set(OpenCV_DIR "E:\\Opencv gpu\\newbuild\\install") 
set(OpenCV_INCLUDE_DIRS ${OpenCV_DIR}\\include)
set(OpenCV_LIB_DIRS ${OpenCV_DIR}\\x64\\vc17\\lib)
set(OpenCV_LIB_DEBUG ${OpenCV_DIR}\\x64\\vc17\\lib\\opencv_world470d.lib
                     ${OpenCV_DIR}\\x64\\vc17\\lib\\opencv_img_hash470d.lib) 
set(OpenCV_LIB_RELEASE ${OpenCV_DIR}\\x64\\vc17\\lib\\opencv_world470.lib
                       ${OpenCV_DIR}\\x64\\vc17\\lib\\opencv_img_hash470.lib)				
set(CMAKE_CUDA_ARCHITECTURES 86)		
find_package(CUDA REQUIRED)			
enable_language(CUDA)  						
find_package(OpenCV QUIET)
include_directories(${CUDA_INCLUDE_DIRS})
include_directories(${OpenCV_INCLUDE_DIRS})
link_directories(${OpenCV_LIB_DIRS})
# TensorRT headers and import libraries; adjust this placeholder path to your local TensorRT 10.8 install
set(TENSORRT_DIR "E:\\TensorRT-10.8" CACHE PATH "TensorRT root directory")
include_directories(${TENSORRT_DIR}\\include)
link_directories(${TENSORRT_DIR}\\lib)
set(SOURCES
    main.cpp
    yolov8_yolo11_yolo12_det.cpp
    yolov8_yolo11_yolo12_det.h
)
add_executable(yolov8 ${SOURCES})
target_link_libraries(yolov8 "nvinfer.lib" "nvinfer_plugin.lib" "nvonnxparser.lib")  	    		
target_link_libraries(yolov8 ${CUDA_LIBRARIES}) 
target_link_libraries(yolov8
    $<$<CONFIG:Debug>:${OpenCV_LIB_DEBUG}>
    $<$<CONFIG:Release>:${OpenCV_LIB_RELEASE}>
)
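
With the OpenCV, CUDA, and TensorRT paths above adjusted to the local machine, a typical out-of-source build on Windows is cmake -B build followed by cmake --build build --config Release. The resulting yolov8 executable expects yolov8.engine, coco.names, and people.mp4 in its working directory.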
