Dual cameras (Intel RealSense), YOLOv5, TensorRT

#include "NvInfer.h"
#include <NvOnnxParser.h>
#include <NvInferRuntime.h>
#include <cuda_runtime.h>
#include <fstream>
#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>
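
// The CUDA runtime calls further down ignore their return codes. As a minimal sketch (the CUDA_CHECK
// name is illustrative and not used by the original code), an error-checking macro like this can be
// wrapped around cudaMalloc/cudaMemcpyAsync/cudaStreamCreate to surface failures early:
#define CUDA_CHECK(call)                                                        \
	do                                                                          \
	{                                                                           \
		cudaError_t err_ = (call);                                              \
		if (err_ != cudaSuccess)                                                \
		{                                                                       \
			std::cerr << "CUDA error: " << cudaGetErrorString(err_)             \
			          << " at " << __FILE__ << ":" << __LINE__ << std::endl;    \
			exit(-1);                                                           \
		}                                                                       \
	} while (0)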

using namespace nvinfer1;
using namespace cv;

class MeiLogger :
		public ILogger
{
public:
	explicit MeiLogger(ILogger::Severity severity = ILogger::Severity::kINFO) : severity_(severity)
	{}

	ILogger::Severity severity_;

	void log(ILogger::Severity severity, const char *msg) noexcept override
	{
		if (severity <= severity_)
		{
			std::cerr << msg << std::endl;
		}
	}
};

static MeiLogger meiLogger;

class Ball :
		public Rect_<float>
{
public:
	Ball(float centerX, float centerY, int labelNum, float confidence) : centerX_(centerX), centerY_(centerY), labelNum_(labelNum), confidence_(confidence)
	{}

	float centerX_, centerY_, confidence_;
	int labelNum_;
};

class meiTrtDetector
{
private:
	char *modelStream_{nullptr};
	IRuntime *meiRuntime_ = nullptr;
	ICudaEngine *meiCudaEngine_ = nullptr;
	IExecutionContext *meiExecutionContext_ = nullptr;
	void *gpuBuffers_[2]{nullptr, nullptr};
	float *cpuOutputBuffer_ = nullptr;
	const char *enginePath_;
	int inputHeight_ = 640;
	int inputWidth_ = 640;
	int offsetX_ = 0;
	int offsetY_ = 0;
	int inputSize_ = inputHeight_ * inputWidth_ * 3;
	int outputSize_ = 0;
	float imgRatio_;
	float *inputBlob_ = new float[inputSize_];
	int outputMaxNum_;
	float minObjectness_;
	float minConfidence_;
	float maxIou_;
	cudaStream_t meiCudaStream_;
	std::vector<Ball> detectedBalls_;
	std::vector<Ball> pickedBalls_;

	//Query the structure of the network output layer
	int getoutputSize()
	{
		auto out_dims = meiCudaEngine_->getBindingDimensions(1);
		int outputSize = 1;
		for (int j = 0; j < out_dims.nbDims; j++)
		{
			std::cout << "output dim" << j << ": size = " << out_dims.d[j] << std::endl;//以YOLOv5为例,输出1*25200*85
			outputSize *= out_dims.d[j];
		}
		outputMaxNum_ = out_dims.d[1];//i.e. 25200 candidate boxes
		return outputSize;
	}

	static float calcIou(Ball ball1, Ball ball2)
	{
		Rect_<float> interRect = ball1 & ball2;
		float interArea = interRect.area();
		float unionArea = ball1.area() + ball2.area() - interArea;
		return interArea / unionArea;
	}

public:
	meiTrtDetector(const char *enginePath, float minObjectness = 0.5, float minConfidence = 0.6, float maxIou = 0.95) :
			enginePath_(enginePath), minObjectness_(minObjectness), minConfidence_(minConfidence), maxIou_(maxIou)
	{}

	~meiTrtDetector()
	{
		cudaStreamDestroy(meiCudaStream_);
		delete[] inputBlob_;
		delete[] cpuOutputBuffer_;
		cudaFree(gpuBuffers_[0]);//device memory allocated with cudaMalloc must be released with cudaFree, not free()
		cudaFree(gpuBuffers_[1]);
		delete meiExecutionContext_;
		delete meiCudaEngine_;
		delete meiRuntime_;
		delete[] modelStream_;
	}

	// Read the serialized model file and deserialize it into an engine
	void loadEngine()
	{
		std::ifstream inputFileStream(enginePath_, std::ios::binary);
		size_t engineSize{0};
		if (inputFileStream.good())
		{
			inputFileStream.seekg(0, inputFileStream.end);
			engineSize = inputFileStream.tellg();
			inputFileStream.seekg(0, inputFileStream.beg);
			modelStream_ = new char[engineSize];
			assert(modelStream_);
			inputFileStream.read(modelStream_, engineSize);
			inputFileStream.close();
		}
		else
		{
			std::cerr << "failed to open engine file: " << enginePath_ << std::endl;
			exit(-1);
		}
		meiRuntime_ = createInferRuntime(meiLogger);
		assert(meiRuntime_ != nullptr);
		meiCudaEngine_ = meiRuntime_->deserializeCudaEngine(modelStream_, engineSize);//deserialize the engine
		assert(meiCudaEngine_ != nullptr);
		meiExecutionContext_ = meiCudaEngine_->createExecutionContext();
		assert(meiExecutionContext_ != nullptr);
	}

	//Allocate host and device buffers
	void initBuffers()
	{
		outputSize_ = getoutputSize();
		//the input/output tensor names are determined by how the network was exported
		const int inputIndex = meiCudaEngine_->getBindingIndex("images");
		const int outputIndex = meiCudaEngine_->getBindingIndex("output0");
		assert(inputIndex == 0);
		assert(outputIndex == 1);
		cudaMalloc((void **) &gpuBuffers_[inputIndex], inputSize_ * sizeof(float));//device memory that receives the input from the host
		cudaMalloc((void **) &gpuBuffers_[outputIndex], outputSize_ * sizeof(float));//device memory that holds the output copied back to the host
		cudaStreamCreate(&meiCudaStream_);
		cpuOutputBuffer_ = new float[outputSize_]();//host memory that receives the inference result from the GPU
	}

	//Image preprocessing: letterbox resize, BGR->RGB, HWC->CHW, normalize
	void imgProcess(Mat inputImg)
	{
		//resize while keeping aspect ratio, then pad to the network input size
		imgRatio_ = std::min((inputWidth_ * 1.) / inputImg.cols, (inputHeight_ * 1.) / inputImg.rows);
		int borderWidth = inputImg.cols * imgRatio_;
		int borderHeight = inputImg.rows * imgRatio_;
		offsetX_ = (inputWidth_ - borderWidth) / 2;
		offsetY_ = (inputHeight_ - borderHeight) / 2;
		resize(inputImg, inputImg, Size(borderWidth, borderHeight));
		//pad with a constant color so the image becomes exactly the network input size (the right/bottom
		//padding absorbs any odd remainder, so the result is always inputWidth_ x inputHeight_)
		copyMakeBorder(inputImg, inputImg, offsetY_, inputHeight_ - borderHeight - offsetY_, offsetX_, inputWidth_ - borderWidth - offsetX_, BORDER_CONSTANT, Scalar(255, 255, 255));
		cvtColor(inputImg, inputImg, COLOR_BGR2RGB);

		//HWC -> CHW and normalize to [0, 1]
		int channels = inputImg.channels();
		int height = inputImg.rows;
		int width = inputImg.cols;
		for (int c = 0; c < channels; ++c)
		{
			for (int h = 0; h < height; ++h)
			{
				for (int w = 0; w < width; ++w)
				{
					inputBlob_[c * width * height + h * width + w] = inputImg.at<Vec3b>(h, w)[c] / 255.0f;
				}
			}
		}
	}

	// Run inference
	void infer()
	{
		cudaMemcpyAsync(gpuBuffers_[0], inputBlob_, inputSize_ * sizeof(float), cudaMemcpyHostToDevice, meiCudaStream_);//copy the input blob to device memory
		auto start = std::chrono::system_clock::now();//timer
		meiExecutionContext_->enqueueV2((void **) gpuBuffers_, meiCudaStream_, nullptr);//asynchronous inference
		cudaMemcpyAsync((void *) cpuOutputBuffer_, gpuBuffers_[1], outputSize_ * sizeof(float), cudaMemcpyDeviceToHost, meiCudaStream_);//copy the result back to host memory
		cudaStreamSynchronize(meiCudaStream_);//synchronize the stream
		auto end = std::chrono::system_clock::now();

		std::cout << "inference time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
	}

	//Post-process the inference output
	void postProcess()
	{
		detectedBalls_.clear();
		pickedBalls_.clear();
		float *ptr = cpuOutputBuffer_;
		for (int i = 0; i < outputMaxNum_; ++i)
		{
			float objectness = ptr[4];//probability that this grid cell contains an object (objectness)
			if (objectness >= minObjectness_)
			{
				int label = std::max_element(ptr + 5, ptr + 85) - (ptr + 5);
				float confidence = ptr[label + 5] * objectness;//probability that the object belongs to this class, scaled by objectness

				if (confidence >= minConfidence_)
				{
					Ball ball = Ball(ptr[0] / imgRatio_, ptr[1] / imgRatio_, label, confidence);
					ball.width = ptr[2] / imgRatio_;
					ball.height = ptr[3] / imgRatio_;
					ball.x = ball.centerX_ - ball.width * 0.5 - offsetX_ / imgRatio_;//subtract the letterbox padding
					ball.y = ball.centerY_ - ball.height * 0.5 - offsetY_ / imgRatio_;
					detectedBalls_.push_back(ball);
				}
			}
			ptr += 85;
		}
		std::cout << "found " << detectedBalls_.size() << " objects" << std::endl;

		//NMS: prevent keeping overlapping boxes (e.g. a large box enclosing a smaller one)
		for (int i = 0; i < detectedBalls_.size(); ++i)
		{
			bool pick = true;
			for (int j = 0; j < pickedBalls_.size(); ++j)
			{
				if (calcIou(detectedBalls_.at(i), pickedBalls_.at(j)) > maxIou_)//if the two boxes overlap too much, discard one
				{
					pick = false;
				}
			}
			if (pick)
			{
				pickedBalls_.push_back(detectedBalls_.at(i));
			}
		}
		std::cout << "picked " << pickedBalls_.size() << " objects" << std::endl;
	}

	void drawBox(Mat img)
	{
		for (int i = 0; i < pickedBalls_.size(); ++i)
		{
			rectangle(img, pickedBalls_.at(i), Scalar(0, 255, 0), 2);
			putText(img, std::to_string(pickedBalls_.at(i).labelNum_) + " " + std::to_string(pickedBalls_.at(i).confidence_), Point(pickedBalls_.at(i).x, pickedBalls_.at(i).y),
			        FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 0, 0), 2);
		}
	}
};
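
// The header <NvOnnxParser.h> is included above but never used. For reference, this is a minimal
// sketch of how a serialized .engine file (e.g. yolov5s-fp16.engine) can be built from a YOLOv5 ONNX
// export with the TensorRT builder API; the function name and file paths are illustrative and this
// helper is not called by the original code:
void buildEngineFromOnnx(const char *onnxPath, const char *enginePath)
{
	IBuilder *builder = createInferBuilder(meiLogger);
	const auto explicitBatch = 1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
	INetworkDefinition *network = builder->createNetworkV2(explicitBatch);
	nvonnxparser::IParser *parser = nvonnxparser::createParser(*network, meiLogger);
	parser->parseFromFile(onnxPath, static_cast<int>(ILogger::Severity::kWARNING));//parse the ONNX graph
	IBuilderConfig *config = builder->createBuilderConfig();
	config->setFlag(BuilderFlag::kFP16);//build an FP16 engine, matching yolov5s-fp16.engine
	IHostMemory *serialized = builder->buildSerializedNetwork(*network, *config);//optimize and serialize
	std::ofstream engineFile(enginePath, std::ios::binary);
	engineFile.write(static_cast<const char *>(serialized->data()), serialized->size());
	delete serialized;
	delete config;
	delete parser;
	delete network;
	delete builder;
}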

class rsImgLoader
{
private:
	int cameraId_;
	int imgWidth_;
	int imgHeight_;
	int framerate_;
	rs2::context context_;
	rs2::device_list deviceList_;
	rs2::config config_;
	rs2::align myAlign_ = rs2::align(RS2_STREAM_COLOR);// align to the color stream, so the color image itself is left unchanged
	rs2::pipeline pipe_;
	rs2::colorizer colorizer_;
public:
	rsImgLoader(int cameraId,int imgWidth,int imgHeight, int framerate) : cameraId_(cameraId),imgWidth_(imgWidth),imgHeight_(imgHeight),framerate_(framerate)
	{};
	~rsImgLoader()
	{
		pipe_.stop();
	}

	void init()
	{
		deviceList_=context_.query_devices();
		if (cameraId_>=deviceList_.size())
		{
			std::cerr<<deviceList_.size()<<" camera(s) detected. No camera with id "<<cameraId_<<std::endl;
			exit(-1);
		}
		else
		{
			std::cout<<"camera "<<cameraId_<<" connected"<<std::endl;
		}

		config_.enable_device(deviceList_[cameraId_].get_info(RS2_CAMERA_INFO_SERIAL_NUMBER));
		config_.enable_stream(RS2_STREAM_COLOR,imgWidth_,imgHeight_,RS2_FORMAT_BGR8,framerate_);
		config_.enable_stream(RS2_STREAM_DEPTH,imgWidth_,imgHeight_,RS2_FORMAT_Z16,framerate_);
		pipe_.start(config_);
	}

	void getImg(Mat *colorImg, Mat *depthImg)
	{
		rs2::frameset frameSet=pipe_.wait_for_frames();
		frameSet=myAlign_.process(frameSet);//align the depth frame to the color frame
		rs2::frame colorFrame=frameSet.get_color_frame();
		rs2::frame depthFrame=frameSet.get_depth_frame().apply_filter(colorizer_);//colorize the depth frame for display

		*colorImg=Mat(Size(imgWidth_,imgHeight_),CV_8UC3,(void *)colorFrame.get_data(),Mat::AUTO_STEP);
		*depthImg=Mat(Size(imgWidth_,imgHeight_),CV_8UC3,(void *)depthFrame.get_data(),Mat::AUTO_STEP);
	}
};
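
// getImg() above colorizes the depth frame purely for display. If the metric distance to a detection
// is needed, the raw aligned depth frame can be queried directly; a minimal sketch (the helper name
// and parameters are illustrative, and it is not called by the code below):
float queryDistance(rs2::pipeline &pipe, rs2::align &align, int pixelX, int pixelY)
{
	rs2::frameset frameSet = align.process(pipe.wait_for_frames());//align depth to the color stream
	rs2::depth_frame depthFrame = frameSet.get_depth_frame();
	return depthFrame.get_distance(pixelX, pixelY);//distance in meters at the given pixel
}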

int main()
{
	meiTrtDetector trtDetector("yolov5s-fp16.engine");
	Mat colorImg, depthImg;
	Mat colorImg2, depthImg2;
	rsImgLoader imgLoader(0,640,480,30);
	rsImgLoader imgLoader2(1,640,480,30);
	imgLoader.init();
	imgLoader2.init();

	trtDetector.loadEngine();
	trtDetector.initBuffers();
	while (true)
	{
		imgLoader.getImg(&colorImg,&depthImg);
		if (colorImg.empty())
		{
			continue;
		}
		trtDetector.imgProcess(colorImg);
		trtDetector.infer();
		trtDetector.postProcess();
		trtDetector.drawBox(colorImg);
		imshow("colorImg", colorImg);
		imshow("depthImg",depthImg);

		imgLoader2.getImg(&colorImg2,&depthImg2);
		if (colorImg2.empty())
		{
			continue;
		}
		trtDetector.imgProcess(colorImg2);
		trtDetector.infer();
		trtDetector.postProcess();
		trtDetector.drawBox(colorImg2);
		imshow("colorImg2", colorImg2);
		imshow("depthImg2",depthImg2);
		if (waitKey(5) == 27)
		{
			break;
		}
	}
//	Mat colorImg = imread("22.jpg");

//	waitKey(0);

	return 0;
}