YOLOv5 instance segmentation inference (OpenCV DNN module)

Training YOLOv5 produces a .pt weights file; export.py converts it into an ONNX file that can be used for deployment:

 python .\export.py --weights gaoqing640.pt --include onnx --imgsz 640 --opset 12

Opening the model in Netron shows its two outputs: output0 holds the detection boxes and output1 holds the mask prototypes.
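
If you would rather confirm this in code than in Netron, OpenCV's DNN module can list the output layer names after loading the model. A minimal sketch ("seg.onnx" is simply the file name used later in this post):

#include <iostream>
#include <opencv2/dnn.hpp>

int main() {
	// Load the exported model and print its unconnected output layer names.
	// For a YOLOv5-seg export these should be "output0" and "output1".
	cv::dnn::Net net = cv::dnn::readNetFromONNX("seg.onnx");
	for (const auto& name : net.getUnconnectedOutLayersNames())
		std::cout << name << std::endl;
	return 0;
}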

1. output0 contains 25200 detection rows (for a 640*640 input, sampling at strides 8, 16 and 32 gives 80*80*3 + 40*40*3 + 20*20*3 anchors), each with 38 values. Take the last 32 values of a detection as a one-dimensional float[]: this is matrix 1, 1 row by 32 columns.

2. Reshape output1 from 1*32*160*160 into a two-dimensional float[][] of size 32*25600, i.e. flatten each 160*160 prototype: this is matrix 2, 32 rows by 25600 columns.

3. Multiply matrix 1 by matrix 2: 1*32 times 32*25600 gives matrix 3, which is 1*25600.

4. Reshape matrix 3 into 160*160, then interpolate it up to the original image size to get matrix 4.

5. Apply a sigmoid to every element of matrix 4 and binarize the result. Combined with the detection box, this gives the mask for that object (a standalone sketch of these steps follows this list).
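
Before looking at the full program, here is a standalone sketch of steps 1 to 5 with dummy data, so the matrix shapes are easy to follow. The random values, the 0.5 threshold and the 640*640 target size are assumptions for illustration only; in the real pipeline the coefficients come from output0 and the prototypes from output1:

#include <opencv2/opencv.hpp>
using namespace cv;

int main() {
	// Dummy inputs, for illustration only: in the real pipeline `coeffs` is the
	// last 32 values of one output0 row and `protos` is the reshaped output1.
	Mat coeffs(1, 32, CV_32F);          // matrix 1: 1 x 32 mask coefficients
	Mat protos(32, 160 * 160, CV_32F);  // matrix 2: 32 x 25600 flattened prototypes
	randu(coeffs, -1.0, 1.0);
	randu(protos, -1.0, 1.0);

	Mat m = coeffs * protos;            // matrix 3: 1 x 25600
	m = m.reshape(1, 160);              // 160 x 160

	// sigmoid, then resize to the (assumed) original image size of 640 x 640
	Mat sig;
	exp(-m, sig);
	sig = 1.0 / (1.0 + sig);
	Mat mask;
	resize(sig, mask, Size(640, 640), 0, 0, INTER_LINEAR);

	// binarize with an assumed 0.5 threshold; pixels above it belong to the object
	Mat binary = mask > 0.5;            // CV_8U, 255 where the mask is set
	return 0;
}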

The complete code is as follows:

#include<iostream>
#include<opencv2/opencv.hpp>
#include<opencv2/dnn.hpp>
#include<fstream>
using namespace std;
using namespace cv;
using namespace dnn;

const std::vector<cv::Scalar> colors = { cv::Scalar(255, 255, 0), cv::Scalar(255, 255, 255), cv::Scalar(0, 255, 255), cv::Scalar(255, 0, 0) };
const float SCORE_THRESHOLD = 0.2;
const float CONFIDENCE_THRESHOLD = 0.6;
const float NMS_THRESHOLD = 0.4;

struct Detection
{
	int class_id;
	float confidence;
	cv::Rect box;
	//cv::Mat boxMask;
};
struct MaskParams {
	int segChannels = 32;
	int segWidth = 160;
	int segHeight = 160;
	int netWidth = 640;
	int netHeight = 640;
	float maskThreshold = 0.5;
	cv::Size srcImgShape;
	cv::Vec4d params;
};
int main() {
	Net net = readNetFromONNX("seg.onnx");
	net.setPreferableBackend(DNN_BACKEND_DEFAULT);
	net.setPreferableTarget(DNN_TARGET_CPU);
	
	String path = "C:/Users/Administrator/Documents/visual studio 2015/Projects/test/test/seg/*.jpg";
	vector<String> images;
	glob(path, images, false);
	for (size_t i = 0; i < images.size(); i++)
	{
		Mat mat = imread(images[i]);
		Mat img_show = mat.clone();
		// stretch to 640x640 (no letterbox), scale to [0,1] and swap BGR -> RGB
		cv::Mat blob = cv::dnn::blobFromImage(mat, 1.0 / 255, cv::Size(640, 640), cv::Scalar(0, 0, 0), true);
		net.setInput(blob);
		std::vector<cv::Mat>outputs;
		vector<string> output_layer_names{ "output0","output1" };
		net.forward(outputs,output_layer_names);
		float* data = (float*)outputs[0].data;
		// each detection row = num_classes + 5 (x, y, w, h, obj_conf) + 32 mask coefficients = 38 for this single-class model
		const int dimensions = 38;
		const int rows = 25200; // 80*80*3 + 40*40*3 + 20*20*3 anchors at strides 8, 16, 32
		std::vector<int> class_ids;
		std::vector<float> confidences;
		std::vector<cv::Rect> boxes;
		std::vector<vector<float>> picked_proposals;  // the 32 mask coefficients of each candidate box (output0[..., 6:38])

		for (int r = 0; r < rows; ++r) {
			float confidence = data[4];
			if (confidence >= CONFIDENCE_THRESHOLD) {

				float* classes_scores = data + 5;
				cv::Mat scores(1, 1, CV_32FC1, classes_scores); // single-class model: one class score per row
				cv::Point class_id;
				double max_class_score;
				minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
				if (max_class_score > SCORE_THRESHOLD) {

					confidences.push_back(confidence);
					vector<float> temp_proto(data + 5 + 1, data + 38); // the 32 mask coefficients
					picked_proposals.push_back(temp_proto);
					class_ids.push_back(class_id.x);
					float x = data[0];
					float y = data[1];
					float w = data[2];
					float h = data[3];
					int left = int(x - 0.5 * w);
					int top = int(y - 0.5 * h);
					int width = int(w);
					int height = int(h);
					boxes.push_back(cv::Rect(left, top, width, height));
				}
			}
			// advance to the next detection row (stride = 38 floats)
			data += 38;
		}
		std::vector<int> nms_result;
		cv::dnn::NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, nms_result);
		std::vector<vector<float>> temp_mask_proposals;
		std::vector<Detection> output;
		for (int i = 0; i < nms_result.size(); i++) {
			int idx = nms_result[i];
			Detection result;
			result.class_id = class_ids[idx];
			result.confidence = confidences[idx];
			result.box = boxes[idx];
			output.push_back(result);
			// keep the 32 mask coefficients of the boxes that survived NMS
			temp_mask_proposals.push_back(picked_proposals[idx]);		
		}
		// compute the masks
		Vec4d params; // would hold letterbox scale/pad parameters; unused here because the image is stretched directly
		MaskParams maskParams;
		maskParams.params = params;
		maskParams.srcImgShape = mat.size();
		Mat mask;
		Mat final_mask = Mat::zeros(640, 640, CV_8UC3); // must be zero-initialized: it accumulates the mask overlay
		for (int j = 0; j < temp_mask_proposals.size(); ++j) {
			int seg_channels = 32;
			int net_width =640;
			int seg_width = 160;
			int net_height =640;
			int seg_height = 160;
			float mask_threshold = 0.5;
			params = maskParams.params;
			Size src_img_shape = maskParams.srcImgShape;
			// matrix 2: flatten the 1x32x160x160 prototypes into 32 x 25600
			Mat protos = outputs[1].reshape(0, { seg_channels,seg_width * seg_height });
			// matrix 1: the 32 coefficients of this box as a 1 x 32 row vector
			Mat maskProposals = Mat(temp_mask_proposals[j]).t();
			maskProposals = maskProposals.clone();
			// matrix 3: (1 x 32) * (32 x 25600) = 1 x 25600, reshaped to 160 x 160
			Mat matmul_res = (maskProposals * protos).t();
			Mat masks = matmul_res.reshape(1, { seg_width,seg_height });
			Mat dest;
			// sigmoid: 1 / (1 + e^-x)
			cv::exp(-masks, dest);
			dest = 1.0 / (1.0 + dest);
			// upsample the 160x160 mask to the 640x640 network size
			resize(dest, mask, Size(640, 640), 0, 0, INTER_NEAREST);
			// binarize: mark pixels whose sigmoid value exceeds the threshold
			for (int w = 0; w < mask.rows; w++)
			{
				for (int q = 0; q < mask.cols; q++)
				{
					if (mask.at<float>(w, q) > 0.9f) // binarization threshold (stricter than the declared mask_threshold of 0.5)
					{
						final_mask.at<Vec3b>(w, q) = cv::Vec3b(0, 255, 255);
					}
				}
			}
		}
		int detections = output.size();
		// draw at the 640x640 network resolution so boxes and masks line up
		resize(img_show, img_show, Size(640, 640));
		for (int i = 0; i < detections; ++i)
		{
			auto detection = output[i];
			auto box = detection.box;
			auto classId = detection.class_id;
			auto color = colors[classId % colors.size()];
			// clamp the box to the image so the ROI below stays valid
			box &= Rect(0, 0, img_show.cols, img_show.rows);
			cv::rectangle(img_show, box, color, 3);
			// paint the masked pixels inside the box with the class color
			img_show(box).setTo(color, final_mask(box));
		}
		cv::imshow("output", img_show);
		waitKey();
	}
}
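
Note that the code above stretches the input directly to 640*640, so the aspect ratio is distorted and the params/srcImgShape fields of MaskParams go unused. In a letterbox-style preprocessing step those fields would record the scale and padding needed to map boxes and masks back to the original image. Below is a minimal sketch of such a helper; the function name letterBox, its defaults and the gray padding value 114 are assumptions, not part of the original code:

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cmath>

// Hypothetical letterbox helper: scale the image uniformly, pad to 640 x 640 with
// gray (114), and record (scale_x, scale_y, pad_x, pad_y) in `params` so results
// can be mapped back to the original image.
void letterBox(const cv::Mat& src, cv::Mat& dst, cv::Vec4d& params,
	const cv::Size& newShape = cv::Size(640, 640))
{
	double r = std::min(newShape.width / (double)src.cols,
		newShape.height / (double)src.rows);
	cv::Size scaled((int)std::round(src.cols * r), (int)std::round(src.rows * r));
	double padW = (newShape.width - scaled.width) / 2.0;
	double padH = (newShape.height - scaled.height) / 2.0;

	cv::Mat resized;
	cv::resize(src, resized, scaled);
	int top = (int)std::round(padH - 0.1), bottom = (int)std::round(padH + 0.1);
	int left = (int)std::round(padW - 0.1), right = (int)std::round(padW + 0.1);
	cv::copyMakeBorder(resized, dst, top, bottom, left, right,
		cv::BORDER_CONSTANT, cv::Scalar(114, 114, 114));

	params = cv::Vec4d(r, r, padW, padH); // scale x, scale y, pad x, pad y
}

A box predicted in network coordinates can then be mapped back to the source image with x = (x - params[2]) / params[0] and y = (y - params[3]) / params[1], and the same scale and padding apply when cropping the resized mask.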






