Reading images from memory and classifying them with a Darknet model through OpenCV

Read the mtsd files and run the model prediction:

void CTestDllDlg::OnBnClickedButton1()
{
	// TODO: Add your control notification handler code here
	CGetFilePath mGetFilePath;

	// Collect all image file paths under the source directory
	vector<string> m_vSrcImgPath;
	string path = "F:/1234/171220184711_00002";
	mGetFilePath.get_AllImg_FileNames(path, m_vSrcImgPath);

	int m_iImgDataW = 500;
	int m_iImgDataH = 860;
	BYTE** m_ppImgData = NULL;
	// (m_ppImgData was just set to NULL, so this free branch never runs here)
	if (m_ppImgData != NULL)
		ReSize2dDataPointer(m_ppImgData, m_iImgDataW*m_iImgDataH, 0, 0);

	// Allocate one buffer of W*H bytes per image
	ReSize2dDataPointer(m_ppImgData, 0, m_iImgDataW*m_iImgDataH, m_vSrcImgPath.size());

	for (int i = 0; i < m_vSrcImgPath.size(); i++)
	{
		CString m_strImgPath = StringAbout::toCString(m_vSrcImgPath[i]);
		CFileFind findFile;
		if (!findFile.FindFile(m_strImgPath))
		{
			return;
		}

		BYTE* pOctdata = new BYTE[m_iImgDataW * m_iImgDataH];
		memset(pOctdata, 0, m_iImgDataW * m_iImgDataH);	// zero the whole buffer, not just one byte

		// Read the raw image bytes from disk
		DWORD dwReadBytes = CProcessFile::ReadBinaryFile(&pOctdata[0],
			m_iImgDataW * m_iImgDataH, m_strImgPath);

		if (m_iImgDataW * m_iImgDataH != dwReadBytes)
		{
			delete[] pOctdata;
			return; // failed to read the image data
		}

		// Copy the raw bytes into the in-memory image array
		for (int j = 0; j < m_iImgDataW * m_iImgDataH; j++)
		{
			m_ppImgData[i][j] = pOctdata[j];
		}
		delete[] pOctdata;	// release the temporary read buffer
	}

	//DllBase* m_dll = GetDllBase();
	m_dll.LoadNet();
	String model = "E:/_darknet/x64/data/METAL/darknet19_448.cfg";
	String weights = "E:/_darknet/x64/backup/_darknet19_448_final.weights";
	string label = "E:/_darknet/x64/data/METAL/labels.txt";
	LoadNet(model, weights, label);
	ForCast(m_ppImgData, m_vSrcImgPath.size(), m_iImgDataW, m_iImgDataH);
}
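
The helpers CGetFilePath, CProcessFile and ReSize2dDataPointer come from the surrounding project and are not shown in this post. For readers without that code, here is a minimal sketch of reading one raw 8-bit frame directly into a cv::Mat instead of a manually managed BYTE** block; the fixed width/height and the single-channel raw layout are assumptions taken from the code above, and the function name is hypothetical:

```cpp
#include <fstream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

// Sketch: read one raw width*height single-channel frame from disk.
// Returns an empty Mat if the file is missing or shorter than expected.
static cv::Mat ReadRawFrame(const std::string& path, int width, int height)
{
	std::ifstream in(path, std::ios::binary);
	if (!in)
		return cv::Mat();

	std::vector<unsigned char> buf(static_cast<size_t>(width) * height);
	in.read(reinterpret_cast<char*>(buf.data()), buf.size());
	if (in.gcount() != static_cast<std::streamsize>(buf.size()))
		return cv::Mat();

	// Clone so the Mat owns its pixel data after buf goes out of scope
	return cv::Mat(height, width, CV_8UC1, buf.data()).clone();
}
```

Note that ForCast() below indexes the raw buffer column-major, so depending on the actual file layout a cv::transpose may be needed after reading.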

Loading the model:

void CTestDllDlg::LoadNet(String model, String weights, String label)
{
	// 1. Load the network model
	net = readNetFromDarknet(model, weights);
	if (net.empty())
	{
		printf("Could not load net...\n");
		//return 0;
	}

	/* Ask the network to use a specific computation backend where supported.
	 * If OpenCV was built with Intel's Inference Engine, DNN_BACKEND_DEFAULT
	 * means DNN_BACKEND_INFERENCE_ENGINE; otherwise it equals DNN_BACKEND_OPENCV.
	 */
	net.setPreferableBackend(DNN_BACKEND_OPENCV);
	// Ask the network to run its computation on a specific target device
	net.setPreferableTarget(DNN_TARGET_CPU);

	// 2. Load the class labels
	//vector<string> classNamesVec;
	ifstream classNamesFile(label.c_str());
	if (classNamesFile.is_open())
	{
		string className = "";
		while (std::getline(classNamesFile, className))
			m_classes.push_back(className);
	}
}
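
LoadNet() and ForCast() rely on two class members that are not shown in the post. A minimal sketch of the assumed declarations in the dialog header (the base class and surrounding members are omitted):

```cpp
#include <opencv2/dnn.hpp>
#include <string>
#include <vector>

// Assumed members of CTestDllDlg used by LoadNet()/ForCast()
class CTestDllDlg /* : public CDialogEx */
{
	// ...
	cv::dnn::Net net;                    // network loaded by readNetFromDarknet()
	std::vector<std::string> m_classes;  // class labels read from labels.txt
	// ...
};
```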

Predicting the in-memory images:

void CTestDllDlg::ForCast(BYTE** mImgs, int img_size, int width, int height)
{
	std::vector<Mat> m_vImgs;
	// Temporary interleaved 3-channel buffer for one image
	unsigned char* tmpImg = new unsigned char[height * width * 3];

	for (int i = 0; i < 1/*img_size*/; i++)	// NOTE: only the first image is converted here
	{
		int iCount = 3;           // bytes per pixel in the destination buffer
		int iPos = 0;
		int iWNum = width * 3;    // bytes per destination row
		int k = 0;
		// Expand the single-channel source into a 3-channel image
		// (the same gray value is written to B, G and R)
		for (int n = 0; n < width; ++n)
		{
			iPos = n * iCount;
			for (int m = 0; m < height; ++m)
			{
				k = m * iWNum + iPos;
				tmpImg[k]     = (BYTE)mImgs[i][n * height + m];
				tmpImg[k + 1] = (BYTE)mImgs[i][n * height + m];
				tmpImg[k + 2] = (BYTE)mImgs[i][n * height + m];
			}
		}
		Mat mimg = Mat(height, width, CV_8UC3, (unsigned char*)tmpImg).clone();
		m_vImgs.push_back(mimg);
		imshow("test", mimg);
		waitKey();
	}

	delete[] tmpImg;

	// 3. Convert the image to an input blob (scaled to [0,1], BGR->RGB swap, no crop)
	Mat inputBlob = blobFromImage(m_vImgs[0], 1 / 255.F, Size(width, height), Scalar(), true, false);

	net.setInput(inputBlob, "data");

	// 4. Run inference and display the result
	// Collect the outputs of the network's output layers
	vector<Mat> outs;
	net.forward(outs, getOutputsNames(net));

	vector<double> layersTimings;
	double freq = getTickFrequency() / 1000;
	double time = net.getPerfProfile(layersTimings) / freq;

	// For this classification model, pick the class with the highest score
	// (for a detection model, bounding boxes would be drawn here instead)
	for (int i = 0; i < outs.size(); i++)
	{
		// reshape the blob to a 1xN matrix (one score per class)
		Mat probMat = outs[i].reshape(1, 1);
		Point classNumber;
		double classProb;
		// location of the highest probability
		minMaxLoc(probMat, NULL, &classProb, NULL, &classNumber);
		// index of the predicted class
		int classIdx = classNumber.x;
		printf("current image classification : %s, possible : %.8f \n", m_classes.at(classIdx).c_str(), classProb);

		string label = format("class: ") + m_classes.at(classIdx) + format(" ,time: %.2f ms", time);
		putText(m_vImgs[i], label, Point(20, 20), 0, 0.5, Scalar(0, 0, 255));
		string winName = format("OCT%d", i);
		namedWindow(winName);
		imshow(winName, m_vImgs[i]);
		waitKey();
	}
}
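
The hand-written triple-channel copy above can also be expressed with OpenCV itself. A minimal sketch, assuming the source buffer is a single-channel 8-bit image laid out column-major as in the loop above (hence the transpose); the function name is hypothetical:

```cpp
#include <opencv2/opencv.hpp>

// Sketch: wrap the raw single-channel buffer and let OpenCV do the gray->BGR expansion.
static cv::Mat GrayBufferToBgr(const unsigned char* src, int width, int height)
{
	// The loop above reads src[n * height + m] (column n, row m),
	// i.e. the buffer is height-major; wrap it as width x height and transpose.
	cv::Mat gray(width, height, CV_8UC1, const_cast<unsigned char*>(src));
	cv::Mat grayT;
	cv::transpose(gray, grayT);                    // now height x width, row-major

	cv::Mat bgr;
	cv::cvtColor(grayT, bgr, cv::COLOR_GRAY2BGR);  // replicate gray into B, G, R
	return bgr;
}
```

A call such as GrayBufferToBgr(mImgs[i], width, height) would then replace the inner double loop and the tmpImg buffer.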

Getting the names of the model's output layers:

vector<String> CTestDllDlg::getOutputsNames(Net& net)
{
	static vector<String> names;
	if (names.empty())
	{
		// Indices of the output layers, i.e. layers with unconnected outputs
		vector<int> outLayers = net.getUnconnectedOutLayers();

		// Get the names of all the layers in the network
		vector<String> layersNames = net.getLayerNames();

		// Map the (1-based) output-layer indices to their names
		names.resize(outLayers.size());
		for (size_t i = 0; i < outLayers.size(); ++i)
			names[i] = layersNames[outLayers[i] - 1];
	}
	return names;
}
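
As a side note, newer OpenCV builds expose the same information directly through Net::getUnconnectedOutLayersNames(); a minimal sketch, assuming such a version is available:

```cpp
#include <opencv2/dnn.hpp>
#include <vector>

// Sketch: get the output-layer names without the manual index-to-name mapping.
std::vector<cv::String> GetOutputNamesDirect(cv::dnn::Net& net)
{
	return net.getUnconnectedOutLayersNames();
}
```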

 

The general steps for calling a YOLOv3 model with OpenCV are as follows:

1. Download and build Darknet. Get the source code from the Darknet repository (https://github.com/pjreddie/darknet), then compile and install it.

2. Download the YOLOv3 weight file, e.g. yolov3.weights, from the same site.

3. Read an image or video with OpenCV: use cv::imread() for an image file, or the cv::VideoCapture class for a video file.

4. Load the YOLOv3 configuration and weight files with OpenCV's dnn::readNetFromDarknet() function.

```python
model_cfg = "path/to/yolov3.cfg"
model_weights = "path/to/yolov3.weights"
net = cv2.dnn.readNetFromDarknet(model_cfg, model_weights)
```

5. Preprocess the image: resize it to the input size the model expects and normalize the pixel values.

```python
blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
```

6. Feed the blob into the network and run inference with dnn::Net's forward() function to obtain the model output.

```python
net.setInput(blob)
outputs = net.forward()
```

7. Post-process the output: parse the detections and keep those above a confidence threshold.

```python
for output in outputs:
    for detection in output:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence > 0.5:
            center_x = int(detection[0] * width)
            center_y = int(detection[1] * height)
            w = int(detection[2] * width)
            h = int(detection[3] * height)
            x = int(center_x - w/2)
            y = int(center_y - h/2)
            cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
```

8. Display the result with cv::imshow() and wait for a key press with cv::waitKey().

```python
cv2.imshow("Output", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
```

These are the basic steps for calling a YOLOv3 model through OpenCV.