opencv dnn模块 示例(8) 语义分割 segmentation(ENet/fcn8s)

一、opencv的示例模型文件

opencv的dnn模块读取models.yml文件中包含的语义分割模型有2种,
ENet road scene segmentation network from https://github.com/e-lab/ENet-training
Works fine for different input sizes.

  • enet:
    model: “Enet-model-best.net”
    mean: [0, 0, 0]
    scale: 0.00392
    width: 512
    height: 256
    rgb: true
    classes: “enet-classes.txt”
    sample: “segmentation”
  • fcn8s:
    model: “fcn8s-heavy-pascal.caffemodel”
    config: “fcn8s-heavy-pascal.prototxt”
    mean: [0, 0, 0]
    scale: 1.0
    width: 500
    height: 500
    rgb: false
    sample: “segmentation”

提供csdn下的enet 下载链接

二、示例代码

#include <fstream>
#include <sstream>

#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

#include <iostream>

using namespace cv;
using namespace dnn;

std::vector<std::string> classes;  // class labels loaded from classesFile in main(); index == network output channel
std::vector<Vec3b> colors;         // per-class BGR colors; read from colorFile or generated lazily in colorizeSegmentation()

// Renders a "Legend" window mapping each class name to its color (defined below).
void showLegend();

// Converts a raw network score blob into a colorized segmentation image (defined below).
void colorizeSegmentation(const Mat &score, Mat &segm);

/**
 * Semantic segmentation demo (FCN8s / ENet) with the OpenCV DNN module.
 *
 * Loads the selected network, reads frames from a VideoCapture source,
 * runs a forward pass per frame, colorizes the per-pixel class scores and
 * blends the segmentation map over the input frame.
 *
 * @return 0 on success, 1 if the capture source cannot be opened or an
 *         exception escapes to the function-try-block handler.
 */
int main(int argc, char** argv) try {
	//	VGG - based FCN (semantic segmentation network)
	//	ENet (lightweight semantic segmentation network)

	// Per-model preprocessing parameters (mirrors models.yml shown above).
	float scale;
	cv::Scalar mean;
	bool swapRB;
	int inpWidth, inpHeight;

	String modelPath, configPath, classesFile;

	int modelType = 1;  // 0 = fcn8s, 1 = enet

	if (modelType == 0) {
		scale = 1.0;
		mean = Scalar{ 0,0,0 };
		swapRB = false;
		inpWidth = 500;
		inpHeight = 500;

		modelPath = "../../data/testdata/dnn/fcn8s-heavy-pascal.caffemodel";
		configPath = "../../data/testdata/dnn/fcn8s-heavy-pascal.prototxt";
		classesFile = "../../data/dnn/object_detection_classes_pascal_voc.txt";
	}
	else if (modelType == 1) {
		scale = 0.00392;  // 1/255: scale input pixels to [0,1]
		mean = Scalar{ 0,0,0 };
		swapRB = false;
		inpWidth = 512;
		inpHeight = 256;

		modelPath = "../../data/testdata/dnn/Enet-model-best.net";
		configPath = "";  // Torch .net model needs no separate config file
		classesFile = "../../data/dnn/enet-classes.txt";
	}

	String colorFile = "";   // optional "B G R" per line; empty -> palette generated later
	String framework = "";   // empty: readNet() deduces the framework from the file extension

	int backendId = cv::dnn::DNN_BACKEND_OPENCV;
	int targetId = cv::dnn::DNN_TARGET_CPU;

	// Open file with classes names (one label per line).
	if (!classesFile.empty()) {
		const std::string file = classesFile;
		std::ifstream ifs(file.c_str());
		if (!ifs.is_open())
			CV_Error(Error::StsError, "File " + file + " not found");
		std::string line;

		// The pascal-voc object-detection list has no "background" entry, while
		// FCN8s outputs it as class 0; the ENet class file already includes it,
		// so this prepend is done for fcn8s only.
		if (modelType == 0)
			classes.push_back("background");

		while (std::getline(ifs, line)) {
			classes.push_back(line);
		}
	}

	// Optional color file: up to three integers ("B G R") per line, one line per class.
	if (!colorFile.empty()) {
		const std::string file = colorFile;
		std::ifstream ifs(file.c_str());
		if (!ifs.is_open())
			CV_Error(Error::StsError, "File " + file + " not found");
		std::string line;
		while (std::getline(ifs, line)) {
			std::istringstream colorStr(line.c_str());
			Vec3b color;
			for (int i = 0; i < 3 && !colorStr.eof(); ++i)
				colorStr >> color[i];
			colors.push_back(color);
		}
	}

	CV_Assert(!modelPath.empty());
	//! [Read and initialize network]
	Net net = readNet(modelPath, configPath, framework);
	net.setPreferableBackend(backendId);
	net.setPreferableTarget(targetId);
	//! [Read and initialize network]

	// Create a window
	static const std::string kWinName = "Deep learning semantic segmentation in OpenCV";
	namedWindow(kWinName, WINDOW_AUTOSIZE);

	//! [Open a video file or an image file or a camera stream]
	VideoCapture cap;
	//cap.open("../../data/image/person.jpg");                       // pascal voc 	
	cap.open("G:/Datasets/Cityscapes/aachen_%06d_000019.jpg");   // enet Cityscapes

	if (!cap.isOpened()) {
		std::cout << "VideoCapture open failed." << std::endl;
		return 1;  // was 0: report the failure through the exit code
	}
	//! [Open a video file or an image file or a camera stream]

	// Process frames until any key is pressed.
	Mat frame, blob;
	while (waitKey(1) < 0) {
		cap >> frame;
		if (frame.empty()) {
			waitKey();  // keep the last result visible until a key press
			break;
		}

		//! [Create a 4D blob from a frame]
		blobFromImage(frame, blob, scale, Size(inpWidth, inpHeight), mean, swapRB, false);
		//! [Create a 4D blob from a frame]

		//! [Set input blob]
		net.setInput(blob);
		//! [Set input blob]
		//! [Make forward pass]
		Mat score = net.forward();
		//! [Make forward pass]

		Mat segm;
		colorizeSegmentation(score, segm);

		// Upscale the class map to frame size and overlay it (90% segmentation).
		resize(segm, segm, frame.size(), 0, 0, INTER_NEAREST);
		addWeighted(frame, 0.1, segm, 0.9, 0.0, frame);

		// Put efficiency information.
		std::vector<double> layersTimes;
		double freq = getTickFrequency() / 1000;
		double t = net.getPerfProfile(layersTimes) / freq;
		std::string label = format("Inference time: %.2f ms", t);
		putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));

		imshow(kWinName, frame);

		if (!classes.empty())
			showLegend();
	}
	return 0;
}
catch (std::exception & e) {
	std::cerr << e.what() << std::endl;
	return 1;  // was missing: an escaped exception must not look like success
}

void colorizeSegmentation(const Mat &score, Mat &segm)
{
	const int rows = score.size[2];
	const int cols = score.size[3];
	const int chns = score.size[1];

	if (colors.empty()) {
		// Generate colors.
		colors.push_back(Vec3b());
		for (int i = 1; i < chns; ++i) {
			Vec3b color;
			for (int j = 0; j < 3; ++j)
				color[j] = (colors[i - 1][j] + rand() % 256) / 2;
			colors.push_back(color);
		}
	}
	else if (chns != (int)colors.size()) {
		CV_Error(Error::StsError, format("Number of output classes does not match "
			"number of colors (%d != %zu)", chns, colors.size()));
	}

	Mat maxCl = Mat::zeros(rows, cols, CV_8UC1);
	Mat maxVal(rows, cols, CV_32FC1, score.data);
	for (int ch = 1; ch < chns; ch++) {
		for (int row = 0; row < rows; row++) {
			const float *ptrScore = score.ptr<float>(0, ch, row);
			uint8_t *ptrMaxCl = maxCl.ptr<uint8_t>(row);
			float *ptrMaxVal = maxVal.ptr<float>(row);
			for (int col = 0; col < cols; col++) {
				if (ptrScore[col] > ptrMaxVal[col]) {
					ptrMaxVal[col] = ptrScore[col];
					ptrMaxCl[col] = (uchar)ch;
				}
			}
		}
	}

	segm.create(rows, cols, CV_8UC3);
	for (int row = 0; row < rows; row++) {
		const uchar *ptrMaxCl = maxCl.ptr<uchar>(row);
		Vec3b *ptrSegm = segm.ptr<Vec3b>(row);
		for (int col = 0; col < cols; col++) {
			ptrSegm[col] = colors[ptrMaxCl[col]];
		}
	}
}

void showLegend()
{
	static const int kBlockHeight = 30;
	static Mat legend;
	if (legend.empty()) {
		const int numClasses = (int)classes.size();
		if ((int)colors.size() != numClasses) {
			CV_Error(Error::StsError, format("Number of output classes does not match "
				"number of labels (%zu != %zu)", colors.size(), classes.size()));
		}
		legend.create(kBlockHeight * numClasses, 200, CV_8UC3);
		for (int i = 0; i < numClasses; i++) {
			Mat block = legend.rowRange(i * kBlockHeight, (i + 1) * kBlockHeight);
			block.setTo(colors[i]);
			putText(block, classes[i], Point(0, kBlockHeight / 2), FONT_HERSHEY_SIMPLEX, 0.5, Vec3b(255, 255, 255));
		}
		namedWindow("Legend", WINDOW_AUTOSIZE);
		imshow("Legend", legend);
	}
}

3、示例

(1) fcn8s-heavy-pascal 测试结果

person.jpg 原图,图例,结果图如下 (opencl 比 cpu慢2倍…)
在这里插入图片描述在这里插入图片描述
另外两张图结果
在这里插入图片描述

(2)Enet-model 测试结果

(opencl 比 cpu快3倍… 无语…)
在这里插入图片描述

在这里插入图片描述
在这里插入图片描述

  • 9
    点赞
  • 59
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 61
    评论
下面是一个简单的 C++ OpenCV DNN 推理代码示例,使用 ONNX 格式的 U-Net 模型进行语义分割: ```c++ #include <opencv2/dnn/dnn.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> using namespace cv; using namespace cv::dnn; int main(int argc, char** argv) { // 读取模型文件 const string model_file_path = "unet.onnx"; Net net = readNetFromONNX(model_file_path); // 读取输入图像 const string input_file_path = "input.jpg"; Mat input_image = imread(input_file_path); // 预处理输入图像 Mat input_blob = blobFromImage(input_image, 1.0 / 255.0, Size(572, 572), Scalar(0, 0, 0), true, false); // 运行推理 Mat output_blob; net.setInput(input_blob); net.forward(output_blob); // 后处理输出结果 Mat output_image; output_blob = output_blob.reshape(1, 388 * 388); output_blob.convertTo(output_blob, CV_8UC1, 255.0); applyColorMap(output_blob, output_image, COLORMAP_JET); // 显示输出结果 imshow("Output", output_image); waitKey(0); return 0; } ``` 这个示例代码假设已经有了一个 ONNX 格式的 U-Net 模型文件 `unet.onnx` 和一个输入图像文件 `input.jpg`。代码中首先使用 `readNetFromONNX` 函数读取了模型文件,然后使用 `imread` 函数读取了输入图像。 接下来,代码中使用 `blobFromImage` 函数将输入图像转换成网络需要的输入格式,并使用 `setInput` 函数将输入数据设置到网络中,使用 `forward` 函数进行推理,得到输出结果。 最后,代码中使用 `reshape` 函数和 `convertTo` 函数对输出结果进行后处理,然后使用 `applyColorMap` 函数将结果可视化,使用 `imshow` 函数显示输出结果,使用 `waitKey` 函数等待用户按下键盘。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 61
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

aworkholic

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值