OpenCV + RealSense depth-to-color alignment

This example streams 640x480 depth and color frames from a RealSense camera, aligns the depth frame to the color stream with rs2::align, paints color pixels farther than a clipping distance (0.9 m) as gray background, and displays the result in an OpenCV window.

#include <iostream>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <cstring>
#include <opencv2/opencv.hpp>
#include <librealsense2/rs.hpp>
#include <omp.h>

using namespace std;
using namespace cv;
using namespace rs2;
Mat  image;

std::string file_name;
int  usart_rt_flag;
float finalad = 0;
int countt;
int j = 0;
float ad = 0;
int acount = 0;
clock_t time1, time2; //variables for measuring FPS

//Get the scale that converts raw depth pixel values to meters
float get_depth_scale(device dev);
float offset = 0; //offset (unused in this example)
//Check whether the camera's stream profiles have changed
bool profile_changed(const vector<stream_profile>& current, const vector<stream_profile>& prev);
enum class direction
{
	to_depth,
	to_color
};
void remove_background(rs2::video_frame& other_frame, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist);
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams);

int main() try
{
	//Colorizer for rendering depth as a color image (declared but not used in this example)
	colorizer color_map;
	//Declare the RealSense pipeline
	pipeline pipe;
	
	//Stream configuration: 640x480 depth (Z16) and color (BGR8), both at 60 fps
	config pipe_config;
	pipe_config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 60);
	pipe_config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_BGR8, 60);

	//Start streaming with the configuration above
	pipeline_profile profile = pipe.start(pipe_config);
	//Scale that converts raw depth pixel values to meters
	float depth_scale = get_depth_scale(profile.get_device());
	//Choose the stream to align depth with (color, if available)
	rs2_stream align_to = find_stream_to_align(profile.get_streams());
	rs2::align align(align_to);

	const char* color_win = "depth-color alignment";
	namedWindow(color_win, WINDOW_AUTOSIZE);

	//Clipping distance in meters: color pixels farther than this are painted as background
	float depth_clipping_distance = 0.9f;
	direction dir = direction::to_depth; //alignment direction (unused below; the loop always aligns depth onto the color image)


	//Run until a key is pressed or the window is closed
	while (waitKey(1) < 0 && getWindowProperty(color_win, WND_PROP_AUTOSIZE) >= 0) {
		time1 = clock(); //start timing (for FPS measurement)
		frameset data = pipe.wait_for_frames(); //block until the next set of frames arrives
		frame color = data.get_color_frame();   //color frame, used below to read the width/height

		//If the stream profiles changed (e.g. the device reconnected), refresh the
		//alignment target and depth scale; otherwise keep the values computed before the loop
		if (profile_changed(pipe.get_active_profile().get_streams(), profile.get_streams()))
		{
			profile = pipe.get_active_profile();
			align_to = find_stream_to_align(profile.get_streams());
			align = rs2::align(align_to);
			depth_scale = get_depth_scale(profile.get_device());
		}

		//Align the depth frame to the color stream
		auto processed = align.process(data);

		rs2::video_frame other_frame = processed.first(align_to);           //color frame (alignment target)
		rs2::depth_frame aligned_depth_frame = processed.get_depth_frame(); //depth frame aligned to color

		//Paint color pixels beyond the clipping distance with a gray "background" color
		remove_background(other_frame, aligned_depth_frame, depth_scale, depth_clipping_distance);

		const int color_w = color.as<video_frame>().get_width();
		const int color_h = color.as<video_frame>().get_height();

		//Wrap the background-removed color frame in an OpenCV Mat (no copy)
		Mat image(Size(color_w, color_h), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);

		imshow(color_win, image);

	}

	return EXIT_SUCCESS;
}
catch (const rs2::error& e) {
	cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n"
		<< e.what() << endl;
	return EXIT_FAILURE;
}
catch (const exception& e) {
	cerr << e.what() << endl;
	return EXIT_FAILURE;
}
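The direction enum declares both mappings, but the loop above always aligns depth onto the color image. The opposite mapping, resampling color onto the depth image, is also a one-liner with rs2::align; a minimal sketch reusing the pipe from main (not part of the program above):

//Sketch only: align color to the depth image instead (the unused direction::to_depth case)
rs2::align align_to_depth(RS2_STREAM_DEPTH);
rs2::frameset fs = align_to_depth.process(pipe.wait_for_frames());
rs2::video_frame color_on_depth = fs.get_color_frame();  //color resampled into the depth image's geometry
rs2::depth_frame depth_unchanged = fs.get_depth_frame(); //depth stays in its own coordinates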

float get_depth_scale(rs2::device dev)
{
	// Go over the device's sensors
	for (rs2::sensor& sensor : dev.query_sensors())
	{
		// Check if the sensor is a depth sensor
		if (rs2::depth_sensor dpt = sensor.as<rs2::depth_sensor>())
		{
			return dpt.get_depth_scale();
		}
	}
	throw std::runtime_error("Device does not have a depth sensor");
}
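The value returned here is the factor remove_background uses below to turn raw Z16 samples into meters. A small worked example, assuming a scale of 0.001 (a common setting, i.e. one millimeter per unit; the actual value depends on the device):

//Sketch only: converting a raw depth sample to meters with an assumed scale of 0.001
uint16_t raw_depth = 943;               //value read from the Z16 depth frame
float depth_scale = 0.001f;             //what get_depth_scale() might return
float meters = raw_depth * depth_scale; //0.943 m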

bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev)
{
	for (auto&& sp : prev)
	{
		//Check whether this previous profile is still present in the current set (a stream may just have been added)
		auto itr = std::find_if(std::begin(current), std::end(current), [&sp](const rs2::stream_profile& current_sp) { return sp.unique_id() == current_sp.unique_id(); });
		if (itr == std::end(current)) //If the previous stream wasn't found in current
		{
			return true;
		}
	}
	return false;
}

void remove_background(rs2::video_frame& other_frame, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist)
{
	const uint16_t* p_depth_frame = reinterpret_cast<const uint16_t*>(depth_frame.get_data());
	uint8_t* p_other_frame = reinterpret_cast<uint8_t*>(const_cast<void*>(other_frame.get_data()));

	int width = other_frame.get_width();
	int height = other_frame.get_height();
	int other_bpp = other_frame.get_bytes_per_pixel();

#pragma omp parallel for schedule(dynamic) //Using OpenMP to try to parallelise the loop
	for (int y = 0; y < height; y++)
	{
		auto depth_pixel_index = y * width;
		for (int x = 0; x < width; x++, ++depth_pixel_index)
		{
			// Get the depth value of the current pixel
			auto pixels_distance = depth_scale * p_depth_frame[depth_pixel_index];

			// Check if the depth value is invalid (<=0) or greater than the threshold
			if (pixels_distance <= 0.f || pixels_distance > clipping_dist)
			{
				// Calculate the offset in other frame's buffer to current pixel
				auto offset = depth_pixel_index * other_bpp;

				// Set pixel to "background" color (0x999999)
				std::memset(&p_other_frame[offset], 0x99, other_bpp);
			}
		}
	}
}
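The same clipping can also be written with OpenCV matrix operations instead of the explicit per-pixel loop. A minimal sketch, assuming a BGR8 color frame and a Z16 depth frame of identical size; the function name remove_background_cv is hypothetical and not part of the program above:

//Sketch only: background removal expressed with OpenCV mask operations
void remove_background_cv(rs2::video_frame& color, const rs2::depth_frame& depth,
                          float depth_scale, float clipping_dist)
{
	int w = color.get_width(), h = color.get_height();
	//Wrap both frames without copying
	Mat color_mat(Size(w, h), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);
	Mat depth_raw(Size(w, h), CV_16UC1, (void*)depth.get_data(), Mat::AUTO_STEP);

	//Convert raw depth to meters, then mask invalid or too-far pixels
	Mat depth_m;
	depth_raw.convertTo(depth_m, CV_32F, depth_scale);
	Mat mask = (depth_m <= 0.f) | (depth_m > clipping_dist);

	//Paint masked pixels with the same 0x99 gray used above
	color_mat.setTo(Scalar(0x99, 0x99, 0x99), mask);
}

This is mainly a readability alternative; whether it outperforms the OpenMP loop depends on the build and hardware.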

rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams)
{
	//Given a vector of streams, we try to find a depth stream and another stream to align depth with.
	//We prioritize color streams to make the view look better.
	//If color is not available, we take any other stream (other than depth).
	rs2_stream align_to = RS2_STREAM_ANY;
	bool depth_stream_found = false;
	bool color_stream_found = false;
	for (rs2::stream_profile sp : streams)
	{
		rs2_stream profile_stream = sp.stream_type();
		if (profile_stream != RS2_STREAM_DEPTH)
		{
			if (!color_stream_found)         //Prefer color
				align_to = profile_stream;

			if (profile_stream == RS2_STREAM_COLOR)
			{
				color_stream_found = true;
			}
		}
		else
		{
			depth_stream_found = true;
		}
	}

	if (!depth_stream_found)
		throw std::runtime_error("No Depth stream available");

	if (align_to == RS2_STREAM_ANY)
		throw std::runtime_error("No stream found to align with Depth");

	return align_to;
}
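Because the depth frame is aligned to color, the distance under any pixel of the displayed image can be read directly. A minimal sketch that would sit inside the while loop above, after align.process; the pixel coordinates are arbitrary placeholders:

//Sketch only: depth (in meters) under a given pixel of the aligned image
int px = 320, py = 240;                                  //e.g. the image center at 640x480
float dist_m = aligned_depth_frame.get_distance(px, py);
if (dist_m > 0.f)
	cout << "distance at (" << px << ", " << py << "): " << dist_m << " m" << endl;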