OpenGL显示深度相机的RGBD视频_PicoZense_DCAM710

3 篇文章 1 订阅
2 篇文章 0 订阅

所有你想要的都在这儿了:

opengl 显示深度图,彩色图;播放视频;显示点云视频;还有等等技术

且让我一一道来:

准备工作:~如果你只想要一个opengl显示视频的demo,ok,在后面呢,不急哈;

1,环境搭建:opengl 我用的是glew + freeglut; 深度相机我用的是PicoZense DCAM710;

2, 好了可以愉快的建立工程了,神秘微笑,包含目录,链接库啥的,嘿嘿~

3, 嗯,我们需要一个shader.h来辅助加载和编译opengl shaders,~这个不是我的原创,类似头文件很普遍;

#ifndef _SHADER_H_
#define _SHADER_H_

#include <GL\glew.h>
#include <iterator>     // std::istreambuf_iterator
#include <string>
#include <vector>
#include <iostream>
#include <fstream>

// One shader stage to load: the GL stage enum plus the source file path.
// Note: filePath is a non-owning pointer; the caller must keep the pointed-to
// string alive for the lifetime of this object (callers pass string literals).
struct ShaderFile
{
	GLenum shaderType;
	const char* filePath;
	ShaderFile(GLenum type, const char* path)
		:shaderType(type), filePath(path){}
};


class Shader
{
public:
	Shader(const char* vertexPath, const char* fragPath) :programId(0)
	{
		std::vector<ShaderFile> fileVec;
		fileVec.push_back(ShaderFile(GL_VERTEX_SHADER, vertexPath));
		fileVec.push_back(ShaderFile(GL_FRAGMENT_SHADER, fragPath));
		loadFromFile(fileVec);
	}
	Shader(const char* vertexPath, const char* fragPath, const char* geometryPath) :programId(0)
	{
		std::vector<ShaderFile> fileVec;
		fileVec.push_back(ShaderFile(GL_VERTEX_SHADER, vertexPath));
		fileVec.push_back(ShaderFile(GL_FRAGMENT_SHADER, fragPath));
		fileVec.push_back(ShaderFile(GL_GEOMETRY_SHADER, geometryPath));
		loadFromFile(fileVec);
	}
	void use() const
	{
		glUseProgram(this->programId);
	}
	~Shader()
	{
		if (this->programId)
		{
			glDeleteProgram(this->programId);
		}
	}
public:
	GLuint programId;
private:
	/*
	*从文件加载顶点和片元着色器
	*传递参数为[(着色器文件类型,着色器文件路径)]
	*/
	void loadFromFile(std::vector<ShaderFile>& shaderFileVec)
	{
		std::vector<GLuint> shaderObjectIdVec;
		std::string vertexSource, fragSource;
		std::vector<std::string> sourceVec;
		size_t shaderCount = shaderFileVec.size();
		//读取文件源代码
		for (size_t i = 0; i < shaderCount; ++i)
		{
			std::string shaderSource;
			if (!loadShaderSource(shaderFileVec[i].filePath, shaderSource))
			{
				std::cout << "Error::Shader could not load file:" << shaderFileVec[i].filePath << std::endl;
				return;
			}
			sourceVec.push_back(shaderSource);
		}
		bool bSuccess = true;
		//编译 Shader object
		for (size_t i = 0; i < shaderCount; ++i)
		{
			GLuint shaderId = glCreateShader(shaderFileVec[i].shaderType);
			const char *c_str = sourceVec[i].c_str();
			glShaderSource(shaderId, 1, &c_str, NULL);
			glCompileShader(shaderId);
			GLint compileStatus = 0;
			glGetShaderiv(shaderId, GL_COMPILE_STATUS, &compileStatus);//检查编译状态
			if (compileStatus == GL_FALSE)//获取错误报告
			{
				GLint maxLength = 0;
				glGetShaderiv(shaderId, GL_INFO_LOG_LENGTH, &maxLength);
				std::vector<GLchar> errLog(maxLength);
				glGetShaderInfoLog(shaderId, maxLength, &maxLength, &errLog[0]);
				std::cout << "Error::Shader file [" << shaderFileVec[i].filePath << " ] compiled failed,"
					<< &errLog[0] << std::endl;
				bSuccess = false;
			}
			shaderObjectIdVec.push_back(shaderId);
		}
		//链接shader program
		if (bSuccess)
		{
			this->programId = glCreateProgram();
			for (size_t i = 0; i < shaderCount; ++i)
			{
				glAttachShader(this->programId, shaderObjectIdVec[i]);
			}
			glLinkProgram(this->programId);
			GLint linkStatus;
			glGetProgramiv(this->programId, GL_LINK_STATUS, &linkStatus);
			if (linkStatus == GL_FALSE)
			{
				GLint maxLength = 0;
				glGetProgramiv(this->programId, GL_INFO_LOG_LENGTH, &maxLength);
				std::vector<GLchar> errLog(maxLength);
				glGetProgramInfoLog(this->programId, maxLength, &maxLength, &errLog[0]);
				std::cout << "Error::Shader link failed," << &errLog[0] << std::endl;
			}
		}
		//连接完成后detach 并释放 shader object
		for (size_t i = 0; i < shaderCount; ++i)
		{
			if (this->programId != 0)
			{
				glDetachShader(this->programId, shaderObjectIdVec[i]);
			}
			glDeleteShader(shaderObjectIdVec[i]);
		}
	}
	/*
	*读取着色器程序源码
	*/
	bool loadShaderSource(const char* filePath, std::string& source)
	{
		source.clear();
		std::ifstream in_stream(filePath);
		if (!in_stream)
		{
			return false;
		}
		source.assign(std::istreambuf_iterator<char>(in_stream),
			std::istreambuf_iterator<char>()); //文件流迭代器构造字符串
		return true;
	}
};
#endif

4, 我还准备了深度相机的SDK和辅助头文件,来实时获取RGBD数据流;SDK自己去官网下载PicoZense_DCAM710_SDK

#ifndef _PSFRAMEHELPPER_
#define _PSFRAMEHELPPER_

#include <iostream>
#include "PicoZense_api.h" //yes, 这就是深度相机的SDK头文件

// Thin helper around the PicoZense SDK lifecycle: init/open/start streams,
// and the matching stop/close/shutdown sequence.
class PsFrameHelpper
{
public:
	// Initialize the SDK, open device 0, enable depth-to-RGB mapping and
	// start the depth + mapped-RGB streams. Exits the process on failure.
	void PsFrameInit()
	{
		using namespace std;
		status = PsInitialize();
		if (status != PsReturnStatus::PsRetOK)
		{
			cout << "Initialize failed!" << endl;
			system("pause");
			exit(1); // was exit(0): a failed init must not report success
		}

		status = PsOpenDevice(deviceIndex);
		if (status != PsReturnStatus::PsRetOK)
		{
			cout << "OpenDevice 0 failed!" << endl;
			system("pause");
			exit(1); // was exit(0): see above
		}
		// Map the RGB image into depth-camera space so depth and color
		// pixels line up (used by the colored point cloud).
		PsSetMapperEnabledDepthToRGB(deviceIndex, true);
		status = PsStartFrame(deviceIndex, PsDepthFrame);
		status = PsStartFrame(deviceIndex, PsMappedRGBFrame);
	}

	// Stop every stream started in PsFrameInit, then close and shut down.
	void PsFrameClose()
	{
		status = PsStopFrame(deviceIndex, PsDepthFrame);
		// Bug fix: this stream was started in PsFrameInit but never stopped.
		status = PsStopFrame(deviceIndex, PsMappedRGBFrame);

		status = PsCloseDevice(deviceIndex);

		status = PsShutdown();
	}
public:
	PsReturnStatus status;   // result of the most recent SDK call
	int32_t deviceIndex = 0; // first attached device
};

#endif

 5,你以为已经准备妥当了?其实我也想~  我们还要两个小shader来指示如何显示图片~

片元着色器(fragment shader):triangle.frag

#version 330 core

in vec2 tex_coord;
layout (location = 0) out vec4 color;

uniform sampler2D tex;

// Sample the frame texture and swap the channel order in a single step
// (the source frames appear to be stored B,G,R — confirm against the SDK).
void main(void)
{
	color = texture(tex, tex_coord).bgra;
}

顶点着色器:triangle.vert

#version 330 core

// Pass-through vertex shader for the textured display quad:
// positions arrive already in clip space, so no transform is applied.
layout (location = 0) in vec2 in_position;
layout (location = 1) in vec2 in_tex_coord;

out vec2 tex_coord;

void main(void)
{
    gl_Position = vec4(in_position, 0.0, 1.0);
    tex_coord = in_tex_coord;
 }

6,前菜上完了,到正餐了,对,精髓~

#include <iostream>
#include <fstream>
#include "PsFrameHelpper.h"
#include <GL\glew.h>
#include <GL\freeglut.h>

#include "shader.h"
#include <windows.system.h>

#define  GLUT_WHEEL_UP 3           //定义滚轮操作  
#define  GLUT_WHEEL_DOWN 4 

// A simple 3-component point; used for the point-cloud view center (ptsCen).
struct Vertex
{
	GLfloat x;
	GLfloat y;
	GLfloat z;
};

SYSTEMTIME sys;

// settings//-------------------------------------------------------------------------
const int SCR_WIDTH = 1493, SCR_HEIGHT =960;
static void UShort2Gray(PsDepthPixel  *DepthImg, PsGray8Pixel *GrayImg, int width, int height, PsDepthRange DepthRange, bool isDepth2Gray);
static void Gray2Color(PsGray8Pixel *GrayImg, int width, int height, PsBGR888Pixel *ColorImg);
void DrowPointCloud();
void PointCloudWriter(bool isDepth);
void KeyBoards(unsigned char key, int x, int y);
void onMouseMove(int x, int y);
void myMouse(int button, int state, int x, int y);
void PrintUsage(){ std::cout << "Enter Numbers: 1 Near; 2 Mid; 3 Far; 5 ColorMap; 6 Show PointCloud; 8 PointCloudWrite" << std::endl; }

// GL object handles for the textured display quad shared by all image views.
GLuint program;
GLuint vao;
GLuint quad_vbo;
GLuint tex;

// Point-cloud view state: rotation angles (degrees), zoom factor, and the
// accumulated / last-seen mouse-drag coordinates.
float thetaX = 0.0, thetaY = 0.0, scaleFactor = 1.0;
static float dx = 0, dy = 0, oldy = -1, oldx = -1;
Vertex ptsCen = {0.0f, 0.0f, -2.5f}; // point the model-view is translated around

// Camera SDK state shared across the GLUT callbacks.
PsFrameHelpper FrameHelpper;
PsReturnStatus status;
int32_t deviceIndex = 0;
PsFrameMode rgbFrameMode;
PsFrameMode depthFrameMode;
PsDepthRange DepthRange;
PsFrame rgbFrame = { 0 };
PsFrame depthFrame = { 0 };
PsFrame depthrgbFrame = { 0 };

PsCameraParameters cameraParam = { 0.0 }; // depth-sensor intrinsics (filled in init)

// UI toggles (driven by KeyBoards) and frame-timing bookkeeping.
bool colormap = true;
bool showPointCloud = true;
bool fullscreen = false;
long delayT;
//initial//-------------------------------------------------------------------------
// One-time setup: start the camera SDK streams, cache camera parameters,
// and create the GL quad geometry, shader program and shared texture.
void init(void)
{
	//initialize the PicoZense SDK and start the depth + mapped-RGB streams
	FrameHelpper.PsFrameInit();
	//get depthRange to set the Slope (consumed by UShort2Gray)
	PsGetDepthRange(deviceIndex, &DepthRange);
	//get cameraParam to calculate the point-cloud vertex positions
	PsGetCameraParameters(deviceIndex, PsDepthSensor, &cameraParam);
	//get the frame modes so the raw frame buffers can be interpreted
	PsGetFrameMode(deviceIndex, PsRGBFrame, &rgbFrameMode);
	PsGetFrameMode(deviceIndex, PsDepthFrame, &depthFrameMode);

	//initialize OpenGL state
	glClearColor(0.0, 0.0, 0.0, 0.0);
	glEnable(GL_DEBUG_OUTPUT);
	// Fullscreen quad drawn as a 4-vertex triangle strip: the first four
	// vec2 are clip-space positions, the next four are texture coordinates.
	// The V coordinates run opposite to the Y positions (v=1 at the
	// bottom-left vertex) — presumably to compensate for top-down frame
	// rows; confirm against the camera frame layout.
	static const GLfloat quad_data[] ={
		-1.0f, -1.0f,
		1.0f, -1.0f,
		-1.0f, 1.0f,
		1.0f, 1.0f,

		0.0f, 1.0f,
		1.0f, 1.0f,
		0.0f, 0.0f,
		1.0f, 0.0f,
	};
	glGenVertexArrays(1, &vao);
	glBindVertexArray(vao);
	glGenBuffers(1, &quad_vbo);
	glBindBuffer(GL_ARRAY_BUFFER, quad_vbo);
	glBufferData(GL_ARRAY_BUFFER, sizeof(quad_data), quad_data, GL_STATIC_DRAW);
	
	// Attribute 0 = position; attribute 1 = texcoord, which starts after
	// the 8 position floats in the same buffer.
	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
	glEnableVertexAttribArray(0);
	glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)(8 * sizeof(float)));
	glEnableVertexAttribArray(1);
	glBindVertexArray(0);

	//compile and link the display shader (static: deleted at program exit)
	static Shader shader("triangle.vert", "triangle.frag");
	program = shader.programId;

	// Single texture object, re-uploaded each frame for every image view.
	glGenTextures(1, &tex);
	glBindTexture(GL_TEXTURE_2D, tex);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	glBindTexture(GL_TEXTURE_2D, 0);
}
//Display//-------------------------------------------------------------------------
void display(void)
{
	//Get Frame
	GetLocalTime(&sys);
	long dwStart = sys.wMilliseconds;
	PsReadNextFrame(deviceIndex);
	PsGetFrame(deviceIndex, PsRGBFrame, &rgbFrame);
	PsGetFrame(deviceIndex, PsDepthFrame, &depthFrame);
	PsGetFrame(deviceIndex, PsMappedRGBFrame, &depthrgbFrame);
	
	//opengl display
	glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
	glUseProgram(program);
	glBindVertexArray(vao);
	glBindTexture(GL_TEXTURE_2D, tex);
	glUniform1i(glGetUniformLocation(program, "tex"), 0);

	//show RGB image
	if (rgbFrame.pFrameData != NULL && !fullscreen){
		glViewport(640, 480, 853, 480);
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
			rgbFrameMode.resolutionWidth, rgbFrameMode.resolutionHeight,
			0, GL_RGB, GL_UNSIGNED_BYTE, rgbFrame.pFrameData);
		glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
	}

	//show Depth image
	if (depthFrame.pFrameData != NULL&& !fullscreen){
		glViewport(0,480, 640, 480);
		//Depth Frame processing
		PsDepthPixel  *DepthFrameData = (PsDepthPixel *)depthFrame.pFrameData;
		PsGray8Pixel *depthTex = new PsGray8Pixel[640 * 480];
		UShort2Gray(DepthFrameData, depthTex, 640, 480, DepthRange, true);
		if (!colormap){
			//Depth_Gray image
			glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, 640, 480, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, depthTex);
		}
		else{
			//Depth_color image
			PsBGR888Pixel *ColorImg = new PsBGR888Pixel[640 * 480];
			Gray2Color(depthTex, 640, 480, ColorImg);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, ColorImg);
			delete ColorImg;
		}
		delete depthTex;
		glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
	}
	if (depthrgbFrame.pFrameData != NULL&& !fullscreen){
		glViewport(0,0, 640, 480);
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,640, 480,0, GL_RGB, GL_UNSIGNED_BYTE, depthrgbFrame.pFrameData);
		glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
	}
	glBindVertexArray(0);
	glUseProgram(0);
	
	if (showPointCloud){
		if (fullscreen)
			glViewport(0, 0, 1493, 960);
		else
			glViewport(640, 0, 853, 480);
		glEnable(GL_DEPTH_TEST);
		if (thetaY<0){
			thetaY = thetaY + 360;
		}
		if (thetaY>360){
			thetaY = thetaY - 360;
		}
		if (thetaX<0){
			thetaX = thetaX + 360;
		}
		if (thetaX>360){
			thetaX = thetaX - 360;
		}
		glMatrixMode(GL_PROJECTION);
		glLoadIdentity();
		gluPerspective(45.0, (GLfloat)853 / (GLfloat)480, 0.1, 1000.0);
		glMatrixMode(GL_MODELVIEW);
		glLoadIdentity();
		gluLookAt(0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
		glRotatef(thetaX, 1, 0, 0);
		glRotatef(thetaY, 0, 1, 0);
		glScalef(scaleFactor, scaleFactor, scaleFactor);
		glTranslatef(-ptsCen.x, -ptsCen.y, -ptsCen.z);
		DrowPointCloud();
	}
	glutSwapBuffers();

	GetLocalTime(&sys);
	long dwEnd = sys.wMilliseconds;
	long TimeSpend = dwEnd - dwStart;
	long timedelay = dwEnd - delayT;
	delayT = dwEnd;
	if (TimeSpend < 0){
		TimeSpend += 1000;
	}
	if (timedelay < 0){
		timedelay += 1000;
	}
	//printf("frame TimeSpend: %d, fps: %d Hz\n", TimeSpend, 1000 / timedelay);
}
//ShutDown//----------------------------------------------------------------------------
// Window-close callback: free GL objects, then stop the camera streams and
// shut the SDK down via the helper.
void OnShutdown()
{
	//destroy texture, vbo & vao
	glDeleteTextures(1, &tex);
	glDeleteBuffers(1, &quad_vbo);
	glDeleteVertexArrays(1, &vao);
	FrameHelpper.PsFrameClose();
	printf("Shutdown successful\n"); // fixed typo ("successfull") and missing newline
}
//Timer//----------------------------------------------------------------------------
// One-shot GLUT timer that re-arms itself every 33 ms (~30 fps) and asks
// for a redraw, driving the video loop.
void TimerFunc(int timerId)
{
	glutPostRedisplay();
	glutTimerFunc(33, TimerFunc, 1);
}
//Main//----------------------------------------------------------------------------
// Program entry: create the GLUT window, init GLEW and the camera/GL state,
// register callbacks and hand control to the event loop.
int main(int argc, char *argv[])
{
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
	glutInitWindowPosition(200, 10);
	glutInitWindowSize(SCR_WIDTH, SCR_HEIGHT);
	glutCreateWindow("GL_MultiFrame_Viewer");
	PrintUsage();
	// GLEW needs a current GL context, so init it only after glutCreateWindow.
	if (glewInit() != GLEW_OK){
		printf("Failed to initialize GLEW ... exiting");
		exit(EXIT_FAILURE);
	}
	init();

	// Input callbacks: keyboard toggles, mouse buttons/wheel, drag rotation.
	glutKeyboardFunc(&KeyBoards);
	glutMouseFunc(myMouse);
	glutMotionFunc(onMouseMove);

	glutDisplayFunc(&display);

	// ~30 fps redraw timer; OnShutdown releases GL + camera on window close.
	glutTimerFunc(33, TimerFunc, 1);
	glutCloseFunc(OnShutdown);

	glutMainLoop();

	return 0;
}

//Drow PointCloud//----------------------------------------------------------------------------
void DrowPointCloud(){
	PsVector3f  mWorldVector = { 0.0f };
	PsDepthPixel* pdepth = (PsDepthPixel*)depthFrame.pFrameData;
	PsBGR888Pixel * pcolordepth = (PsBGR888Pixel *)depthrgbFrame.pFrameData;
	glPointSize(1.0f);
	float fR = 1.0f, fG =1.0f, fB = 1.0f;
	glBegin(GL_POINTS);
	for (int h = 0; h < 480; h++){
		for (int w = 0; w < 640; w++, pcolordepth++){
			if (pdepth[h * 640 + w] == 0)
				continue; //discard zero-depth points
			else{
				//caculate Vertices Position by depthFrame and cameraParam
				mWorldVector.x = (w - cameraParam.cx) / cameraParam.fx * pdepth[h * 640 + w];
				mWorldVector.y = (h - cameraParam.cy) / cameraParam.fy * pdepth[h * 640 + w];
				mWorldVector.z = pdepth[h * 640 + w];
				fR = (float)pcolordepth->r / 255;
				fG = (float)pcolordepth->g / 255;
				fB = (float)pcolordepth->b / 255;
				//drow point
				glColor3f(fR, fG, fB);
				glVertex3f(mWorldVector.x / 100, -1*mWorldVector.y / 100, -1 * mWorldVector.z / 100);
			}
		}
	}
	glEnd();
}
//KeyBoards//----------------------------------------------------------------------------
void KeyBoards(unsigned char key, int x, int y)
{
	switch (key){
	case '1':
		PsSetDepthRange(deviceIndex, PsNearRange);
		PsGetDepthRange(deviceIndex, &DepthRange);
		std::cout << "Set DepthRange: Near" << std::endl;
		PrintUsage();
		break;
	case '2':
		PsSetDepthRange(deviceIndex, PsMidRange);
		PsGetDepthRange(deviceIndex, &DepthRange);
		std::cout << "Set DepthRange: Mid" << std::endl;
		PrintUsage();
		break;
	case '3':
		PsSetDepthRange(deviceIndex, PsFarRange);
		PsGetDepthRange(deviceIndex, &DepthRange);
		std::cout << "Set DepthRange: Far" << std::endl;
		PrintUsage();
		break;
	case '5':
		colormap = !colormap;
		break;
	case'6':
		showPointCloud = !showPointCloud;
		std::cout << "show pointcloud : " << showPointCloud<<std::endl;
		break;
	case '7':
		PointCloudWriter(true);
		PrintUsage();
		break;
	case '8':
		PointCloudWriter(false);
		PrintUsage();
		break;
	case '9':
		fullscreen = !fullscreen;
		break;
	case 27:
		exit(0);
		break;
	}
}
// Mouse-button handler: left press starts a drag, right press resets the
// view, wheel up/down zooms the point cloud. (The button cases are mutually
// exclusive, so an else-if chain is equivalent to the original ifs.)
void myMouse(int button, int state, int x, int y)
{
	if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN){
		oldx = x;
		oldy = y;
	}
	else if (button == GLUT_RIGHT_BUTTON && state == GLUT_DOWN){
		thetaX = 0;
		thetaY = 0;
		scaleFactor = 1;
		glutPostRedisplay();
	}
	else if (button == GLUT_WHEEL_UP && state == GLUT_UP){
		scaleFactor += 0.1f;
		glutPostRedisplay();
	}
	else if (button == GLUT_WHEEL_DOWN && state == GLUT_UP){
		scaleFactor -= 0.1f;
		glutPostRedisplay();
	}
}
// Drag handler: accumulate mouse deltas and map them to rotation angles
// (90 degrees per 853 pixels of travel), then request a redraw.
void onMouseMove(int x, int y)
{
	dx += x - oldx;
	dy += y - oldy;
	thetaX = dy / 853 * 90;
	thetaY = dx / 853 * 90;
	oldx = x;
	oldy = y;
	glutPostRedisplay();
}

//Auxiliary Function
//PointCloud Writer//----------------------------------------------------------------------------
void PointCloudWriter(bool isDepth){
	std::ofstream pointCloudWriter;
	PsVector3f  WorldVector = { 0.0f };
	PsCameraParameters cameraParam = { 0.0 };
	PsFrame rgbdepthFrame = { 0 };
	PsDepthPixel* pdepth =0;
	int h, w;
	if (!isDepth){
		PsGetFrame(deviceIndex, PsMappedRGBFrame, &rgbdepthFrame);
		pdepth = (PsDepthPixel*)rgbdepthFrame.pFrameData;
		PsGetCameraParameters(deviceIndex, PsRgbSensor, &cameraParam);
		pointCloudWriter.open("RgbDepthPointCloud.txt");\
		h = rgbFrameMode.resolutionHeight;
		w = rgbFrameMode.resolutionWidth;
	}
	else{
		pdepth = (PsDepthPixel*)depthFrame.pFrameData;
		PsGetCameraParameters(deviceIndex, PsDepthSensor, &cameraParam);
		pointCloudWriter.open("PointCloud.txt");
		h = depthFrameMode.resolutionHeight;
		w = depthFrameMode.resolutionWidth;
	}
	pointCloudWriter.setf(std::ios_base::fixed);
	pointCloudWriter.precision(3);
	
	for (int i = 0; i < h; i++)
	{
		for (int j = 0; j < w; j++)
		{
			if (pdepth[i * w + j] == 0)
				continue; //remove zero points
			else{
				WorldVector.x = (j - cameraParam.cx) / cameraParam.fx * pdepth[i * w + j];
				WorldVector.y = (i - cameraParam.cy) / cameraParam.fy * pdepth[i * w + j];
				WorldVector.z = pdepth[i * w + j];
				pointCloudWriter.width(8);
				pointCloudWriter << WorldVector.x << "\t";
				pointCloudWriter.width(8);
				pointCloudWriter << WorldVector.y << "\t";
				pointCloudWriter.width(8);
				pointCloudWriter << WorldVector.z << std::endl;
			}
		}
	}
	std::cout << "Write file" << std::endl;
	pointCloudWriter.close();
}
//color map in rainbow//----------------------------------------------------------------------------
// Map an 8-bit gray image onto a piecewise-linear pseudo-color ramp.
// Gray value 0 stays black so invalid (zero-depth) pixels render as
// background; the remaining range is split into five linear segments.
static void Gray2Color(PsGray8Pixel *GrayImg, int width, int height, PsBGR888Pixel *ColorImg)
{
	const int total = width * height;
	for (int i = 0; i < total; i++)
	{
		const unsigned char g = GrayImg[i];
		PsBGR888Pixel &px = ColorImg[i];
		if (g == 0){                 // black background
			px.r = 0;
			px.g = 0;
			px.b = 0;
		}
		else if (g <= 51){           // segment 1: red -> yellow
			px.r = 255;
			px.g = g * 5;
			px.b = 0;
		}
		else if (g <= 102){          // segment 2: yellow -> green
			px.r = 255 - (g - 51) * 5;
			px.g = 255;
			px.b = 0;
		}
		else if (g <= 153){          // segment 3: green -> cyan
			px.r = 0;
			px.g = 255;
			px.b = (g - 102) * 5;
		}
		else if (g <= 204){          // segment 4: cyan toward blue
			px.r = 0;
			px.g = 255 - static_cast<unsigned char>((g - 153) * 128.0 / 51 + 0.5);
			px.b = 255;
		}
		else{                        // segment 5: 205..255
			px.r = 255;
			px.g = 127 - static_cast<unsigned char>((g - 204) * 127.0 / 51 + 0.5);
			px.b = 0;
		}
	}
}
//Unsigned short to unsigned char//----------------------------------------------------------------------------
// Compress a 16-bit depth (or gray) frame to 8-bit gray for display:
// values in [0, slope) map linearly onto [0, 255); values >= slope clamp
// to 255. The full-scale value depends on the active depth range.
static void UShort2Gray(PsDepthPixel  *DepthImg, PsGray8Pixel *GrayImg, int width, int height, PsDepthRange DepthRange, bool isDepth2Gray)
{
	// int, not uint32_t: the original compared int(DepthImg[i]) < slope with
	// an unsigned slope, a signed/unsigned mismatch.
	int slope;
	//Depth16 to Depth8
	if (isDepth2Gray){
		//slope depends on DepthRange
		if (DepthRange == PsNearRange)
			slope = 1450;
		else if (DepthRange == PsMidRange)
			slope = 3000;
		else
			slope = 4400;
	}
	//Gray16 to Gray8
	else
		slope = 3840;
	const int total = width * height;
	for (int i = 0; i < total; i++)
	{
		// DepthImg[i] is at most 65535, so value * 255 cannot overflow int.
		const int value = int(DepthImg[i]);
		GrayImg[i] = (value < slope) ? (value * 255 / slope) : 255;
	}
}

7,如果不多说两句,这代码仍然没有灵魂~ 对我要开大了

  第一,为啥要定义一个矩形顶点数组:static const GLfloat quad_data[]

不瞒你说,opengl需要一个“画布”也就是一个矩形面片来播放图像,我们不停的给这个面片贴上(通过纹理贴图)相机更新上来的图像,这样就是视频啦~

第二,为什么需要计时器TimerFunc(int value),没有他,你就不知道什么时候更新这个循环,他让我们的循环更加方便;

第三, 为什么把uint_16的深度图转换到uint_8的类型(UShort2Gray)?因为我们不能显示16位的图像,所以将他先映射到255空间,然后给他定义一个伪彩色转换(Gray2Color),这样我们就可以看到漂亮的深度图啦~

第四,点云转换,和彩色点云的绘制,这个有点技术含量,但没有门槛的,一下就会,老生常谈了,那个转换的核心代码到处都是,每得到一个(x,y,z)记得从map好的彩色图里对应取出他的对应像素点,把这个点当成颜色画给那个需要他的坐标点,嗯,picozense 的SDK已经帮你准备好了对准的彩色图,只要你先设定一下,具体看那个helpper;

第五, 我们会显示三路图像视频,和一路点云视频。图像有深度图,彩色图,对齐图,同时点云还要可以被鼠标控制旋转和缩放,这需要一点技巧,我们把他们化成四个小窗口,分别绘制他们,三张图像的播放思路是一样的,点云另起一条思路。这里我们用到一个有用的gl函数:如glViewport(0,480, 640, 480);他指定了这个函数放在哪个位置,嗯,很关键~

第六, 其实是上面没说完的,点云的鼠标控制,当然还有键盘响应事件,我们定义鼠标回调幻术,对幻术,哈哈哈~这需要opengl的一些组合控制才能配合好,哎呀,我实在讲不下去啦,这地方最乱~你自己琢磨吧~你自己琢磨吧~~嗯,推卸真香~

最后,来,给你们看一下这加了特效的duang~

嗯,很简洁的布局,真机智~

 

欢迎转载,不过别忘标注出处哦~

下载地址:Opengl_Display_RGBD.zip

                                       

 

 

 

 

  • 5
    点赞
  • 14
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
### 回答1: rgbd_dataset_freiburg3_walking_xyz是一个用于室内行走场景的RGB-D数据集。该数据集由德国弗莱堡大学计算机视觉实验室创建,并提供了一系列的RGB图像和深度图像。这些图像是通过使用RGB-D相机(如微软的Kinect)采集的。 在这个数据集中,被记录的是一个人在室内环境中行走的场景。通过记录RGB图像和深度图像,我们可以获取到场景中物体的外观信息和距离信息。这些图像可以用于物体识别、姿态估计、三维重建等计算机视觉任务。 由于该数据集是通过跟踪一个人在室内行走的过程来采集的,因此它对于研究行人行为和导航算法也具有很大的应用潜力。研究人员可以使用这个数据集来开发和测试各种室内导航算法,比如行人跟踪、路径规划等。 此外,rgbd_dataset_freiburg3_walking_xyz还提供了与场景关联的时间戳和相机运动信息。这些信息可以用于对图像序列进行校准和对齐,从而提高其在后续处理中的可用性和准确性。 总之,rgbd_dataset_freiburg3_walking_xyz是一个具有丰富视觉信息的室内行走场景的数据集,对于计算机视觉和机器学习领域的研究和应用具有重要价值。通过使用这个数据集,我们可以更好地理解和探索室内行走场景中的物体识别、行人行为和导航算法等问题。 ### 回答2: "rgbd_dataset_freiburg3_walking_xyz" 是一个数据集的名称,主要用于计算机视觉领域中的RGB-D数据处理和定位任务。 该数据集由德国弗莱堡大学提供,并收集了以行走方式获取的RGB-D图像序列。通过使用深度相机RGB相机的组合,该数据集能够同时提供色彩信息和深度信息。这些图像序列记录了一个人在室内环境中步行的过程。 这个数据集对于研究人员和工程师来说非常有价值,因为它提供了一个真实世界的场景,可以用于开发和测试各种基于视觉的应用程序。例如,该数据集可以用于机器人导航系统的开发,通过分析RGB-D图像来实现精确的定位和路径规划。此外,该数据集还可以用于人体动作识别、三维重建和物体识别等其他计算机视觉任务。 数据集中的每个图像序列都有时间戳和相机的运动轨迹信息,这对于研究人员和工程师来说是非常重要的。同时,数据集中还包含相机的内部参数和外部参数,这些参数对于相机姿态的估计和三维重建等任务非常关键。 综上所述,“rgbd_dataset_freiburg3_walking_xyz”是一个用于计算机视觉研究的宝贵数据集,可以用于开发和测试各种基于RGB-D图像的应用程序,如机器人导航、动作识别和三维重建等。 ### 回答3: rgbd_dataset_freiburg3_walking_xyz 是指一个用于视觉定位和运动轨迹估计的RGB-D数据集。 该数据集是由RGB摄像头和深度传感器(D)同时记录下来的。它记录了一个人在弗莱堡市的室内环境中正常行走的场景。 数据集中的每一帧图像都包含RGB图像和深度图。RGB图像可以提供场景的颜色信息,而深度图则可以提供物体的距离和形状信息。 由于是在行走过程中采集的数据,因此数据集中的图像序列可以用于运动轨迹估计和相机跟踪的研究。通过分析连续的图像帧,可以推断相机的位姿和运动轨迹。 这个数据集对于测试和评估基于RGB-D输入的定位和导航算法非常有用。它可以用于构建和训练机器学习模型,进一步提高算法在移动机器人、增强现实、虚拟现实等领域的应用性能。 总之,rgbd_dataset_freiburg3_walking_xyz 数据集是用于研究和测试定位和运动轨迹估计的RGB-D数据集,它记录了在一个室内环境中行走的人的场景,对于相关研究和算法开发具有重要的参考价值。
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值