(四)a.1代码+a.2代码:播放全景视频,按键移动视角,封装函数


跟a.0相比,a.1主要改了一个地方,就是把读取图片改成读取视频帧;
capture = cvCreateFileCapture(VideoAddr);	//这四句就是第一部分的改动,在main函数里面
int frames = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
frame = cvQueryFrame(capture);
picture_remap.rgbImg = cvarrToMat(frame);

//picture_remap.rgbImg = imread("4k.jpg");

a.1完整代码:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2\opencv.hpp> 
#include <opencv2\core\opengl.hpp>
#include <iostream>
#include <Windows.h>
#include <vector>
#include <stdio.h>
#include "conio.h"
#include <opencv2/cudawarping.hpp>
//#include <opencv2/cudaarithm.hpp>

#define __STDC_CONSTANT_MACROS

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
#include "SDL.h"
	//
};


#include <GLTools.h>	// OpenGL toolkit
#include <GLMatrixStack.h>
#include <GLFrame.h>
#include <GLFrustum.h>
#include <GLGeometryTransform.h>
#include <StopWatch.h>

#include <math.h>
#include <stdlib.h>
#include <GL/gl.h>

#ifdef __APPLE__
#include <glut/glut.h>
#else
#include <GL/glut.h>
#endif
//
// --- Panoramic-video input (OpenCV legacy C API) ---
CvCapture * capture = NULL;	// capture handle for the input video
char * VideoAddr = "zzz.mp4";	// path of the equirectangular panorama video
IplImage * frame;	// most recent decoded frame (owned by the capture; do not free)
#define ESC 27	// key codes (not used by the GLUT key handler below)
#define PAUSE 32

// --- OpenGL skybox state ---
GLFrame             viewFrame;	// camera frame moved/rotated by SpecialKeys()
GLFrustum           viewFrustum;	// perspective projection (set in ChangeSize())
GLBatch             cubeBatch;	// skybox cube geometry
GLMatrixStack       modelViewMatrix;
GLMatrixStack       projectionMatrix;
GLGeometryTransform transformPipeline;
GLuint              cubeTexture;	// cubemap texture the video frames are streamed into
GLint               skyBoxShader;	// compiled SkyBox.vp / SkyBox.fp program

GLint               locMVPReflect, locMVReflect, locNormalReflect, locInvertedCamera;	// NOTE(review): unused in this file
GLint				locMVPSkyBox;	// uniform location of the skybox MVP matrix


//Refresh Event (SDL leftovers; not used by the GLUT main loop below)
#define SFM_REFRESH_EVENT  (SDL_USEREVENT + 1)
#define SFM_BREAK_EVENT  (SDL_USEREVENT + 2)

using namespace std;
using namespace cv;

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif


// Per-face remap tables: warpMat1/warpMat2 hold the CPU x/y maps and
// wM1/wM2 their GPU copies; dst receives downloaded faces when debugging.
Mat warpMat1[6], warpMat2[6], dst[6];
Mat warpMat[6];	// combined (x,y) map; NOTE(review): filled in MatInit() but never read
cv::cuda::GpuMat src[2], wM1[6], wM2[6];	// src[2]: ping-pong frame buffers on the GPU
cv::cuda::Stream src_stream[2];	// one upload stream per ping-pong slot
ogl::Buffer wBuf[6];	// GL pixel-unpack buffers, one per cube face (CUDA-GL interop)

// Frame-rate measurement state (GetTickCount based; see RenderPic()).
DWORD start_time, start_time_1, end_time;

// High-resolution timers used for the per-phase prints in render().
LARGE_INTEGER nFreq;
LARGE_INTEGER nBeginTime;
LARGE_INTEGER nEndTime;

struct RGB_pic    // holds the source frame after conversion (yuv -> RGB; translated)
{
	cv::Mat rgbImg;	// the equirectangular RGB frame
	int length;	// NOTE(review): never read or written in this file
	int height;	// NOTE(review): never read or written in this file

} picture_remap;

struct convert_map  // array holding the six cube-face images (translated)
{
	cv::Mat face[6];	// NOTE(review): unused by the GPU path below — faces live in wBuf instead

} cvtmap;


//SDL objects (leftovers from an FFmpeg/SDL variant; unused in this GLUT build)


SDL_Window *screen;

SDL_Renderer* sdlRenderer;

SDL_Texture* sdlTexture;

SDL_Rect sdlRect;
//Parameters	

SDL_Thread *video_tid;

SDL_Thread *pic[6];


SDL_Event event;

SDL_AudioSpec wanted_spec;

// Thread parameters (translated): per-face worker ids

int handle[6] = { 0,1,2,3,4,5 };



// File parameters for the raw-YUV input path (translated: "modify as needed")

FILE* pFileIn;

int w = 1920;

int h = 1080;

cv::Mat yuvImg;	// I420 frame buffer: (h*3/2) rows by w cols, 8-bit

int bufLen = w*h * 3 / 2;	// bytes per I420 frame

unsigned char* pYuvBuf = new unsigned char[bufLen];

Mat dstImg, dst1Img;

unsigned char *pBytes;

// Square of a value. (Original note: transform code, "do not modify", lines 176-278.)
template <typename T>
inline T square(const T value)
{
	const T product = value * value;
	return product;
}

// Clamp x to the inclusive range [lo, hi].
template <typename T>
inline T clamp(const T& x, const T& lo, const T& hi)
{
	if (x < lo)
		return lo;
	if (x > hi)
		return hi;
	return x;
}

// Cube face identifiers used to select the axis rotation in
// cubemapIndexToVec3(). The enumeration order is independent of the GL
// cubemap-target order in cube[]; the switch in MatInit() maps between them.
enum CubemapFace
{
	CUBEMAP_FACE_BACK,
	CUBEMAP_FACE_LEFT,
	CUBEMAP_FACE_FRONT,
	CUBEMAP_FACE_RIGHT,
	CUBEMAP_FACE_TOP,
	CUBEMAP_FACE_BOTTOM
};

// The six OpenGL cubemap targets, indexed 0..5 in the order the faces are
// produced by MatInit()/render(): +X, -X, +Y, -Y, +Z, -Z.
GLenum  cube[6] =
{
	GL_TEXTURE_CUBE_MAP_POSITIVE_X,
	GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
	GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
	GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
	GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
	GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};


// Converts a 2D face coordinate (x, y), expressed in [-0.5, 0.5] on a cube
// face, into the 3D direction from the cube centre through that point: the
// point is placed on a plane at depth 0.5 and the axes are then
// rotated/flipped according to the requested face. TOP is the identity
// orientation (no axis change).
inline Vec3f cubemapIndexToVec3(const float x, const float y, const CubemapFace face)
{
	// rotate and flip the direction as a function of the face
	Vec3f dir(x, y, 0.5f);
	Vec3f dirOut = dir;
	switch (face) {
	case CUBEMAP_FACE_BACK:
		dirOut[0] = dir[0];
		dirOut[1] = dir[2];
		dirOut[2] = -dir[1];
		break;
	case CUBEMAP_FACE_LEFT:
		dirOut[0] = -dir[2];
		dirOut[1] = dir[0];
		dirOut[2] = -dir[1];
		break;
	case CUBEMAP_FACE_TOP: break; // no-op
	case CUBEMAP_FACE_BOTTOM:
		dirOut[0] = dir[0];
		dirOut[1] = -dir[1];
		dirOut[2] = -dir[2];
		break;
	case CUBEMAP_FACE_FRONT:
		dirOut[0] = -dir[0];
		dirOut[1] = -dir[2];
		dirOut[2] = -dir[1];
		break;
	case CUBEMAP_FACE_RIGHT:
		dirOut[0] = dir[2];
		dirOut[1] = -dir[0];
		dirOut[2] = -dir[1];
		break;
	}
	return dirOut;
}

// For a cube-face coordinate (x, y) (see cubemapIndexToVec3), computes the
// corresponding pixel position (srcX, srcY) in the equirectangular source
// image srcEqrMat. fisheyeFovRadians is the vertical angular extent covered
// by the source (PI for a full equirectangular panorama).
void mapEquirectToCubemapCoordinate(
	const float x,
	const float y,
	const CubemapFace& face,
	const Mat& srcEqrMat,
	const float fisheyeFovRadians,
	float& srcX,	// out: column in srcEqrMat
	float& srcY)	// out: row in srcEqrMat
{

	// Spherical angles of the direction: r is its projection onto the XY
	// plane, phi the polar angle, theta the azimuth.
	const Vec3f dir = cubemapIndexToVec3(x, y, face);
	const float r = sqrtf(square(dir[0]) + square(dir[1]));
	const float phi = acosf(dir[2] / norm(dir));
	float theta = r > 0.0f ? acosf(fabs(dir[0] / r)) : 0.0f;

	// acosf(|x|/r) only gives [0, PI/2]; fold it into the correct quadrant
	// so theta spans the full [0, 2*PI) azimuth range.
	if (dir[0] > 0 && dir[1] > 0) { // Quadrant I
									// (nothing to do)
	}
	else if (dir[0] <= 0 && dir[1] > 0) { // Quadrant II
		theta = M_PI - theta;
	}
	else if (dir[0] <= 0 && dir[1] <= 0) { // Quadrant III
		theta = M_PI + theta;
	}
	else { // Quadrant IV
		theta = 2 * M_PI - theta;
	}


	// Scale the clamped angles to source-image pixel coordinates.
	const float phiPrime = clamp(phi, 0.0f, fisheyeFovRadians);
	const float thetaPrime = clamp(theta, 0.0f, float(2.0f * M_PI));
	srcX = float(srcEqrMat.cols) * thetaPrime / (2.0f * M_PI);
	srcY = float(srcEqrMat.rows) * phiPrime / fisheyeFovRadians;
}


int wid = 512;	// cube-face edge length in pixels; recomputed in MatInit()

// Builds the per-face remap tables that turn the equirectangular frame into
// six cube faces, allocates the six GL interop buffers, and uploads the
// tables to the GPU (wM1/wM2). Must run after the source frame size is
// known and a GL context exists. Original note: "do not modify".
int MatInit()
{
				

				CubemapFace face;
				//img size: pick the face edge so six faces hold roughly the
				//same number of pixels as the source frame
				wid = sqrtf((float(picture_remap.rgbImg.cols) * float(picture_remap.rgbImg.rows)) / 6)+0.5;
				
				const float dy = 1.0f / float(wid);
				const float dx = 1.0f / float(wid);
				for (int ii = 0; ii < 6; ii++)
				{
					warpMat1[ii] = Mat(Size(wid, wid), CV_32FC1);	// x-map for cv::cuda::remap
					warpMat2[ii] = Mat(Size(wid, wid), CV_32FC1);	// y-map for cv::cuda::remap
					warpMat[ii] = Mat(Size(wid, wid), CV_32FC2);	// combined (x,y) map (not used downstream)
					// GL pixel-unpack buffer the GPU remap writes straight into
					wBuf[ii].create(Size(wid, wid), picture_remap.rgbImg.type(), ogl::Buffer::Target::PIXEL_UNPACK_BUFFER);
					// Map buffer index ii (GL target order, see cube[]) to the
					// CubemapFace orientation used by the math above.
					switch (ii)
					{
					case 0:
						face = CUBEMAP_FACE_RIGHT;
						break;
					case 1:
						face = CUBEMAP_FACE_LEFT;
						break;
					case 2:
						face = CUBEMAP_FACE_TOP;
						break;
					case 3:
						face = CUBEMAP_FACE_BOTTOM;
						break;
					case 4:
						face = CUBEMAP_FACE_BACK;
						break;
					case 5:
						face = CUBEMAP_FACE_FRONT;
						break;
					}

					// For every destination pixel (i, j) of the face, compute the
					// source pixel in the equirectangular image.
					for (int j = 0; j < wid; ++j)
					{
						for (int i = 0; i < wid; ++i)
						{
							float srcX;
							float srcY;
							mapEquirectToCubemapCoordinate(
								float(i) * dy - 0.5f,
								float(j) * dx - 0.5f,
								face,
								picture_remap.rgbImg,
								M_PI,
								srcX, srcY);

							warpMat1[ii].at<float>(j, i) = static_cast<float>(srcX);
							warpMat2[ii].at<float>(j, i) = static_cast<float>(srcY);
							warpMat[ii].at<Point2f>(j, i) = Point2f(srcX, srcY);
						}
					}
					// Push the finished maps to the GPU once; render() reuses them
					// every frame.
					wM1[ii].upload(warpMat1[ii]);
					wM2[ii].upload(warpMat2[ii]);
				}


		
	
	return 1;
}

char cur_state = 0;	// ping-pong index into src[]/src_stream[]: one slot uploads while the other is processed
// Render step (translated: "paste onto the skybox"): upload the current
// frame to the GPU, remap it into the six cube faces, and copy each face
// into the bound cubemap texture. Each phase is timed and printed.
void render()
{ 
	QueryPerformanceCounter(&nBeginTime);
	// Asynchronous upload of the new frame into the current ping-pong slot.
	src[cur_state].upload(picture_remap.rgbImg, src_stream[cur_state]);
	QueryPerformanceCounter(&nEndTime);
	printf("upload time: %f\n", (double)(nEndTime.QuadPart - nBeginTime.QuadPart) / (double)nFreq.QuadPart);
	int ii;
	cuda::GpuMat dst_mat[6];
	cuda::Stream remap_stream[6];	// one CUDA stream per face so the six remaps can overlap
	// Flip to the other slot: process the frame uploaded on the PREVIOUS
	// call while the upload above runs in the background.
	cur_state = cur_state ^ 0x01;
	QueryPerformanceCounter(&nBeginTime);
	src_stream[cur_state].waitForCompletion();
	QueryPerformanceCounter(&nEndTime);
	printf("wait time: %f\n", (double)(nEndTime.QuadPart - nBeginTime.QuadPart) / (double)nFreq.QuadPart);

	QueryPerformanceCounter(&nBeginTime);
	// Remap each face directly into its mapped GL buffer (CUDA-GL interop).
	for (ii = 0; ii < 6; ii++)
	{
		int faceId = ii;
		dst_mat[ii] = wBuf[ii].mapDevice();
		cv::cuda::remap(
			src[cur_state],
			dst_mat[ii],
			wM1[faceId],
			wM2[faceId],
			CV_INTER_CUBIC,
			BORDER_WRAP,
			cv::Scalar(),
			remap_stream[ii]);
	}
	QueryPerformanceCounter(&nEndTime);
	printf("process time: %f\n", (double)(nEndTime.QuadPart - nBeginTime.QuadPart) / (double)nFreq.QuadPart);
	QueryPerformanceCounter(&nBeginTime);
	// Wait for each remap, hand the buffer back to GL, and update the
	// corresponding cubemap face from the bound pixel-unpack buffer
	// (data pointer 0 means "read from the bound PBO").
	for (ii = 0; ii < 6; ii++)
	{
		remap_stream[ii].waitForCompletion();
		wBuf[ii].unmapDevice();
		wBuf[ii].bind(cv::ogl::Buffer::Target::PIXEL_UNPACK_BUFFER);
		glTexSubImage2D(cube[ii], 0, 0, 0, wid, wid, GL_BGR_EXT, GL_UNSIGNED_BYTE, 0);
		//write img (debug path: download and save each face)
		
		/*
		dst_mat[ii].download(dst[ii]);
		char name[] = "L0.jpg";
		name[1] = '0' + ii;
		imwrite(name, dst[ii]);*/
	}

	QueryPerformanceCounter(&nEndTime);
	printf("render time: %f\n", (double)(nEndTime.QuadPart - nBeginTime.QuadPart) / (double)nFreq.QuadPart);
}

//opengl渲染过程:一开始创建一个纹理绑定在某个模型                 gltexture_2D改成_cubemape
void SetupRC()  //初始化 cubemapa 定点对应
{

	// Cull backs of polygons
	glCullFace(GL_BACK);
	glFrontFace(GL_CCW);
	glEnable(GL_DEPTH_TEST);


	// Set up texture maps        
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
	glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

	glGenTextures(1, &cubeTexture);       //创建一个纹理
	glBindTexture(GL_TEXTURE_CUBE_MAP, cubeTexture); //绑定一个纹理



	//pre upload one frame
	src[1].upload(picture_remap.rgbImg, src_stream[1]);
	for (int ii = 0; ii < 6; ii++)
	{
		int faceId = ii;

		glTexImage2D(cube[ii], 0, GL_RGB, wid, wid, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, 0);// (void *)cvtmap.face[faceId].data); //512*512 贴图
	}
	glGenerateMipmap(GL_TEXTURE_CUBE_MAP);

	viewFrame.MoveForward(-4.0f);
	gltMakeCube(cubeBatch, 20.0f);


	skyBoxShader = gltLoadShaderPairWithAttributes("SkyBox.vp", "SkyBox.fp", 2,
		GLT_ATTRIBUTE_VERTEX, "vVertex",
		GLT_ATTRIBUTE_NORMAL, "vNormal");

	locMVPSkyBox = glGetUniformLocation(skyBoxShader, "mvpMatrix");

	glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);


}




// Releases the cubemap texture created in SetupRC().
void ShutdownRC(void)
{
	glDeleteTextures(1, &cubeTexture);
}

int ct = 0;	// frame counter; the first 10 frames are warm-up before FPS is measured

// Runs render() for the current frame and prints a running FPS estimate:
// the timer starts on frame 10 and the rate is averaged over every frame
// after that. Finally requests the next redraw.
void RenderPic(void)
{
	// Legacy raw-YUV input path, kept for reference:
	//fread(yuvImg.data, bufLen * sizeof(unsigned char), 1, pFileIn);
	//memcpy(yuvImg.data, pYuvBuf, bufLen * sizeof(unsigned char));
	//cv::cvtColor(yuvImg, picture_remap.rgbImg, CV_YUV2RGB_I420);

	render();
	ct++;
	if (ct == 10)
	{
		start_time = GetTickCount();
	}
	else if (ct > 10)
	{
		end_time = GetTickCount();
		float time = (end_time - start_time)*1.0 / 1000;
		// GetTickCount() has ~10-16 ms resolution, so the first frames after
		// warm-up can report a 0 ms interval; the original divided by zero.
		if (time > 0)
			printf("帧率 %f \n", (ct - 10) / time);
	}
	glutPostRedisplay();
}


// Called to draw scene: grab the next video frame, draw the skybox with the
// current camera orientation, then hand off to RenderPic() for the GPU
// remap/upload and FPS bookkeeping. When the stream ends (frame == NULL)
// nothing is drawn, so the last image stays on screen.
void RenderScene(void)
{
	//cvWaitKey(33);
	frame = cvQueryFrame(capture);	// owned by the capture; NULL at end of stream
	if (frame) {
		// Wrap the IplImage as a cv::Mat (no pixel copy).
		picture_remap.rgbImg = cvarrToMat(frame);
		// Clear the window
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

		M3DMatrix44f mCamera;
		M3DMatrix44f mCameraRotOnly;
		M3DMatrix44f mInverseCamera;

		// Rotation-only camera matrix: the skybox follows the viewer's
		// orientation but must never translate.
		// NOTE(review): mCamera and mInverseCamera are computed but unused.
		viewFrame.GetCameraMatrix(mCamera, false);
		viewFrame.GetCameraMatrix(mCameraRotOnly, true);
		m3dInvertMatrix44(mInverseCamera, mCameraRotOnly);

		modelViewMatrix.PushMatrix();
		modelViewMatrix.MultMatrix(mCameraRotOnly);
		glUseProgram(skyBoxShader);
		glUniformMatrix4fv(locMVPSkyBox, 1, GL_FALSE, transformPipeline.GetModelViewProjectionMatrix());
		cubeBatch.Draw();
		modelViewMatrix.PopMatrix();
		// Remap the new frame into the cubemap and schedule the next frame.
		RenderPic();
		//glutPostRedisplay();
		// Do the buffer Swap
		glutSwapBuffers();

	}
	
}


// GLUT special-key handler: arrow keys rotate the view, PageUp/PageDown
// move the camera forward/backward, then a redraw is requested.
void SpecialKeys(int key, int x, int y)
{
	switch (key)
	{
	case GLUT_KEY_PAGE_UP:
		viewFrame.MoveForward(0.1f);
		break;
	case GLUT_KEY_PAGE_DOWN:
		viewFrame.MoveForward(-0.1f);
		break;
	case GLUT_KEY_LEFT:
		viewFrame.RotateLocalY(0.1);
		break;
	case GLUT_KEY_RIGHT:
		viewFrame.RotateLocalY(-0.1);
		break;
	case GLUT_KEY_UP:
		viewFrame.RotateLocalX(0.1f);
		break;
	case GLUT_KEY_DOWN:
		viewFrame.RotateLocalX(-0.1f);
		break;
	}

	// Refresh the Window
	glutPostRedisplay();
}


// GLUT reshape handler: updates the GL viewport and rebuilds the
// perspective projection for the new window dimensions.
void ChangeSize(int w, int h)
{
	// Never let the height be zero — it is used as a divisor below.
	const int height = (h == 0) ? 1 : h;

	// Map the viewport to the full window.
	glViewport(0, 0, w, height);

	// 55-degree vertical FOV perspective camera for the new aspect ratio.
	viewFrustum.SetPerspective(55.0f, float(w) / float(height), 0.1f, 1000.0f);
	projectionMatrix.LoadMatrix(viewFrustum.GetProjectionMatrix());
	transformPipeline.SetMatrixStacks(modelViewMatrix, projectionMatrix);
}

// Opens the raw YUV input file and allocates the I420 frame buffer
// ((h*3/2) rows by w cols, 8-bit single channel).
void FileSetup()
{
	pFileIn = fopen("1920.yuv", "rb+");
	if (pFileIn == NULL)
	{
		// The original ignored a failed open and would have crashed on the
		// first fread; report it instead (pFileIn stays NULL).
		fprintf(stderr, "FileSetup: cannot open 1920.yuv\n");
		return;
	}

	printf("yuv file w: %d, h: %d \n", w, h);

	yuvImg.create(h * 3 / 2, w, CV_8U);
}





// Entry point: open the panoramic video, build the remap tables from the
// first frame's size, set up the GLUT window / GL state, and run the loop.
int main(int argc, char **argv)
{
//	FileSetup();
	// Pinned (page-locked) host memory makes the per-frame GPU uploads fast.
	picture_remap.rgbImg.setDefaultAllocator(cv::cuda::HostMem::getAllocator(cv::cuda::HostMem::AllocType::PAGE_LOCKED));
	picture_remap.rgbImg.create(4096, 2048, CV_8UC3);

	QueryPerformanceFrequency(&nFreq);

	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
	glutInitWindowSize(800, 600);
	glutCreateWindow("OpenGL Cube Maps");

	// Open the panoramic video and grab the first frame (replaces the old
	// still-image / raw-YUV input path). The original checked neither the
	// capture nor the frame, crashing in cvarrToMat when zzz.mp4 is missing.
	capture = cvCreateFileCapture(VideoAddr);
	if (!capture) {
		fprintf(stderr, "cannot open video file %s\n", VideoAddr);
		return 1;
	}
	int frames = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);	// total frame count (informational)
	frame = cvQueryFrame(capture);
	if (!frame) {
		fprintf(stderr, "cannot read first frame from %s\n", VideoAddr);
		cvReleaseCapture(&capture);
		return 1;
	}
	picture_remap.rgbImg = cvarrToMat(frame);

	//picture_remap.rgbImg = imread("4k.jpg");
	w = picture_remap.rgbImg.cols;
	h = picture_remap.rgbImg.rows;

	// Build the equirect-to-cubemap remap tables for this frame size.
	MatInit();

	glutReshapeFunc(ChangeSize);
	glutDisplayFunc(RenderScene);
	glutSpecialFunc(SpecialKeys);

	GLenum err = glewInit();
	if (GLEW_OK != err) {
		fprintf(stderr, "GLEW Error: %s\n", glewGetErrorString(err));
		return 1;
	}

	SetupRC();

	glutMainLoop();

	// NOTE(review): classic GLUT's glutMainLoop() never returns, so the
	// cleanup below is unreachable; kept for toolkits (e.g. freeglut with
	// glutLeaveMainLoop) where the loop can end.
	ShutdownRC();
	delete[] pYuvBuf;
	yuvImg.release();
	cvReleaseCapture(&capture);
	if (pFileIn)	// FileSetup() is disabled above, so pFileIn may be NULL
		fclose(pFileIn);

	return 0;
}

在a.1基础删减不必要的代码,改进功能为切割一张图片为立方体六面图片。
a.0代码虽然只是处理图片,但实验室学长早就考虑到后面处理视频等情况,所以提前应用了乒乓buffer来加载图片到GPU里进行双线程处理,我在a.2中去掉了,在c.0中补上了,即 cv::cuda::Stream src_stream[2];cuda::GpuMat dst_mat[6]; cuda::Stream remap_stream[6];

(尴尬,这一行怎么去掉,,,,,,)

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2\opencv.hpp> 
#include <opencv2\core\opengl.hpp>
#include <iostream>
#include <Windows.h>
#include <vector>
#include <stdio.h>
#include "conio.h"
#include <math.h>
#include <stdlib.h>
#include <GL/gl.h>
#include <opencv2/cudawarping.hpp>
using namespace std;
using namespace cv;

#define M_PI 3.14159265358979323846
// The six OpenGL cubemap targets, indexed 0..5 in the order the faces are
// produced below: +X, -X, +Y, -Y, +Z, -Z.
GLenum  cube[6] =
{
	GL_TEXTURE_CUBE_MAP_POSITIVE_X,
	GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
	GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
	GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
	GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
	GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

// Cube face identifiers used to select the axis rotation in
// cubemapIndexToVec3(); the switch in MatInit() maps buffer index 0..5
// (GL target order, see cube[]) to these orientations.
enum CubemapFace
{
	CUBEMAP_FACE_BACK,
	CUBEMAP_FACE_LEFT,
	CUBEMAP_FACE_FRONT,
	CUBEMAP_FACE_RIGHT,
	CUBEMAP_FACE_TOP,
	CUBEMAP_FACE_BOTTOM
};
// CPU remap tables (warpMat*), their GPU copies (wM1/wM2), the ping-pong
// source buffers (src + src_stream) and the six GL interop face buffers.
Mat warpMat1[6], warpMat2[6], dst[6];
Mat warpMat[6];	// combined (x,y) map; NOTE(review): written in MatInit() but never read
cv::cuda::GpuMat src[2], wM1[6], wM2[6];
cv::cuda::Stream src_stream[2];
ogl::Buffer wBuf[6];	// GL pixel-unpack buffers the faces are remapped into
int wid = 1024;	// face edge length; recomputed in MatInit()
int w = 1920;	// source image size, set from the loaded picture in main()
int h = 1080;
char cur_state = 0;	// ping-pong index; unused in this single-image version
struct RGB_pic    // holds the source file after yuv->RGB conversion (translated)
{
	cv::Mat rgbImg;	// the equirectangular source image
	int length;	// NOTE(review): unused
	int height;	// NOTE(review): unused

} picture_remap;

// Returns the square of v.
template <typename T>
inline T square(const T v)
{
	T out = v;
	out *= v;
	return out;
}

// Bound x below by a and above by b (assumes a <= b).
template <typename T>
inline T clamp(const T& x, const T& a, const T& b)
{
	T result = x;
	if (result < a)
		result = a;
	else if (result > b)
		result = b;
	return result;
}
// Converts a 2D face coordinate (x, y in [-0.5, 0.5]) into the 3D direction
// from the cube centre through that point on the given face: the point sits
// on a plane at depth 0.5 and the axes are rotated/flipped per face
// (TOP is the identity orientation).
inline Vec3f cubemapIndexToVec3(const float x, const float y, const CubemapFace face)
{
	// rotate and flip the direction as a function of the face
	Vec3f dir(x, y, 0.5f);
	Vec3f dirOut = dir;
	switch (face) {
	case CUBEMAP_FACE_BACK:
		dirOut[0] = dir[0];
		dirOut[1] = dir[2];
		dirOut[2] = -dir[1];
		break;
	case CUBEMAP_FACE_LEFT:
		dirOut[0] = -dir[2];
		dirOut[1] = dir[0];
		dirOut[2] = -dir[1];
		break;
	case CUBEMAP_FACE_TOP: break; // no-op
	case CUBEMAP_FACE_BOTTOM:
		dirOut[0] = dir[0];
		dirOut[1] = -dir[1];
		dirOut[2] = -dir[2];
		break;
	case CUBEMAP_FACE_FRONT:
		dirOut[0] = -dir[0];
		dirOut[1] = -dir[2];
		dirOut[2] = -dir[1];
		break;
	case CUBEMAP_FACE_RIGHT:
		dirOut[0] = dir[2];
		dirOut[1] = -dir[0];
		dirOut[2] = -dir[1];
		break;
	}
	return dirOut;
}
// For a cube-face coordinate (x, y), computes the corresponding pixel
// position (srcX, srcY) in the equirectangular source image srcEqrMat.
// fisheyeFovRadians is the vertical angular extent of the source (PI for a
// full equirectangular panorama).
void mapEquirectToCubemapCoordinate(
	const float x,
	const float y,
	const CubemapFace& face,
	const Mat& srcEqrMat,
	const float fisheyeFovRadians,
	float& srcX,	// out: column in srcEqrMat
	float& srcY)	// out: row in srcEqrMat
{

	// Spherical angles of the direction: r projects onto the XY plane,
	// phi is the polar angle, theta the azimuth.
	const Vec3f dir = cubemapIndexToVec3(x, y, face);
	const float r = sqrtf(square(dir[0]) + square(dir[1]));
	const float phi = acosf(dir[2] / norm(dir));
	float theta = r > 0.0f ? acosf(fabs(dir[0] / r)) : 0.0f;

	// acosf(|x|/r) only gives [0, PI/2]; fold into the correct quadrant so
	// theta covers the full [0, 2*PI) range.
	if (dir[0] > 0 && dir[1] > 0) { // Quadrant I
									// (nothing to do)
	}
	else if (dir[0] <= 0 && dir[1] > 0) { // Quadrant II
		theta = M_PI - theta;
	}
	else if (dir[0] <= 0 && dir[1] <= 0) { // Quadrant III
		theta = M_PI + theta;
	}
	else { // Quadrant IV
		theta = 2 * M_PI - theta;
	}


	// Scale the clamped angles to source-image pixel coordinates.
	const float phiPrime = clamp(phi, 0.0f, fisheyeFovRadians);
	const float thetaPrime = clamp(theta, 0.0f, float(2.0f * M_PI));
	srcX = float(srcEqrMat.cols) * thetaPrime / (2.0f * M_PI);
	srcY = float(srcEqrMat.rows) * phiPrime / fisheyeFovRadians;
}

// Builds the per-face remap tables that turn the equirectangular image into
// six cube faces, allocates the GL interop buffers, and uploads the tables
// to the GPU. Must run after the image size is known and a GL context
// exists. Original note: "do not modify".
int MatInit()
{


	CubemapFace face;
	//img size: choose the face edge so six faces hold roughly as many
	//pixels as the source image
	wid = sqrtf((float(picture_remap.rgbImg.cols) * float(picture_remap.rgbImg.rows)) / 6) + 0.5;

	const float dy = 1.0f / float(wid);
	const float dx = 1.0f / float(wid);
	for (int ii = 0; ii < 6; ii++)
	{
		warpMat1[ii] = Mat(Size(wid, wid), CV_32FC1);	// x-map for cv::cuda::remap
		warpMat2[ii] = Mat(Size(wid, wid), CV_32FC1);	// y-map for cv::cuda::remap
		warpMat[ii] = Mat(Size(wid, wid), CV_32FC2);	// combined map (unused downstream)
		// GL pixel-unpack buffer the GPU remap writes straight into
		wBuf[ii].create(Size(wid, wid), picture_remap.rgbImg.type(), ogl::Buffer::Target::PIXEL_UNPACK_BUFFER);
		// Map buffer index ii (GL target order, see cube[]) to the
		// CubemapFace orientation used by the math above.
		switch (ii)
		{
		case 0:
			face = CUBEMAP_FACE_RIGHT;
			break;
		case 1:
			face = CUBEMAP_FACE_LEFT;
			break;
		case 2:
			face = CUBEMAP_FACE_TOP;
			break;
		case 3:
			face = CUBEMAP_FACE_BOTTOM;
			break;
		case 4:
			face = CUBEMAP_FACE_BACK;
			break;
		case 5:
			face = CUBEMAP_FACE_FRONT;
			break;
		}

		// For every destination pixel (i, j) of the face, compute the
		// source pixel in the equirectangular image.
		for (int j = 0; j < wid; ++j)
		{
			for (int i = 0; i < wid; ++i)
			{
				float srcX;
				float srcY;
				mapEquirectToCubemapCoordinate(
					float(i) * dy - 0.5f,
					float(j) * dx - 0.5f,
					face,
					picture_remap.rgbImg,
					M_PI,
					srcX, srcY);

				warpMat1[ii].at<float>(j, i) = static_cast<float>(srcX);
				warpMat2[ii].at<float>(j, i) = static_cast<float>(srcY);
				warpMat[ii].at<Point2f>(j, i) = Point2f(srcX, srcY);
			}
		}
		// Upload the finished maps once; the remap in main() reuses them.
		wM1[ii].upload(warpMat1[ii]);
		wM2[ii].upload(warpMat2[ii]);
	}




	return 1;
}
// Entry point of the trimmed a.2 version: load one equirectangular image,
// remap it into six cube faces on the GPU, and write them out as L0..L5.jpg.
int main(int argc, char **argv) {
	// Pinned (page-locked) host memory makes the GPU upload fast.
	picture_remap.rgbImg.setDefaultAllocator(cv::cuda::HostMem::getAllocator(cv::cuda::HostMem::AllocType::PAGE_LOCKED));
	picture_remap.rgbImg.create(4096, 2048, CV_8UC3);

	// NOTE(review): the original also called QueryPerformanceFrequency(&nFreq)
	// here, but nFreq is not declared anywhere in this trimmed listing; the
	// timing prints were removed with it, so the call is dropped.

	// A GL context is still required because the face buffers are
	// cv::ogl::Buffer objects (OpenGL pixel-unpack buffers).
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
	glutInitWindowSize(800, 600);
	glutCreateWindow("OpenGL Cube Maps");

	// Load the equirectangular source image; the original did not check the
	// result and crashed later on an empty Mat when 4k.jpg was missing.
	picture_remap.rgbImg = imread("4k.jpg");
	if (picture_remap.rgbImg.empty())
	{
		fprintf(stderr, "cannot read 4k.jpg\n");
		return 1;
	}
	src[1].upload(picture_remap.rgbImg, src_stream[1]);

	w = picture_remap.rgbImg.cols;
	h = picture_remap.rgbImg.rows;
	MatInit();

	int ii;
	cuda::GpuMat dst_mat[6];
	cuda::Stream remap_stream[6];

	// The upload above is asynchronous on src_stream[1]; wait before the
	// remaps read src[1] (the original had this wait commented out — a race).
	src_stream[1].waitForCompletion();

	// Remap each face on its own CUDA stream, writing straight into the
	// mapped OpenGL buffers.
	for (ii = 0; ii < 6; ii++)
	{
		int faceId = ii;
		dst_mat[ii] = wBuf[ii].mapDevice();
		cv::cuda::remap(
			src[1],
			dst_mat[ii],
			wM1[faceId],
			wM2[faceId],
			CV_INTER_CUBIC,
			BORDER_WRAP,
			cv::Scalar(),
			remap_stream[ii]);
	}
	for (ii = 0; ii < 6; ii++)
	{
		// Wait for the asynchronous remap before its result is consumed
		// (the original had this wait commented out).
		remap_stream[ii].waitForCompletion();

		// Download and save this face WHILE the interop buffer is still
		// mapped: after unmapDevice() the dst_mat pointer is no longer valid,
		// but the original downloaded after the unmap.
		dst_mat[ii].download(dst[ii]);
		char name[] = "L0.jpg";
		name[1] = '0' + ii;
		imwrite(name, dst[ii]);

		wBuf[ii].unmapDevice();
		wBuf[ii].bind(cv::ogl::Buffer::Target::PIXEL_UNPACK_BUFFER);
		// NOTE(review): no cubemap texture is ever created or bound in this
		// trimmed version, so this upload has no visible target — confirm
		// whether the GL path should be removed entirely here.
		glTexSubImage2D(cube[ii], 0, 0, 0, wid, wid, GL_BGR_EXT, GL_UNSIGNED_BYTE, 0);
	}
	return 0;
}

下面是我改代码时,更换,删减,有用,没用的都有,很乱,没有解释:

 
//算当前帧率
if (++ct==30){ct = 0;end_time = GetTickCount();printf("帧率 %f \n", 30000.0/ (end_time - start_time));start_time = end_time;}//double rate = capture.get(CV_CAP_PROP_FPS);//获取原始视频帧率long totalFrameNumber = capture.get(CV_CAP_PROP_FRAME_COUNT);//获取总帧数int frameH = (int) cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_HEIGHT); int frameW = (int) cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_WIDTH); int fps = (int) cvGetCaptureProperty(pCapture, CV_CAP_PROP_FPS); int numFrames = (int) cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_COUNT); int delay = 1000/rate;//两帧间的间隔时间int c = waitKey(delay);//这个算帧率会出错,不知道为啥double fps;double t = 0;t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();fps = 1.0 / t;//没用waitKey(300);RenderFrame()-SetupRC//读取视频宏定义啥的CvCapture * capture = NULL;char * VideoAddr = "zzz.mp4";IplImage * frame;Mat rgbImg;#define ImgRows 512#define ImgCols 512//读取视频+获取下一帧+帧转为Mat型capture = cvCreateFileCapture(VideoAddr);int frames = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);frame = cvQueryFrame(capture);rgbImg = cvarrToMat(frame);//RenderScene-SetupRCframe = cvQueryFrame(capture);if (frame)rgbImg = cvarrToMat(frame);createCubeMapFace(rgbImg,img[0],0 , ImgRows, ImgCols);//网上找的切割映射函数,效果很差,没有实验室学者写得代码好createCubeMapFace(rgbImg,img[1],1 , ImgRows, ImgCols);createCubeMapFace(rgbImg,img[2],2 , ImgRows, ImgCols);createCubeMapFace(rgbImg,img[3],3 , ImgRows, ImgCols);createCubeMapFace(rgbImg,img[4],4 , ImgRows, ImgCols);createCubeMapFace(rgbImg,img[5],5 , ImgRows, ImgCols);// 网上找的切割映射函数,效果很差,没有实验室学者写得代码好#include <opencv2/core/core.hpp>#include <opencv2/imgproc/imgproc.hpp>#include <opencv2/highgui/highgui.hpp>const float M_PI = 3.1415926;float faceTransform[6][2] ={{0, 0},{M_PI / 2, 0},{M_PI, 0},{-M_PI / 2, 0},{0, -M_PI / 2},{0, M_PI / 2}};inline void createCubeMapFace(const Mat &in, Mat &face, int faceId = 0, const int width = -1, const int height = -1){float inWidth = in.cols;float inHeight = in.rows;// Allocate mapMat mapx(width, height, 
CV_32F);//(in.size(), CV_32F);Mat mapy(width, height, CV_32F);//(in.size(), CV_32F);// Calculate adjacent (ak) and opposite (an) of the// triangle that is spanned from the sphere center//to our cube face.const float an = sin(M_PI / 4);const float ak = cos(M_PI / 4);const float ftu = faceTransform[faceId][0];const float ftv = faceTransform[faceId][1];// For each point in the target image,// calculate the corresponding source coordinates.对目标图像的每个点计算相应的源坐标系for(int y = 0; y < height; y++) {for(int x = 0; x < width; x++) {// Map face pixel coordinates to [-1, 1] on planefloat nx = (float)y / (float)height - 0.5f;float ny = (float)x / (float)width - 0.5f;nx *= 2;ny *= 2;// Map [-1, 1] plane coords to [-an, an]// thats the coordinates in respect to a unit sphere// that contains our box.nx *= an;ny *= an;float u, v;// Project from plane to sphere surface.if(ftv == 0) {// Center facesu = atan2(nx, ak);v = atan2(ny * cos(u), ak);u += ftu;} else if(ftv > 0) {// Bottom facefloat d = sqrt(nx * nx + ny * ny);v = M_PI / 2 - atan2(d, ak);u = atan2(ny, nx);} else {// Top facefloat d = sqrt(nx * nx + ny * ny);v = -M_PI / 2 + atan2(d, ak);u = atan2(-ny, nx);}// Map from angular coordinates to [-1, 1], respectively.u = u / (M_PI);v = v / (M_PI / 2);// Warp around, if our coordinates are out of bounds.while (v < -1) {v += 2;u += 1;}while (v > 1) {v -= 2;u += 1;}while(u < -1) {u += 2;}while(u > 1) {u -= 2;}// Map from [-1, 1] to in texture spaceu = u / 2.0f + 0.5f;v = v / 2.0f + 0.5f;u = u * (inWidth - 1);v = v * (inHeight - 1);// Save the result for this pixel in mapmapx.at<float>(x, y) = u;mapy.at<float>(x, y) = v;}}// Recreate output image if it has wrong size or type./*if(face.cols != width || face.rows != height ||face.type() != in.type()) {face = Mat(width, height, in.type());}*/// Do actual resampling using OpenCV's remapremap(in, face, mapx, mapy, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));}

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值