原创作品,转载请注明出处。抛砖引玉,只说核心部分。
首先上效果图,已经将三维模型出色地显示在二维摄像头数据之上。
1、在github拉一份处理APPLE CVPixelBufferRef的view
github地址:https://github.com/reetyo/AAPLEAGLLayer
这个Layer处理了来自camera的代理中的CVPixelBufferRef,里面存的是YUV420数据
// Create GL textures for one camera frame held in a bi-planar YUV420 (NV12)
// CVPixelBufferRef, using the CVOpenGLESTextureCache zero-copy fast path.
// NOTE(review): assumes _videoTextureCache was created earlier with
// CVOpenGLESTextureCacheCreate and that pixelBuffer is bi-planar YUV420 —
// confirm against the capture-session pixel format.
glActiveTexture(GL_TEXTURE0);
// Luminance (Y) plane: one byte per pixel, uploaded as a single-channel
// GL_RED_EXT texture bound on texture unit 0.
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
_videoTextureCache,
pixelBuffer,
NULL,
GL_TEXTURE_2D,
GL_RED_EXT,
frameWidth,
frameHeight,
GL_RED_EXT,
GL_UNSIGNED_BYTE,
0,
&_lumaTexture);
if (err) {
NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}
// The cache decides the actual target/name; always bind via the accessors.
glBindTexture(CVOpenGLESTextureGetTarget(_lumaTexture), CVOpenGLESTextureGetName(_lumaTexture));
// Configure min/mag filtering and S/T wrap mode for the luma texture.
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Chroma (UV) plane texture — present only for bi-planar formats.
if(planeCount == 2) {
glActiveTexture(GL_TEXTURE1);
// Interleaved UV plane: two bytes per sample (GL_RG_EXT), at half the
// luma resolution in each dimension (4:2:0 subsampling), plane index 1.
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
_videoTextureCache,
pixelBuffer,
NULL,
GL_TEXTURE_2D,
GL_RG_EXT,
frameWidth / 2,
frameHeight / 2,
GL_RG_EXT,
GL_UNSIGNED_BYTE,
1,
&_chromaTexture);
if (err) {
NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}
// 5. Bind the chroma texture on texture unit 1.
glBindTexture(CVOpenGLESTextureGetTarget(_chromaTexture), CVOpenGLESTextureGetName(_chromaTexture));
// 6. Configure min/mag filtering and S/T wrap mode for the chroma texture.
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
其中调用了两次glActiveTexture,一个纹理单元配置Y,一个纹理单元配置UV。关键之处在这:二维数据占用了前两个纹理单元,那么加载三维模型所需要的若干张纹理时需要激活第三个纹理单元,否则你在调用glBindTexture时,OpenGL默认绑定的是0号纹理单元,也就是GL_TEXTURE0,你的模型上会出现全是红色的摄像头数据,那是因为画的是摄像头的Y数据。
2、透视投影和平行投影 共生的问题。
摄像头数据以平行投影的方式呈现在屏幕之上,附加三维需要在frameBuf里面绑定depthBuf,绑定深度缓冲区之前请先绑定colorBuf(可以自行测试 调换顺序),处理平行投影和透视投影的深度问题,
void ortho( float left, float right, float bottom, float top, float zNear, float zFar )
平行投影的函数中后两个形参zNear和zFar定义了平行投影的深度信息
void perspective(float fovy, float aspect, float zNear, float zFar)
透视投影问题同以上。
在update函数之中首先清除颜色缓冲区和深度缓冲区,启用深度信息,绘制摄像头数据,开启了TEXTURE0和TEXTURE1两个纹理单元,将摄像头的position的Z坐标设置为0,Near和Far的值分别为-1和1,再次清除深度缓冲区,然后绘制三维数据,开启TEXTURE2,注意此时的三维数据的z值必须大于0,否则会被平行投影的数据遮挡。
末尾附上shader代码:
#pragma mark - yuv420转rgb shader
// Vertex shader for the camera quad: applies a Z-axis rotation (driven by the
// preferredRotation uniform, to compensate for device orientation) on top of
// the MVP transform, and passes the texture coordinate through to the
// fragment stage.
// NOTE(review): rotationMatrix is the leftmost factor, so the rotation is
// applied after mvp, i.e. in clip space — confirm this ordering is intended.
const GLchar *shader_vsh = (const GLchar*)"attribute vec4 position;"
"attribute vec2 texCoord;"
"uniform float preferredRotation;"
"varying vec2 texCoordVarying;"
"uniform mat4 mvp;"
"void main()"
"{"
" mat4 rotationMatrix = mat4(cos(preferredRotation), -sin(preferredRotation), 0.0, 0.0,"
" sin(preferredRotation), cos(preferredRotation), 0.0, 0.0,"
" 0.0, 0.0, 1.0, 0.0,"
" 0.0, 0.0, 0.0, 1.0);"
" gl_Position =rotationMatrix * mvp * position ;"
" texCoordVarying = texCoord;"
"}";
// Fragment shader: samples the Y plane (SamplerY, texture unit 0) and the
// interleaved UV plane (SamplerUV, texture unit 1), recenters the samples
// (video-range luma offset 16/255, chroma offset 0.5), then converts
// YUV -> RGB with the externally supplied colorConversionMatrix.
const GLchar *shader_fsh = (const GLchar*)"varying highp vec2 texCoordVarying;"
"precision mediump float;"
"uniform sampler2D SamplerY;"
"uniform sampler2D SamplerUV;"
"uniform mat3 colorConversionMatrix;"
"void main()"
"{"
" mediump vec3 yuv;"
" lowp vec3 rgb;"
" yuv.x = (texture2D(SamplerY, texCoordVarying).r - (16.0/255.0));"
" yuv.yz = (texture2D(SamplerUV, texCoordVarying).rg - vec2(0.5, 0.5));"
" rgb = colorConversionMatrix * yuv;"
" gl_FragColor = vec4(rgb, 1);"
"}";
#pragma mark - 三维 shader
// Vertex shader for the 3D model: a plain MVP transform plus texture-coordinate
// pass-through (no orientation rotation, unlike the camera-quad shader).
const GLchar *shaderv = (const GLchar*)
"attribute vec4 position;"
"attribute vec2 texCoord;"
"uniform mat4 mvp;"
"varying vec2 varyTextCoord;"
"void main()"
"{"
" gl_Position = mvp * position;"
" varyTextCoord = texCoord;"
"}";
// Fragment shader for the 3D model: straight texture lookup from colorMap
// (bound on texture unit 2 per the write-up above, since units 0/1 hold the
// camera's Y and UV planes).
const GLchar *shaderf = (const GLchar*)
"varying highp vec2 varyTextCoord;"
"precision mediump float;"
"uniform sampler2D colorMap;"
"void main()"
"{"
" gl_FragColor = texture2D(colorMap, varyTextCoord);"
"}";