ffmpeg video to opengl texture

I'm trying to render frames grabbed and converted from a video using ffmpeg to an opengl texture to be put on a quad. I've pretty much exhausted google and not found an answer, well I've found answers but none of them seem to have worked.

Basically, I am using avcodec_decode_video2() to decode the frame and then sws_scale() to convert the frame to RGB and then glTexSubImage2D() to create an openGL texture from it but can't seem to get anything to work.

I've made sure the "destination" AVFrame has power of 2 dimensions in the SWS Context setup. Here is my code:

// Create a scaler context that converts decoded frames from the codec's
// native size and pixel format into a fixed 512x256 RGB24 image.
// NOTE(review): the source video is 854x480 (stated below), so this both
// downscales and distorts the aspect ratio; the power-of-two size was
// chosen to suit the OpenGL texture.
SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width,
                pCodecCtx->height, pCodecCtx->pix_fmt, 512,
                256, PIX_FMT_RGB24, SWS_BICUBIC, NULL,
                NULL, NULL);

//While still frames to read
while(av_read_frame(pFormatCtx, &packet)>=0) {
    glClear(GL_COLOR_BUFFER_BIT);

    //If the packet is from the video stream
    if(packet.stream_index == videoStream) {
        //Decode the video
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

        //If we got a frame then convert it and put it into RGB buffer
        if(frameFinished) {
            printf("frame finished: %i\n", number);
            // NOTE(review): pFrameRGB is filled (further down in the post)
            // via avpicture_fill() at pCodecCtx->width x height, while this
            // scaler writes a 512x256 image — the destination linesize does
            // not match what sws_scale() produces, a likely cause of the
            // garbled frames described in the edits below.
            sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

            glBindTexture(GL_TEXTURE_2D, texture);
            //gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data);
            // Upload the converted pixels into the existing texture storage.
            // NOTE(review): glTexSubImage2D() only updates storage that was
            // previously defined with glTexImage2D(), and tightly packed
            // RGB24 rows may require glPixelStorei(GL_UNPACK_ALIGNMENT, ...)
            // — both points the author confirms in the "LAST EDIT" below.
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0,0, 512, 256, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
            SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, number);
            number++;
        }
    }

    // Draw a textured quad sized to the video's native resolution.
    glColor3f(1,1,1);
    glBindTexture(GL_TEXTURE_2D, texture);
    glBegin(GL_QUADS);
        glTexCoord2f(0,1);
        glVertex3f(0,0,0);

        glTexCoord2f(1,1);
        glVertex3f(pCodecCtx->width,0,0);

        glTexCoord2f(1,0);
        glVertex3f(pCodecCtx->width, pCodecCtx->height,0);

        glTexCoord2f(0,0);
        glVertex3f(0,pCodecCtx->height,0);

    glEnd();
    // NOTE(review): the excerpt ends here — the while-loop's closing brace
    // and any av_free_packet(&packet) cleanup are not shown.

As you can see in that code, I am also saving the frames to .ppm files just to make sure they are actually rendering, which they are.

The file being used is a .wmv at 854x480, could this be the problem? The fact I'm just telling it to go 512x256?

P.S. I've looked at this stackoverflow question but it didn't help.

Also, I have glEnable(GL_TEXTURE_2D); as well and have tested it by just loading in a normal bmp.

EDIT

I'm getting an image on the screen now but it is a garbled mess, I'm guessing something to do with changing things to a power of 2 (in the decode, swscontext and gluBuild2DMipmaps as shown in my code). I'm using nearly exactly the same code as shown above, only I've changed glTexSubImage2D to gluBuild2DMipmaps and changed the types to GL_RGBA.

Here is what the frame looks like:

EDIT AGAIN

Just realised I haven't showed the code for how pFrameRGB is set up:

//Allocate video frame for 24bit RGB that we convert to.
// NOTE(review): avcodec_alloc_frame()/avpicture_fill() are the old FFmpeg
// APIs (replaced by av_frame_alloc()/av_image_fill_arrays() in later
// releases) — TODO confirm which FFmpeg version this post targets.
AVFrame *pFrameRGB;
pFrameRGB = avcodec_alloc_frame();

if(pFrameRGB == NULL) {
    return -1;
}

//Allocate memory for the raw data we get when converting.
// NOTE(review): the buffer and the linesize computed here use the native
// video size (pCodecCtx->width x height, i.e. 854x480), while the
// SwsContext earlier in the post was created with a 512x256 destination;
// sws_scale() will then write 512-pixel rows into a frame whose linesize
// assumes 854-pixel rows — a plausible cause of the skewed/garbled
// texture the post describes.
uint8_t *buffer;
int numBytes;
numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
buffer = (uint8_t *) av_malloc(numBytes*sizeof(uint8_t));

//Associate frame with our buffer
avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24,
    pCodecCtx->width, pCodecCtx->height);

Now that I have changed the PixelFormat in avpicture_get_size to PIX_FMT_RGB24, I've done that in the SwsContext as well and changed gluBuild2DMipmaps to GL_RGB, and I get a slightly better image but it looks like I'm still missing lines and it's still a bit stretched:

Another Edit

After following Macke's advice and passing the actual resolution to OpenGL I get the frames nearly proper but still a bit skewed and in black and white, also it's only getting 6fps now rather than 110fps:

I'm trying to render frames grabbed and converted from a video using ffmpeg to an opengl texture to be put on a quad. I've pretty much exhausted google and not found an answer, well I've found answers but none of them seem to have worked.

Basically, I am using avcodec_decode_video2() to decode the frame and then sws_scale() to convert the frame to RGB and then glTexSubImage2D() to create an openGL texture from it but can't seem to get anything to work.

I've made sure the "destination" AVFrame has power of 2 dimensions in the SWS Context setup. Here is my code:

// Create a scaler context that converts decoded frames from the codec's
// native size and pixel format into a fixed 512x256 RGB24 image.
// NOTE(review): the source video is 854x480 (stated below), so this both
// downscales and distorts the aspect ratio; the power-of-two size was
// chosen to suit the OpenGL texture.
SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width,
                pCodecCtx->height, pCodecCtx->pix_fmt, 512,
                256, PIX_FMT_RGB24, SWS_BICUBIC, NULL,
                NULL, NULL);

//While still frames to read
while(av_read_frame(pFormatCtx, &packet)>=0) {
    glClear(GL_COLOR_BUFFER_BIT);

    //If the packet is from the video stream
    if(packet.stream_index == videoStream) {
        //Decode the video
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

        //If we got a frame then convert it and put it into RGB buffer
        if(frameFinished) {
            printf("frame finished: %i\n", number);
            // NOTE(review): pFrameRGB is filled (further down in the post)
            // via avpicture_fill() at pCodecCtx->width x height, while this
            // scaler writes a 512x256 image — the destination linesize does
            // not match what sws_scale() produces, a likely cause of the
            // garbled frames described in the edits below.
            sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

            glBindTexture(GL_TEXTURE_2D, texture);
            //gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data);
            // Upload the converted pixels into the existing texture storage.
            // NOTE(review): glTexSubImage2D() only updates storage that was
            // previously defined with glTexImage2D(), and tightly packed
            // RGB24 rows may require glPixelStorei(GL_UNPACK_ALIGNMENT, ...)
            // — both points the author confirms in the "LAST EDIT" below.
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0,0, 512, 256, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
            SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, number);
            number++;
        }
    }

    // Draw a textured quad sized to the video's native resolution.
    glColor3f(1,1,1);
    glBindTexture(GL_TEXTURE_2D, texture);
    glBegin(GL_QUADS);
        glTexCoord2f(0,1);
        glVertex3f(0,0,0);

        glTexCoord2f(1,1);
        glVertex3f(pCodecCtx->width,0,0);

        glTexCoord2f(1,0);
        glVertex3f(pCodecCtx->width, pCodecCtx->height,0);

        glTexCoord2f(0,0);
        glVertex3f(0,pCodecCtx->height,0);

    glEnd();
    // NOTE(review): the excerpt ends here — the while-loop's closing brace
    // and any av_free_packet(&packet) cleanup are not shown.

As you can see in that code, I am also saving the frames to .ppm files just to make sure they are actually rendering, which they are.

The file being used is a .wmv at 854x480, could this be the problem? The fact I'm just telling it to go 512x256?

P.S. I've looked at this stackoverflow question but it didn't help.

Also, I have glEnable(GL_TEXTURE_2D); as well and have tested it by just loading in a normal bmp.

EDIT

I'm getting an image on the screen now but it is a garbled mess, I'm guessing something to do with changing things to a power of 2 (in the decode, swscontext and gluBuild2DMipmaps as shown in my code). I'm using nearly exactly the same code as shown above, only I've changed glTexSubImage2D to gluBuild2DMipmaps and changed the types to GL_RGBA.

Here is what the frame looks like:

Ffmpeg as OpenGL Texture garbled

EDIT AGAIN

Just realised I haven't showed the code for how pFrameRGB is set up:

//Allocate video frame for 24bit RGB that we convert to.
// NOTE(review): avcodec_alloc_frame()/avpicture_fill() are the old FFmpeg
// APIs (replaced by av_frame_alloc()/av_image_fill_arrays() in later
// releases) — TODO confirm which FFmpeg version this post targets.
AVFrame *pFrameRGB;
pFrameRGB = avcodec_alloc_frame();

if(pFrameRGB == NULL) {
    return -1;
}

//Allocate memory for the raw data we get when converting.
// NOTE(review): the buffer and the linesize computed here use the native
// video size (pCodecCtx->width x height, i.e. 854x480), while the
// SwsContext earlier in the post was created with a 512x256 destination;
// sws_scale() will then write 512-pixel rows into a frame whose linesize
// assumes 854-pixel rows — a plausible cause of the skewed/garbled
// texture the post describes.
uint8_t *buffer;
int numBytes;
numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
buffer = (uint8_t *) av_malloc(numBytes*sizeof(uint8_t));

//Associate frame with our buffer
avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24,
    pCodecCtx->width, pCodecCtx->height);

Now that I have changed the PixelFormat in avpicture_get_size to PIX_FMT_RGB24, I've done that in the SwsContext as well and changed gluBuild2DMipmaps to GL_RGB, and I get a slightly better image but it looks like I'm still missing lines and it's still a bit stretched:

Ffmpeg Garbled OpenGL Texture 2

Another Edit

After following Macke's advice and passing the actual resolution to OpenGL I get the frames nearly proper but still a bit skewed and in black and white, also it's only getting 6fps now rather than 110fps:

enter image description here

P.S.

I've got a function to save the frames to image after sws_scale() and they are coming out fine as colour and everything so something in OGL is making it B&W.

LAST EDIT

Working! Okay I have it working now, basically I am not padding out the texture to a power of 2 and just using the resolution the video is.

I got the texture showing up properly with a lucky guess at the correct glPixelStorei()

glPixelStorei(GL_UNPACK_ALIGNMENT, 2);

Also, if anyone else has the subimage() showing blank problem like me, you have to fill the texture at least once with glTexImage2D() and so I use it once in the loop and then use glTexSubImage2D() after that.

Thanks Macke and datenwolf for all your help.

转帖:http://stackoverflow.com/questions/6495523/ffmpeg-video-to-opengl-texture


### RK3588平台上使用FFmpegOpenGL进行多媒体处理或视频渲染的方法 #### 1. 环境准备 在RK3588上进行多媒体处理和视频渲染前,需确保已安装必要的库和支持工具。具体来说,需要配置好FFmpegOpenGL环境。 对于FFmpeg的支持,在特定的操作系统环境下(如Debian),可以通过包管理器来简化这一过程[^3]: ```bash sudo apt-get update && sudo apt-get install ffmpeg libglu1-mesa-dev freeglut3-dev mesa-common-dev ``` 这一步骤能够确保编译环境中具备了基本的FFmpeg功能模块以及OpenGL开发所需的头文件和其他资源。 #### 2. FFmpeg解码并传递给OpenGL纹理 要实现在Picture控件或其他窗口组件内显示由FFmpeg解码后的YUV420P格式图像数据至指定区域的功能,通常会涉及到以下几个方面的工作流程[^1]: - **初始化FFmpeg**: 打开输入流、读取元数据、查找解码器等操作。 - **设置OpenGL上下文**: 创建GLContext,并准备好用于绘制的目标Surface。 - **创建着色器程序**: 编写顶点着色器(Vertex Shader) 和 片段着色器(Fragment Shader),以便于后续将像素颜色转换成屏幕上的可见形式。 下面是一个简单的C++代码片段展示如何利用FFmpeg解析帧并将它们作为纹理上传到OpenGL中: ```cpp extern "C" { #include <libavcodec/avcodec.h> #include <libavformat/avformat.h> } // 初始化AVCodecContext *pCodecCtx; void init_decoder(const char* filename){ AVFormatContext *pFormatCtx = nullptr; int videoStream; av_register_all(); if(avformat_open_input(&pFormatCtx, filename, NULL, NULL)!=0) exit(-1); // Couldn't open file if(avformat_find_stream_info(pFormatCtx,NULL)<0) exit(-1); // Couldn't find stream information videoStream=av_find_best_stream(pFormatCtx,AVMEDIA_TYPE_VIDEO,-1,-1,nullptr,0); pCodecCtx=avcodec_alloc_context3(NULL); avcodec_parameters_to_context(pCodecCtx,pFormatCtx->streams[videoStream]->codecpar); const AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id); if(!pCodec || avcodec_open2(pCodecCtx, pCodec, NULL) < 0) exit(-1); // Unsupported codec or failed to open it. } ``` 此部分负责打开媒体文件并找到其中的第一个可用视频轨道及其对应的解码器实例化对象`pCodecCtx`. 接着定义函数用来获取每一帧的数据并通过OpenGL API将其呈现出来: ```cpp GLuint textureID; // 假定已经生成了一个有效的纹理名称. bool process_frame(AVFrame *frame){ GLuint fbo; glGenFramebuffers(1,&fbo); glBindFramebuffer(GL_FRAMEBUFFER,fbo); glFramebufferTexture2D(GL_FRAMEBUFFER,GL_COLOR_ATTACHMENT0,GL_TEXTURE_2D,textureID,0); // 将 Y,U,V 数据分别绑定为三个不同的纹理单元... // 这里省略了一些细节... 
GLint oldProgram; glGetIntegerv(GL_CURRENT_PROGRAM,&oldProgram); glUseProgram(shader_program_handle); glUniform1i(glGetUniformLocation(shader_program_handle,"tex_y"),0); glUniform1i(glGetUniformLocation(shader_program_handle,"tex_u"),1); glUniform1i(glGetUniformLocation(shader_program_handle,"tex_v"),2); draw_quad(); // 绘制四边形覆盖整个视口区域 glUseProgram(oldProgram); glBindFramebuffer(GL_FRAMEBUFFER,0); return true; } ``` 这段伪代码展示了如何在一个自定义的FBO (Frame Buffer Object) 中完成对单个视频帧的渲染工作。注意这里假设存在一个预先编写好的片断着色器(`shader_program_handle`) 来执行从YUV空间向RGB色彩模型之间的变换计算。 最后需要注意的是,由于RK3588内部集成了先进的GPU硬件加速特性,因此建议尽可能多地利用这些优势来进行高效的图形运算和视频编解码任务[^2].
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值