Playing YUV video from Android JNI with FFmpeg, OpenGL ES, and EGL

With FFmpeg plus OpenGL, the tricky part is getting the decoded data into OpenGL for rendering.

It is actually quite simple: create a Surface inside a GLSurfaceView and pass it down to the JNI layer, and you can render. But that way rendering is driven from the Java layer, and I want to drive it from JNI, so I gave up on GLSurfaceView and used EGL (with a plain SurfaceView) instead.

On to the code.

The Java layer:

public class VideoSurfaceView extends SurfaceView implements SurfaceHolder.Callback{
    String videoPath = "/storage/emulated/0/360/80s.mp4";
    public SurfaceHolder surfaceHolder;
    public VideoSurfaceView(Context context) {
        super(context);
        init();
    }
    public VideoSurfaceView(Context context, AttributeSet attributeSet) {
        super(context,attributeSet);
        init();
    }
    private void init(){
        surfaceHolder = getHolder();
        surfaceHolder.addCallback(this);
    }
    public void surfaceCreated(SurfaceHolder holder) {
    }

    public void surfaceDestroyed(SurfaceHolder holder) {

    }

    public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) {
        // Decode and render on a background thread. Note that surfaceChanged can
        // fire more than once; a real player should guard against restarting.
        Thread thread = new Thread(){
            @Override
            public void run() {
                super.run();
                videoPlay(videoPath,surfaceHolder.getSurface());
            }
        };
        thread.start();
    }
 
    static {
        System.loadLibrary("videoPlay");
    }
    public native void videoPlay(String path, Surface surface);
}

The code is simple: again a Surface is passed down to JNI, except this time the Surface is for EGL to use.

The JNI code:

#include <jni.h>
#include <android/native_window_jni.h>
#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include "shaderUtils.h"
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

// the shader source strings are defined at the end of this post
extern const char *vertexShaderString;
extern const char *fragmentShaderString;

extern "C"
JNIEXPORT void JNICALL
Java_com_example_ffmpeg_1yuv_VideoSurfaceView_videoPlay(JNIEnv *env, jobject instance, jstring path_,
                                                        jobject surface) {
    const char *path = env->GetStringUTFChars(path_, 0);

    /**
     * FFmpeg initialization
     */
    av_register_all(); // deprecated (and a no-op) since FFmpeg 4.0
    AVFormatContext *fmt_ctx = avformat_alloc_context();
    if (avformat_open_input(&fmt_ctx, path, NULL, NULL) < 0) {
        return;
    }
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        return;
    }
    AVStream *avStream = NULL;
    int video_stream_index = -1;
    for (int i = 0; i < fmt_ctx->nb_streams; i++) {
        if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            avStream =fmt_ctx->streams[i];
            video_stream_index = i;
            break;
        }
    }
    if (video_stream_index == -1) {
        return;
    }
    AVCodecContext *codec_ctx = avcodec_alloc_context3(NULL);
    avcodec_parameters_to_context(codec_ctx, avStream->codecpar);
    AVCodec *avCodec = avcodec_find_decoder(codec_ctx->codec_id);
    if (avcodec_open2(codec_ctx, avCodec, NULL) < 0) {
        return;
    }
    AVPacket *pkt = av_packet_alloc(); // filled by av_read_frame on each iteration

    /**
     * Initialize EGL
     */
    EGLConfig eglConf;
    EGLSurface eglWindow;
    EGLContext eglCtx;
    int windowWidth;
    int windowHeight;
    ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);

    EGLint configSpec[] = { EGL_RED_SIZE, 8,
                            EGL_GREEN_SIZE, 8,
                            EGL_BLUE_SIZE, 8,
                            EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, // request an ES2-capable config
                            EGL_SURFACE_TYPE, EGL_WINDOW_BIT, EGL_NONE };

    EGLDisplay eglDisp = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    EGLint eglMajVers, eglMinVers;
    EGLint numConfigs;
    eglInitialize(eglDisp, &eglMajVers, &eglMinVers);
    eglChooseConfig(eglDisp, configSpec, &eglConf, 1, &numConfigs);

    eglWindow = eglCreateWindowSurface(eglDisp, eglConf,nativeWindow, NULL);

    eglQuerySurface(eglDisp,eglWindow,EGL_WIDTH,&windowWidth);
    eglQuerySurface(eglDisp,eglWindow,EGL_HEIGHT,&windowHeight);
    const EGLint ctxAttr[] = {
            EGL_CONTEXT_CLIENT_VERSION, 2,
            EGL_NONE
    };
    eglCtx = eglCreateContext(eglDisp, eglConf,EGL_NO_CONTEXT, ctxAttr);

    eglMakeCurrent(eglDisp, eglWindow, eglWindow, eglCtx);
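
    // (Added guard, not in the original post: each EGL call above reports
    //  failure through its return value, which this demo otherwise ignores.)
    if (eglDisp == EGL_NO_DISPLAY || eglWindow == EGL_NO_SURFACE ||
        eglCtx == EGL_NO_CONTEXT) {
        ANativeWindow_release(nativeWindow);
        env->ReleaseStringUTFChars(path_, path);
        return;
    }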


    /**
     * OpenGL setup; this must happen after EGL is initialized
     */
    float *vertexData = new float[12]{
            1.0f, -1.0f, 0.0f,
            -1.0f, -1.0f, 0.0f,
            1.0f, 1.0f, 0.0f,
            -1.0f, 1.0f, 0.0f
    };

    float *textureVertexData = new float[8]{
            1.0f, 0.0f, // bottom right
            0.0f, 0.0f, // bottom left
            1.0f, 1.0f, // top right
            0.0f, 1.0f  // top left
    };
    ShaderUtils shaderUtils; // stateless helper; a stack object is enough
    GLuint programId = shaderUtils.createProgram(vertexShaderString, fragmentShaderString);
    GLuint aPositionHandle = (GLuint) glGetAttribLocation(programId, "aPosition");
    GLuint aTextureCoordHandle = (GLuint) glGetAttribLocation(programId, "aTexCoord");

    GLuint textureSamplerHandleY = (GLuint) glGetUniformLocation(programId, "yTexture");
    GLuint textureSamplerHandleU = (GLuint) glGetUniformLocation(programId, "uTexture");
    GLuint textureSamplerHandleV = (GLuint) glGetUniformLocation(programId, "vTexture");



    // No transform matrix is used, so aspect-fit (letterbox) the video into the window by hand
    int videoWidth = codec_ctx->width;
    int videoHeight = codec_ctx->height;

    int left,top,viewWidth,viewHeight;
    if(windowHeight > windowWidth){
        left = 0;
        viewWidth = windowWidth;
        viewHeight = (int)(videoHeight*1.0f/videoWidth*viewWidth);
        top = (windowHeight - viewHeight)/2;
    }else{
        top = 0;
        viewHeight = windowHeight;
        viewWidth = (int)(videoWidth*1.0f/videoHeight*viewHeight);
        left = (windowWidth - viewWidth)/2;
    }
    glViewport(left, top, viewWidth, viewHeight);

    glUseProgram(programId);
    glEnableVertexAttribArray(aPositionHandle);
    glVertexAttribPointer(aPositionHandle, 3, GL_FLOAT, GL_FALSE,
                          12, vertexData);
    glEnableVertexAttribArray(aTextureCoordHandle);
    glVertexAttribPointer(aTextureCoordHandle,2,GL_FLOAT,GL_FALSE,8,textureVertexData);
    /**
     * Create the (still empty) Y, U and V textures
     */
    GLuint yTextureId;
    GLuint uTextureId;
    GLuint vTextureId;
    glGenTextures(1,&yTextureId);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D,yTextureId);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,
                    GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    glUniform1i(textureSamplerHandleY,0);

    glGenTextures(1,&uTextureId);
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D,uTextureId);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,
                    GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    glUniform1i(textureSamplerHandleU,1);

    glGenTextures(1,&vTextureId);
    glActiveTexture(GL_TEXTURE2);
    glBindTexture(GL_TEXTURE_2D,vTextureId);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,
                    GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    glUniform1i(textureSamplerHandleV,2);
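
    // (Added, not in the original post: tightly packed one-byte luminance rows
    //  can violate OpenGL's default 4-byte row alignment, so request 1-byte
    //  unpack alignment before the glTexImage2D uploads below.)
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);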


    /**
     * Start decoding
     */
    int ret;
    while (1) {
        if (av_read_frame(fmt_ctx, pkt) < 0) {
            // end of stream
            break;
        }
        if (pkt->stream_index == video_stream_index) {
            ret = avcodec_send_packet(codec_ctx, pkt);
            if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                av_packet_unref(pkt);
                continue;
            }
            AVFrame *yuvFrame = av_frame_alloc();
            ret = avcodec_receive_frame(codec_ctx, yuvFrame);
            if (ret < 0) {
                // typically AVERROR(EAGAIN): the decoder needs more input first
                av_frame_free(&yuvFrame);
                av_packet_unref(pkt);
                continue;
            }
            /**
             * Upload the decoded frame into the Y/U/V textures
             */


            glActiveTexture(GL_TEXTURE0);
            glBindTexture(GL_TEXTURE_2D, yTextureId);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, yuvFrame->linesize[0], yuvFrame->height,0, GL_LUMINANCE, GL_UNSIGNED_BYTE, yuvFrame->data[0]);

            glActiveTexture(GL_TEXTURE1);
            glBindTexture(GL_TEXTURE_2D, uTextureId);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE,  yuvFrame->linesize[1], yuvFrame->height/2,0, GL_LUMINANCE, GL_UNSIGNED_BYTE, yuvFrame->data[1]);

            glActiveTexture(GL_TEXTURE2);
            glBindTexture(GL_TEXTURE_2D, vTextureId);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE,  yuvFrame->linesize[2], yuvFrame->height/2,0, GL_LUMINANCE, GL_UNSIGNED_BYTE, yuvFrame->data[2]);


            /**
             * Textures updated; now draw
             */
            glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);

            glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

            eglSwapBuffers(eglDisp, eglWindow);

            av_frame_free(&yuvFrame);
        }
        av_packet_unref(pkt);
    }
    /**
     * Release resources
     */
    delete[] vertexData;
    delete[] textureVertexData;
    av_packet_free(&pkt);

    eglMakeCurrent(eglDisp, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
    eglDestroyContext(eglDisp, eglCtx);
    eglDestroySurface(eglDisp, eglWindow);
    eglTerminate(eglDisp);
    eglDisp = EGL_NO_DISPLAY;
    eglWindow = EGL_NO_SURFACE;
    eglCtx = EGL_NO_CONTEXT;

    avcodec_free_context(&codec_ctx);
    avformat_close_input(&fmt_ctx);
    ANativeWindow_release(nativeWindow);

    env->ReleaseStringUTFChars(path_, path);
}
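One thing the loop above skips: when av_read_frame reports end of stream, the decoder may still be holding buffered frames. A minimal drain sketch (my addition, reusing the names above) that would run just before the release block:

avcodec_send_packet(codec_ctx, NULL); // a NULL packet puts the decoder into draining mode
AVFrame *flushFrame = av_frame_alloc();
while (avcodec_receive_frame(codec_ctx, flushFrame) == 0) {
    // upload flushFrame to the Y/U/V textures and draw, exactly as in the loop above
}
av_frame_free(&flushFrame);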

shaderUtils.h

#ifndef SHADERUTILS_H
#define SHADERUTILS_H

#include <GLES2/gl2.h>

class ShaderUtils {
public:
    GLuint createProgram(const char *vertexSource, const char *fragmentSource);

    GLuint loadShader(GLenum shaderType, const char *source);
};

#endif // SHADERUTILS_H

shaderUtils.cpp

#include "shaderUtils.h"
#include <cstdlib>
#include <android/log.h>

GLuint ShaderUtils::createProgram(const char *vertexSource, const char *fragmentSource) {
    GLuint vertexShader = loadShader(GL_VERTEX_SHADER, vertexSource);
    if (!vertexShader) {
        return 0;
    }
    GLuint pixelShader = loadShader(GL_FRAGMENT_SHADER, fragmentSource);
    if (!pixelShader) {
        return 0;
    }

    GLuint program = glCreateProgram();
    if (program != 0) {
        glAttachShader(program, vertexShader);
        glAttachShader(program, pixelShader);
        glLinkProgram(program);
        GLint  linkStatus = 0;
        glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
        if (!linkStatus) {
            GLint info_length = 0;
            glGetProgramiv(program, GL_INFO_LOG_LENGTH, &info_length);
            if(info_length){
                char* buf = (char*)malloc(info_length * sizeof(char));
                glGetProgramInfoLog(program, info_length, NULL, buf);
                // report the link error instead of silently discarding it
                __android_log_print(ANDROID_LOG_ERROR, "ShaderUtils", "link error: %s", buf);
                free(buf);
            }
            glDeleteProgram(program);
            program = 0;
        }
    }
    return program;
}
GLuint ShaderUtils::loadShader(GLenum shaderType, const char *source) {
    GLuint shader = glCreateShader(shaderType);
    if (shader != 0) {
        glShaderSource(shader,1, &source,NULL);
        glCompileShader(shader);
        GLint compiled = 0;
        glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
        if (!compiled) {
            GLint info_length = 0;
            glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &info_length);
            if(info_length){
                char* buf = (char*)malloc(info_length * sizeof(char));
                if(buf){
                    glGetShaderInfoLog(shader, info_length, NULL, buf);
                    // report the compile error instead of silently discarding it
                    __android_log_print(ANDROID_LOG_ERROR, "ShaderUtils", "compile error: %s", buf);
                }
                free(buf);
            }
            glDeleteShader(shader);
            shader = 0;
        }
    }
    return shader;
}

And finally, the shaders:

#define GET_STR(x) #x
const char *vertexShaderString = GET_STR(
        attribute vec4 aPosition;
        attribute vec2 aTexCoord;
        varying vec2 vTexCoord;
        void main() {
            vTexCoord=vec2(aTexCoord.x,1.0-aTexCoord.y);
            gl_Position = aPosition;
        }
);
const char *fragmentShaderString = GET_STR(
        precision mediump float;
        varying vec2 vTexCoord;
        uniform sampler2D yTexture;
        uniform sampler2D uTexture;
        uniform sampler2D vTexture;
        void main() {
            vec3 yuv;
            vec3 rgb;
            yuv.r = texture2D(yTexture, vTexCoord).r;
            yuv.g = texture2D(uTexture, vTexCoord).r - 0.5;
            yuv.b = texture2D(vTexture, vTexCoord).r - 0.5;
            rgb = mat3(1.0,       1.0,         1.0,
                       0.0,       -0.39465,  2.03211,
                       1.13983, -0.58060,  0.0) * yuv;
            gl_FragColor = vec4(rgb, 1.0);
        }
);

The shader code was copied from 雷神's (Lei Xiaohua's) blog.
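Since GLSL's mat3 constructor is column-major, the multiplication above works out to the standard YUV-to-RGB conversion (with U and V already re-centered by the 0.5 subtraction):

R = Y + 1.13983 * V
G = Y - 0.39465 * U - 0.58060 * V
B = Y + 2.03211 * U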

Update 2018/03/26
When uploading YUV texture data, use the AVFrame's linesize as the texture width; it is more accurate than width, since decoded rows can carry alignment padding:
glBindTexture(GL_TEXTURE_2D, texture[0]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, frame->linesize[0], frame->height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, frame->data[0]);
glBindTexture(GL_TEXTURE_2D, texture[1]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, frame->linesize[1], frame->height / 2, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, frame->data[1]);
glBindTexture(GL_TEXTURE_2D, texture[2]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, frame->linesize[2], frame->height / 2, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, frame->data[2]);
Since I couldn't find the old code, I copied these snippets into a fresh project and tried them out; they basically work. I also noticed the Java videoPlay function had an extra parameter, so I removed it and lightly cleaned up the code.
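A side effect of using linesize as the texture width is that the alignment padding on the right edge is rendered as a thin strip of garbage. One way to hide it, a sketch of my own rather than code from the project, assuming linesize[0] == 2 * linesize[1] (the common case for YUV420P), is to shrink the horizontal texture coordinate so sampling stops at the real picture width:

// hypothetical fix-up, once the first decoded frame is available:
float maxX = (float) codec_ctx->width / yuvFrame->linesize[0];
float croppedTexCoords[8] = {
        maxX, 0.0f, // bottom right
        0.0f, 0.0f, // bottom left
        maxX, 1.0f, // top right
        0.0f, 1.0f  // top left
};
glVertexAttribPointer(aTextureCoordHandle, 2, GL_FLOAT, GL_FALSE, 8, croppedTexCoords);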
Code: Baidu Cloud (百度云)

Password: l1w6
Finally, a screenshot of the result. Since I didn't implement any frame timing, playback runs as fast as the decoder can go, like fast-forward.

[screenshot: decoded video rendered to the SurfaceView]
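If you want roughly real-time playback instead, the simplest fix is to sleep one frame interval after each eglSwapBuffers. This is a rough sketch of mine, not the original code; it assumes a constant frame rate from avStream->avg_frame_rate and ignores decode time and per-frame pts:

#include <libavutil/time.h> // for av_usleep

// inside the decode loop, right after eglSwapBuffers():
double fps = av_q2d(avStream->avg_frame_rate); // e.g. 25/1
if (fps > 0) {
    av_usleep((unsigned) (1000000.0 / fps));
}

A proper player would instead derive each frame's display time from its pts converted through avStream->time_base.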


