本文在 FFmpeg 视频解码相关文章的基础上进行开发,完整步骤如下:
1.新建VideoView类
package com.test.ffmpeg;
import android.content.Context;
import android.graphics.PixelFormat;
import android.util.AttributeSet;
import android.view.Surface;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
/**
 * A {@link SurfaceView} that plays a video by handing its {@link Surface}
 * to native FFmpeg rendering code.
 * <p>
 * Created by ygdx_lk on 17/11/1.
 */
public class VideoView extends SurfaceView {

    public VideoView(Context context) {
        this(context, null);
    }

    public VideoView(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public VideoView(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        init();
    }

    /** Requests a 32-bit RGBA surface so the native side can blit RGBA_8888 pixels directly. */
    private void init() {
        getHolder().setFormat(PixelFormat.RGBA_8888);
    }

    /**
     * Starts playback of the given media file on a freshly spawned background
     * thread (decoding must not run on the UI thread).
     *
     * @param input absolute path of the media file to play
     */
    public void player(final String input) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                wrapRender(input, getHolder().getSurface());
            }
        }).start();
    }

    /** Native entry point: decodes {@code input} and draws each frame onto {@code surface}. */
    public native void wrapRender(String input, Surface surface);
}
2.在native-lib.h中添加新方法:
JNIEXPORT void JNICALL Java_com_test_ffmpeg_VideoView_wrapRender(JNIEnv *env, jobject instance, jstring uri_, jobject surface);
3.在native-lib.cpp中实现:
// JNI entry point: software-decodes the video at `uri_` with FFmpeg, converts
// each frame to RGBA via libswscale, and blits it letterboxed/centered onto
// the Android Surface. Runs on the caller's (background) thread until the
// configured number of playback loops completes.
void Java_com_test_ffmpeg_VideoView_wrapRender(JNIEnv *env, jobject instance, jstring uri_, jobject surface){
    const char *c_input_path = env->GetStringUTFChars(uri_, NULL);
    // Register demuxers/decoders (required in FFmpeg < 4.0 before other calls).
    av_register_all();
    // AVFormatContext describes the media container and its streams.
    AVFormatContext *pContext = avformat_alloc_context();
    if (avformat_open_input(&pContext, c_input_path, NULL, NULL) < 0) {
        LOGE("打开文件失败");
        // FIX: the JNI string was leaked on every early return in the original.
        env->ReleaseStringUTFChars(uri_, c_input_path);
        return;
    }
    if (avformat_find_stream_info(pContext, NULL) < 0) {
        LOGE("获取信息失败");
        // FIX: close the demuxer instead of leaking it on error.
        avformat_close_input(&pContext);
        env->ReleaseStringUTFChars(uri_, c_input_path);
        return;
    }
    // Locate the first video stream (AVMEDIA_TYPE_VIDEO).
    int idx_video_stream = -1;
    // FIX: unsigned index matches nb_streams (avoids signed/unsigned compare).
    for (unsigned int i = 0; i < pContext->nb_streams; ++i) {
        if (pContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            idx_video_stream = (int) i;
            break;
        }
    }
    if (idx_video_stream == -1) {
        LOGE("获取视频流失败");
        avformat_close_input(&pContext);
        env->ReleaseStringUTFChars(uri_, c_input_path);
        return;
    }
    // Decoder context of the video stream (deprecated AVStream::codec API,
    // kept for consistency with the FFmpeg version this tutorial targets).
    AVCodecContext *pCodecCtx = pContext->streams[idx_video_stream]->codec;
    AVCodec *pAvCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    // FIX: the original only logged this failure and then decoded with an
    // unopened codec; also guard against a NULL decoder.
    if (pAvCodec == NULL || avcodec_open2(pCodecCtx, pAvCodec, NULL) < 0) {
        LOGE("打开解码器失败");
        avformat_close_input(&pContext);
        env->ReleaseStringUTFChars(uri_, c_input_path);
        return;
    }
    // Target drawing area backed by the Java Surface.
    ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);
    ANativeWindow_Buffer windowBuffer;
    int windowWidth = ANativeWindow_getWidth(nativeWindow);
    int windowHeight = ANativeWindow_getHeight(nativeWindow);
    // Configure the window buffers once: window-sized, RGBA_8888.
    ANativeWindow_setBuffersGeometry(nativeWindow, windowWidth, windowHeight, WINDOW_FORMAT_RGBA_8888);
    // Letterbox: fill the window width, then shrink if the scaled height
    // would overflow the window, preserving the source aspect ratio.
    int videoWidth = windowWidth;
    int videoHeight = videoWidth * pCodecCtx->height / pCodecCtx->width;
    if (videoHeight > windowHeight) {
        videoHeight = windowHeight;
        videoWidth = windowHeight * pCodecCtx->width / pCodecCtx->height;
    }
    LOGI("w:%d, h:%d w:%d, h:%d", windowWidth, windowHeight, videoWidth, videoHeight);
    // Compressed data (AVPacket) in, decoded frames (AVFrame) out.
    AVPacket *avPacket = (AVPacket *) av_malloc(sizeof(AVPacket));
    av_init_packet(avPacket);
    AVFrame *avFrame = av_frame_alloc();
    AVFrame *rgbaFrame = av_frame_alloc();
    // Pixel buffer backing rgbaFrame->data (freed explicitly below;
    // av_frame_free does not own it after avpicture_fill).
    uint8_t *out_buf = (uint8_t *) av_malloc(avpicture_get_size(AV_PIX_FMT_RGBA, videoWidth, videoHeight));
    avpicture_fill((AVPicture *) rgbaFrame, out_buf, AV_PIX_FMT_RGBA, videoWidth, videoHeight);
    // Scaler: decoder pixel format -> RGBA at the letterboxed size.
    SwsContext *swsContext = sws_getContext(pCodecCtx->width,
                                            pCodecCtx->height,
                                            pCodecCtx->pix_fmt,
                                            videoWidth,
                                            videoHeight,
                                            AV_PIX_FMT_RGBA,
                                            SWS_BICUBIC, // fast, slightly softer output
                                            NULL,
                                            NULL,
                                            NULL);
    int frame_count = 0;
    int got_frame;
    bool circulation = true;
    int circulation_count = 0;
    while (circulation) { // loop playback
        LOGI("循环次数 %d", circulation_count); // FIX: missing ';' in the original
        if (circulation_count > 5) break; // stop after the configured number of loops
        while (av_read_frame(pContext, avPacket) >= 0) { // < 0: end of file
            LOGI("stream_index: %d", avPacket->stream_index);
            if (avPacket->stream_index == idx_video_stream) {
                avcodec_decode_video2(pCodecCtx, avFrame, &got_frame, avPacket);
                if (got_frame > 0) {
                    // Lock the window buffer for exclusive drawing.
                    ANativeWindow_lock(nativeWindow, &windowBuffer, NULL);
                    sws_scale(swsContext, (const uint8_t *const *) avFrame->data, avFrame->linesize,
                              0,
                              avFrame->height, rgbaFrame->data, rgbaFrame->linesize);
                    uint8_t *dst = (uint8_t *) windowBuffer.bits;
                    int dstStride = windowBuffer.stride * 4; // bytes per window row (RGBA = 4 bytes/px)
                    uint8_t *src = rgbaFrame->data[0];
                    int srcStride = rgbaFrame->linesize[0];
                    for (int i = 0; i < videoHeight; i++) {
                        // Copy row by row with a vertical and horizontal offset
                        // so the video is centered rather than stretched.
                        memcpy(dst + (i + (windowHeight - videoHeight) / 2) * dstStride +
                               (dstStride - srcStride) / 2, src + i * srcStride,
                               srcStride);
                    }
                    ANativeWindow_unlockAndPost(nativeWindow);
                    // ~60fps pacing; NOTE(review): ignores the stream's real
                    // frame rate/timestamps — confirm acceptable for the demo.
                    usleep(16 * 1000);
                    LOGI("解码第%d帧", frame_count++);
                }
            }
            // FIX: unref EVERY packet; the original leaked non-video packets.
            av_free_packet(avPacket);
        }
        // Rewind for the next playback loop.
        // FIX: seek on the video stream with AVSEEK_FLAG_BACKWARD and flush
        // the decoder; the original seeked stream 0 with no flags.
        av_seek_frame(pContext, idx_video_stream, 0, AVSEEK_FLAG_BACKWARD);
        avcodec_flush_buffers(pCodecCtx);
        circulation_count++;
    }
    // FIX: release everything the original leaked (scaler, native window,
    // packet struct, RGBA buffer) and use avformat_close_input, which both
    // closes the input and frees the context.
    sws_freeContext(swsContext);
    ANativeWindow_release(nativeWindow);
    av_frame_free(&avFrame);
    av_frame_free(&rgbaFrame);
    av_free(out_buf);
    av_free(avPacket);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pContext);
    env->ReleaseStringUTFChars(uri_, c_input_path);
}
4.activity_main.xml中添加VideoView
<?xml version="1.0" encoding="utf-8"?>
<!-- Main screen: a full-screen VideoView for native FFmpeg playback, with a
     TextView centered on top (MainActivity starts playback when it is tapped). -->
<android.support.constraint.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context="com.test.ffmpeg.MainActivity">
<!-- Surface that the native code renders decoded RGBA frames onto. -->
<com.test.ffmpeg.VideoView
android:id="@+id/surface"
android:layout_width="match_parent"
android:layout_height="match_parent" />
<!-- Shows the stringFromJNI() text; its click listener triggers playback. -->
<TextView
android:id="@+id/sample_text"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="Hello World!"
app:layout_constraintBottom_toBottomOf="parent"
app:layout_constraintLeft_toLeftOf="parent"
app:layout_constraintRight_toRightOf="parent"
app:layout_constraintTop_toTopOf="parent"
android:textColor="#00ff00"
/>
</android.support.constraint.ConstraintLayout>
5.MainActivity的onCreate方法中修改如下:
// Wires up the demo UI: shows the JNI test string in the TextView and starts
// native playback of /sdcard/input.mp4 when that text is tapped.
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
// Example of a call to a native method
TextView tv = (TextView) findViewById(R.id.sample_text);
final VideoView surface = (VideoView) findViewById(R.id.surface);
tv.setText(stringFromJNI());
// Tapping the text starts playback; VideoView.player spawns the decode thread.
tv.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
// Hard-coded demo input pushed to the sdcard in step 7.
String video = "input.mp4";
String input = new File(Environment.getExternalStorageDirectory(), video).getAbsolutePath();
Log.i(TAG, "onClick: " + input);
// NOTE(review): on Android 6.0+ READ_EXTERNAL_STORAGE must also be granted
// at runtime, or this file will not be readable — confirm on the target device.
surface.player(input);
}
});
}
6.权限
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
7.拷贝一个input.mp4文件到手机sdcard下(adb push input.mp4 /sdcard),运行程序,点击tv按钮。注意:在 Android 6.0 及以上系统上,除了在清单中声明权限,还需要在运行时动态申请存储读写权限,否则无法读取 sdcard 上的文件。