https://github.com/bianshaopeng/ffmpegGLESAndSLES.git
视频播放器包括视频播放和音频播放,分为三个步骤。
一。ffmpeg 解码得到视频包和音频包。
// Open the input URL, locate the A/V streams, open both decoders, then
// demux packets into the video/audio queues until EOF.
int ret = 0;
ret = avformat_open_input(&inafc, urlPath, NULL, NULL);
if (ret < 0) {
    LOGE("open file fail:%d", ret);
    return;
}
LOGE("open file success");
// Probe the container so that stream codec parameters are populated.
ret = avformat_find_stream_info(inafc, NULL);
if (ret < 0) {
    LOGE("find stream info fail");
    return;
}
LOGE("find stream info success");
// av_find_best_stream returns a negative AVERROR code on failure (not
// necessarily -1), so the checks below use ">= 0".
videoIndex = av_find_best_stream(inafc, AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);
audioIndex = av_find_best_stream(inafc, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0);
// Set up the video decoder from the stream's own codec id — the input is
// not guaranteed to be H.264, so AV_CODEC_ID_H264 must not be hard-coded.
if (videoIndex >= 0) {
    AVCodec *avCodec = avcodec_find_decoder(inafc->streams[videoIndex]->codecpar->codec_id);
    if (avCodec == NULL) {
        LOGE("find video decoder fail");
        return;
    }
    LOGE("find video decoder success");
    action->avCodecContext = avcodec_alloc_context3(avCodec);
    avcodec_parameters_to_context(action->avCodecContext, inafc->streams[videoIndex]->codecpar);
    // Open the video decoder.
    ret = avcodec_open2(action->avCodecContext, avCodec, NULL);
}
if (audioIndex >= 0) {
    // AVStream::codec is deprecated; build a fresh context from codecpar,
    // mirroring the video path above.
    AVCodecParameters *audioPar = inafc->streams[audioIndex]->codecpar;
    AVCodec *avCodec = avcodec_find_decoder(audioPar->codec_id);
    if (avCodec == NULL) {
        LOGE("find audio decoder fail");
        return;
    }
    audioAction->avCodecContext = avcodec_alloc_context3(avCodec);
    avcodec_parameters_to_context(audioAction->avCodecContext, audioPar);
    ret = avcodec_open2(audioAction->avCodecContext, avCodec, NULL);
}
audioAction->init();
// audioAction->playAudio();
if (ret < 0) {
    LOGE("open avcodec fail:%d", ret);
    return;  // do not demux into decoders that failed to open
}
LOGE("open avcodec success");
// Demux loop: hand each packet to the matching queue.
// NOTE(review): the same AVPacket is pushed to the consumer and then
// immediately unrefed here while the consumer thread may still hold the
// pointer — the consumer can observe freed packet data.  The robust fix is
// to push av_packet_clone(avPacket) and free the clone on the consumer
// side; flagged rather than changed here because it needs the matching
// consumer-side change.
AVPacket *avPacket = av_packet_alloc();
while (av_read_frame(inafc, avPacket) >= 0) {
    if (avPacket->stream_index == videoIndex) {
        action->push(avPacket);
    } else if (avPacket->stream_index == audioIndex) {
        audioAction->push(avPacket);
    }
    av_packet_unref(avPacket);
}
av_packet_free(&avPacket);  // was leaked: unref releases the payload, not the struct
二。解码视频包得到 YUV 数据,通过 GLSurfaceView 播放。
/**
 * Video-consumer thread entry point.
 *
 * Pops AVPackets off ConsumeAction::queueVideo, decodes them, and hands the
 * resulting YUV planes (Y/U/V = data[0..2]) to Java via callBackJava.
 * Runs until ConsumeAction::flag is cleared, then exits the thread.
 *
 * Fixes over the previous version:
 *  - pthread_cond_wait() is now always called with the mutex held; calling
 *    it on an unlocked mutex (as the old else-branch did) is undefined
 *    behaviour per POSIX;
 *  - the mutex is locked and unlocked exactly once per iteration (the old
 *    code unlocked twice whenever the queue held fewer than 32 packets);
 *  - avcodec_receive_frame() is drained in a loop, since one packet may
 *    produce zero or several frames;
 *  - decoding and the Java callback run outside the critical section so the
 *    producer is not blocked while a frame is decoded.
 *
 * Ownership is unchanged: this thread only unrefs the packet's payload; the
 * producer owns the AVPacket struct itself.
 */
void *consumer(void *data) {
    ConsumeAction *consumeAct = static_cast<ConsumeAction *>(data);
    consumeAct->flag = true;
    while (consumeAct->flag) {
        // --- critical section: wait for work and take one packet ---
        pthread_mutex_lock(&consumeAct->mutex);
        while (consumeAct->flag && consumeAct->queueVideo.size() == 0) {
            LOGE("消费者正在等待中产品")
            pthread_cond_wait(&consumeAct->cond, &consumeAct->mutex);
        }
        if (!consumeAct->flag) {
            pthread_mutex_unlock(&consumeAct->mutex);
            break;
        }
        AVPacket *avPacket = consumeAct->queueVideo.front();
        consumeAct->queueVideo.pop();
        pthread_mutex_unlock(&consumeAct->mutex);
        // --- decode outside the lock ---
        int ret = avcodec_send_packet(consumeAct->avCodecContext, avPacket);
        if (ret >= 0) {
            AVFrame *avFrame = av_frame_alloc();
            // One packet can yield 0..n frames: drain the decoder.
            while (avcodec_receive_frame(consumeAct->avCodecContext, avFrame) >= 0) {
                LOGE("解码视频成功")
                consumeAct->callJavaMethod.callBackJava(avFrame->width,
                                                        avFrame->height,
                                                        (jbyte *) avFrame->data[0],
                                                        (jbyte *) avFrame->data[1],
                                                        (jbyte *) avFrame->data[2]);
            }
            av_frame_free(&avFrame);
        }
        av_packet_unref(avPacket);
    }
    pthread_exit(&consumeAct->cusId);
}
三。解码音频包通过 OpenSL ES 播放。
OpenSL ES 三部曲:
1.创建引擎
// Step 1: create and realize the OpenSL ES engine, then fetch SLEngineItf.
// NOTE(review): every SLresult below is ignored; each call should be checked
// against SL_RESULT_SUCCESS before proceeding to the next step.
slCreateEngine(&pEngine, 0, nullptr, 0, nullptr, nullptr);// create the engine object
(*pEngine)->Realize(pEngine, SL_BOOLEAN_FALSE);// realize the SLObjectItf (FALSE = synchronous)
(*pEngine)->GetInterface(pEngine, SL_IID_ENGINE, &engineEngine);// fetch the SLEngineItf from the engine object
2.创建混音器
// Step 2: create and realize the output mix; reverb is optional decoration.
(*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 0, 0, 0);// create the output-mix object via the engine
(*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);// realize the output-mix object (synchronous)
SLresult sLresult = (*outputMixObject)->GetInterface(outputMixObject,
SL_IID_ENVIRONMENTALREVERB,
&outputMixEnvironmentalReverb);// ask the mix for its environmental-reverb interface (may be unsupported)
// Apply the reverb settings only if the device actually exposes the interface.
if (SL_RESULT_SUCCESS == sLresult) {
(*outputMixEnvironmentalReverb)->
SetEnvironmentalReverbProperties(outputMixEnvironmentalReverb, &settings);
}
3.播放
// Step 3: build the PCM source + output-mix sink, create the player, and start.
// NOTE(review): the locator type is SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE but
// the struct used is SLDataLocator_AndroidBufferQueue.  The two structs share
// the same layout (locatorType + numBuffers) so this happens to work, but the
// matching type is SLDataLocator_AndroidSimpleBufferQueue — confirm and switch.
SLDataLocator_AndroidBufferQueue android_queue = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
/**
typedef struct SLDataFormat_PCM_ {
SLuint32 formatType; // PCM marker
SLuint32 numChannels; // channel count
SLuint32 samplesPerSec; // sample rate (value is in milliHertz, despite the name)
SLuint32 bitsPerSample; // bits per sample
SLuint32 containerSize; // container size in bits
SLuint32 channelMask; // speaker/channel layout
SLuint32 endianness; // byte-order flag
} SLDataFormat_PCM;
*/
SLDataFormat_PCM pcm = {SL_DATAFORMAT_PCM
,2// two channels (stereo)
,SL_SAMPLINGRATE_44_1// 44100 Hz (constant is expressed in mHz)
,SL_PCMSAMPLEFORMAT_FIXED_16,
SL_PCMSAMPLEFORMAT_FIXED_16,
SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT,
SL_BYTEORDER_LITTLEENDIAN};
/*
* typedef struct SLDataSource_ {
void *pLocator;// the buffer-queue locator above
void *pFormat;// the PCM format description above
} SLDataSource;
* */
SLDataSource dataSource = {&android_queue, &pcm};
// Sink: route the player's output into the output mix created in step 2.
SLDataLocator_OutputMix slDataLocator_outputMix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
SLDataSink slDataSink = {&slDataLocator_outputMix, NULL};
// Interfaces requested from the player object (none marked required).
const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND, SL_IID_VOLUME};
const SLboolean req[3] = {SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE};
/*
* SLresult (*CreateAudioPlayer) (
SLEngineItf self,
SLObjectItf * pPlayer,
SLDataSource *pAudioSrc,// data source configured above
SLDataSink *pAudioSnk,// sink: the output mix
SLuint32 numInterfaces,
const SLInterfaceID * pInterfaceIds,
const SLboolean * pInterfaceRequired
);
* */
(*engineEngine)->CreateAudioPlayer(engineEngine, &audioplayer, &dataSource, &slDataSink, 3, ids,
req);
(*audioplayer)->Realize(audioplayer, SL_BOOLEAN_FALSE);
(*audioplayer)->GetInterface(audioplayer, SL_IID_PLAY, &slPlayItf);// fetch the play interface
// Fetch the buffer queue: audio plays by repeatedly enqueueing PCM buffers.
(*audioplayer)->GetInterface(audioplayer, SL_IID_BUFFERQUEUE, &slBufferQueueItf);
// Register the callback fired whenever the queue finishes a buffer.
(*slBufferQueueItf)->RegisterCallback(slBufferQueueItf,
getQueueCallBack, this);
// Start playback, then prime the queue by invoking the callback once by hand.
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PLAYING);
getQueueCallBack(slBufferQueueItf,this);
注意:NDK 崩溃错误记录
signal 4 (SIGILL), code 1 (ILL_ILLOPC), fault addr 0xba2e3d86
用 addr2line 定位到:
audioContext->playAudio();
// Buggy version, kept deliberately as the example this note is about:
// the function is declared to return void* but falls off the end without a
// return statement — undefined behaviour, which manifested here as SIGILL.
void* consumerAudio(void* data){
Audio* audioContext = static_cast<Audio *>(data);
audioContext->playAudio();
}
崩溃原因是函数缺少 return 语句,修正后如下:
// Audio-consumer thread entry point: hand control to Audio::playAudio(),
// then return a null exit status so the thread terminates cleanly.
void* consumerAudio(void* data){
    auto* audio = static_cast<Audio *>(data);
    audio->playAudio();
    return nullptr;
}