一 解码成PCM
流程其实和视频差不多,参考播放视频的代码,稍作增改
先定义后面需要用到的全局变量
//Audio decoder context (allocated when the decoder is opened below)
AVCodecContext *audioCodecContext;
//Audio resampling (libswresample) context
SwrContext *audioSwrContext;
//Destination buffer for resampled PCM (allocated as 44100 * 2 bytes below)
uint8_t *audioBuffer;
//Resampler output channel layout (stereo)
uint64_t outChLayout = AV_CH_LAYOUT_STEREO;
//Resampler output channel count (must match AV_CH_LAYOUT_STEREO)
int outChannelsNumber = 2;
//Queue of demuxed audio AVPackets awaiting decode
//NOTE(review): pushed by the demux loop and popped by getPcm() on another
//thread with no lock -- std::queue is not thread-safe; confirm/guard.
std::queue<AVPacket *> audioPacketQueue;
//Playback state flag: 0 = playing, 1 = finished (set once the queue drains)
int playState = 0;
//Thread id of the audio playback thread
pthread_t audioPlayId;
打开输入流
//Open the input stream and probe stream info.
//NOTE(review): avformat_open_input()/avformat_find_stream_info() return
//codes are ignored here; a bad path or unreadable file goes undetected
//until later calls crash. inputPath is never released with
//ReleaseStringUTFChars in the visible code -- confirm it happens later.
AVFormatContext *avFormatContext = avformat_alloc_context();
const char *inputPath = env->GetStringUTFChars(inputPath_, nullptr);
avformat_open_input(&avFormatContext, inputPath, nullptr, nullptr);
avformat_find_stream_info(avFormatContext, nullptr);
找到音频流
//Locate the first audio stream; audio_index stays -1 when the file has none
//(callers below index streams[audio_index], so -1 should be checked).
//Fixes: unsigned loop index (nb_streams is unsigned; the int compare was a
//signed/unsigned mismatch) and an early break -- the original kept scanning
//and silently ended up with the LAST audio stream instead of the first.
int audio_index = -1;
for (unsigned int i = 0; i < avFormatContext->nb_streams; ++i) {
    if (avFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        audio_index = (int) i;
        break;
    }
}
打开音频解码器
//Open the audio decoder matching the stream's codec id.
//NOTE(review): avcodec_find_decoder() returns nullptr for an unknown codec
//and avcodec_alloc_context3/parameters_to_context/open2 can all fail --
//none of the results are checked here, so a failure surfaces later as a
//crash inside avcodec_send_packet(). audio_index == -1 is also unguarded.
AVCodec *audioCodec = avcodec_find_decoder(
avFormatContext->streams[audio_index]->codecpar->codec_id);
audioCodecContext = avcodec_alloc_context3(audioCodec);
avcodec_parameters_to_context(audioCodecContext,
avFormatContext->streams[audio_index]->codecpar);
avcodec_open2(audioCodecContext, audioCodec, nullptr);
配置重采样上下文
//Configure resampling: decoder-native layout/format -> interleaved stereo
//S16 at the same sample rate (argument order: out layout, out fmt,
//out rate, in layout, in fmt, in rate, log_offset, log_ctx).
audioSwrContext = swr_alloc();
//44100 * 2 BYTES of output storage = 22050 stereo S16 samples.
//NOTE(review): enough for typical frame sizes (~1024 samples) but not
//derived from the codec's frame size -- confirm against worst case.
audioBuffer = static_cast<uint8_t *>(av_mallocz(44100 * 2));
//NOTE(review): swr_alloc_set_opts()/swr_init() results are unchecked.
swr_alloc_set_opts(audioSwrContext, outChLayout, AV_SAMPLE_FMT_S16, audioCodecContext->sample_rate,
audioCodecContext->channel_layout, audioCodecContext->sample_fmt,
audioCodecContext->sample_rate, 0,
nullptr);
swr_init(audioSwrContext);
读取音频流添加进队列缓冲,其中播放音频的playAudio函数属于opensl es,后面再给
//解码
while (av_read_frame(avFormatContext, packet) >= 0) {
if (packet->stream_index == audio_index) {
auto *audioPacket = (AVPacket *) av_mallocz(sizeof(AVPacket));
//克隆
if (av_packet_ref(audioPacket, packet)) {
return 0;
}
audioPacketQueue.push(audioPacket);
}
av_packet_unref(packet);
}
pthread_create(&playId, nullptr, playAudio, nullptr);//开启begin线程
while (true) {
//音频播放完成
if (audioPacketQueue.empty()) {
break;
}
}
playState = 1;
定义getPcm函数从audioPacketQueue中取出packet解码音频数据然后重采样存入缓冲区,与视频基本一致,不过此函数由opensl es回调。
int getPcm() {
while (!playState) {
if (!audioPacketQueue.empty()) {
AVFrame *audioFrame = av_frame_alloc();
AVPacket *packet = audioPacketQueue.front();
audioPacketQueue.pop();
avcodec_send_packet(audioCodecContext, packet);
int receiveResult = avcodec_receive_frame(audioCodecContext, audioFrame);
if (!receiveResult) {
swr_convert(audioSwrContext, &audioBuffer, 44100 * 2,
(const uint8_t **) (audioFrame->data), audioFrame->nb_samples);
int size = av_samples_get_buffer_size(nullptr, outChannelsNumber,
audioFrame->nb_samples,
AV_SAMPLE_FMT_S16, 1);
av_frame_free(&audioFrame);
av_packet_unref(packet);
return size;
}
av_frame_free(&audioFrame);
av_packet_unref(packet);
} else {
usleep(100);
}
}
return 0;
}
到上面为止已经完成音频获取pcm工作,下面开始opensl es播放pcm数据。
二 opensl es创建引擎,混音器
先定义全局变量
SLObjectItf engineObject;//用SLObjectItf声明引擎接口对象
SLEngineItf engineEngine;//声明具体的引擎对象
SLObjectItf outputMixObject;//用SLObjectItf创建混音器接口对象
SLEnvironmentalReverbItf outputMixEnvironmentalReverb;具体的混音器对象实例
SLEnvironmentalReverbSettings settings = SL_I3DL2_ENVIRONMENT_PRESET_DEFAULT;//默认情况
SLObjectItf audioPlayerObject;//用SLObjectItf声明播放器接口对象
SLPlayItf slPlayItf;//播放器接口
SLAndroidSimpleBufferQueueItf slBufferQueueItf;//缓冲区队列接口
创建引擎
//创建引擎
void createEngine() {
slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr);
(*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);//实现engineObject接口对象
(*engineObject)->GetInterface(engineObject, SL_IID_ENGINE,
&engineEngine);//通过引擎调用接口初始化SLEngineItf
}
创建混音器
//创建混音器
void createMixVolume() {
(*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 0, nullptr,
nullptr);//用引擎对象创建混音器接口对象
(*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);//实现混音器接口对象
SLresult sLresult = (*outputMixObject)->GetInterface(outputMixObject,
SL_IID_ENVIRONMENTALREVERB,
&outputMixEnvironmentalReverb);
//设置
if (SL_RESULT_SUCCESS == sLresult) {
(*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(
outputMixEnvironmentalReverb, &settings);
}
}
创建播放器,注册回调函数
回调函数中就用到了上面的解码pcm的函数
//创建播放器
void createPlayer() {
SLDataLocator_AndroidBufferQueue androidBufferQueue = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
2};
SLDataFormat_PCM pcm = {
SL_DATAFORMAT_PCM, 2, SL_SAMPLINGRATE_44_1, SL_PCMSAMPLEFORMAT_FIXED_16,
SL_PCMSAMPLEFORMAT_FIXED_16, SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT,
SL_BYTEORDER_LITTLEENDIAN
};
SLDataSource dataSource = {&androidBufferQueue, &pcm};
SLDataLocator_OutputMix slDataLocatorOutputMix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
SLDataSink slDataSink = {&slDataLocatorOutputMix, nullptr};
const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND, SL_IID_VOLUME};
const SLboolean req[3] = {SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE};
SLresult playerResult = (*engineEngine)->CreateAudioPlayer(engineEngine, &audioPlayerObject,
&dataSource, &slDataSink, 3, ids,
req);
if (playerResult != SL_RESULT_SUCCESS) {
return;
}
(*audioPlayerObject)->Realize(audioPlayerObject, SL_BOOLEAN_FALSE);
(*audioPlayerObject)->GetInterface(audioPlayerObject, SL_IID_PLAY, &slPlayItf);
(*audioPlayerObject)->GetInterface(audioPlayerObject, SL_IID_BUFFERQUEUE, &slBufferQueueItf);
(*slBufferQueueItf)->RegisterCallback(slBufferQueueItf, bufferQueueCallback, nullptr);
}
//OpenSL ES buffer-queue callback: invoked on the SL thread each time the
//player needs more PCM. Decodes one frame via getPcm() and enqueues it.
void bufferQueueCallback(
        SLAndroidSimpleBufferQueueItf caller,
        void *pContext
) {
    (void) pContext; //unused user context
    int bufferSize = getPcm();
    //bufferSize == 0 means playback is finished (playState was set) --
    //enqueue nothing so the callback chain stops.
    if (bufferSize != 0) {
        //Fix: use the queue interface that invoked us instead of reaching
        //for the global alias -- same queue, but the parameter is the
        //contract the OpenSL ES callback gives us.
        (*caller)->Enqueue(caller, audioBuffer, bufferSize);
    }
}
在播放前进行播放器初始化
//One-shot OpenSL ES setup before playback: engine -> output mix -> player.
//Order matters: createPlayer() uses outputMixObject, which needs
//engineEngine from createEngine(). env/instance are unused here.
void init(JNIEnv *env, jobject instance) {
createEngine();
createMixVolume();
createPlayer();
}