本课对应源文件下载链接:
https://download.csdn.net/download/XiBuQiuChong/88801295
FFmpeg作为一套庞大的音视频处理开源工具,其源码有太多值得研究的地方。但对于大多数初学者而言,如何快速利用相关的API写出自己想要的东西才是迫切需要的,至于原理的进一步学习那是以后的事情。
在上一课中,我们已经成功获取到视频流并显示,这节课我们将参考视频的工作流程来获取音频并播放。
一、最终实现的效果
这节课实现的效果与上节课相比多了音频部分,有了视频的显示和音频的播放,一个最基本的播放器就完整了。
二、使用FFmpeg获取并播放音频流的原理
要播放音频,就要用FFmpeg对其中的音频流进行解码获取到音频帧,然后把音频帧数据转换为PCM并交给由Windows系统声音播放API(waveOut系列函数)封装的播放函数即可。
三、rtmp播放器音频播放实现的主要代码
1.压缩备份上节课工程文件夹为demo2.rar,并修改工程文件夹demo2为demo3,重要的事情再说一遍:及时备份源文件并在原基础上继续迭代开发是一种好习惯。
2.与处理视频的过程差不多,要播放音频就要先初始化音频解码器,在函数runFFmpeg中加入以下代码:
int fmlp::runFFmpeg(){
//返回值
int ret;
//rtmp地址,也可以是本地文件
const char *inFileName = "rtmp://192.168.0.100/vod/sample.mp4";
//输入文件上下文结构体
AVFormatContext *inFormatCtx = NULL;
//视频解码相关
int videoIndex = -1;
AVCodec *vDecodec;
AVCodecContext *vDecodeCtx = NULL;
//图像转换上下文结构体
struct SwsContext* bgrSwsCtx = NULL;
struct SwsContext* yuvSwsCtx = NULL;
//图像数据数组
uint8_t* bgrBuff = NULL;
//读取的数据包
AVPacket normalPkt;
//Mat对象
cv::Mat srcMat;
//音频解码器
int audioIndex = -1;
AVCodec *aDecodec;
AVCodecContext *aDecodeCtx = NULL;
//开始时间和当前时间
int64_t startTime = 0;
int64_t currentTime = 0;
//FFmpeg初始化
av_register_all();
avcodec_register_all();
avformat_network_init();
inFormatCtx = avformat_alloc_context();
AVDictionary* options = NULL;
av_dict_set(&options, "buffer_size", "10240", 0);
av_dict_set(&options, "max_delay", "1000", 0);
av_dict_set(&options, "max_analyze_duration", "10000", 0);
av_dict_set(&options, "probesize", "20480", 0);
av_dict_set(&options, "stimeout", "5000", 0);
av_dict_set(&options, "listen_time", "5000", 0);
av_dict_set(&options, "initial_timeout", "5000", 0);
av_dict_set(&options, "preset", "ultrafast", 0);
av_dict_set(&options, "tune", "zerolatency", 0);
if ((ret = avformat_open_input(&inFormatCtx, inFileName, 0, &options)) < 0)
{
TRACE("无法打开输入流.\n");
return -1;
}
if (ret == 0){
isRunning = true;
}
else{
isRunning = false;
}
if ((ret = avformat_find_stream_info(inFormatCtx, 0)) < 0)
{
TRACE("查找输入流信息失败.\n");
return -1;
}
//获取音视频流通道ID
for (int i = 0; i < inFormatCtx->nb_streams; i++){
if (inFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoIndex = i;
}
if (inFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
{
audioIndex = i;
}
}
TRACE("视频流通道索引%d\n", videoIndex);
//初始化并打开视频解码器
vDecodec = avcodec_find_decoder(inFormatCtx->streams[videoIndex]->codecpar->codec_id);
vDecodeCtx = avcodec_alloc_context3(vDecodec);
avcodec_parameters_to_context(vDecodeCtx, inFormatCtx->streams[videoIndex]->codecpar);
avcodec_open2(vDecodeCtx, vDecodec, 0);
//初始化并打开音频解码器
aDecodec = avcodec_find_decoder(inFormatCtx->streams[audioIndex]->codecpar->codec_id);
aDecodeCtx = avcodec_alloc_context3(aDecodec);
avcodec_parameters_to_context(aDecodeCtx, inFormatCtx->streams[audioIndex]->codecpar);
avcodec_open2(aDecodeCtx, aDecodec, 0);
av_dump_format(inFormatCtx, 0, inFileName, 0);
//解码后的原始视频帧
AVFrame *deVideoFrame = av_frame_alloc();
//缩放后的视频帧
AVFrame bgrFrame = { 0 };
bgrFrame.width = 960;
bgrFrame.height = 540;
bgrFrame.format = AV_PIX_FMT_BGR24;
int bgrFrameSize = av_image_get_buffer_size((AVPixelFormat)bgrFrame.format, bgrFrame.width, bgrFrame.height, 1);
bgrBuff = (uint8_t*)av_malloc(bgrFrameSize);
av_image_fill_arrays(bgrFrame.data, bgrFrame.linesize, bgrBuff, (AVPixelFormat)bgrFrame.format, bgrFrame.width, bgrFrame.height, 1);
//获取图像转换上下文
bgrSwsCtx = sws_getContext(vDecodeCtx->width, vDecodeCtx->height, vDecodeCtx->pix_fmt, bgrFrame.width, bgrFrame.height, (AVPixelFormat)bgrFrame.format, SWS_BICUBIC, NULL, NULL, NULL);
//解码后的原始音频帧
AVFrame *deAudioFrame = av_frame_alloc();
//音频数据数组
char *outAudioBuff;
//音频数据数组队列
struct audioQueObj tmpAudioQueObj;
//获取开始时间
startTime = av_gettime();
while (isRunning)
{
ret = av_read_frame(inFormatCtx, &normalPkt);
if (ret < 0){
break;
}
//当数据包时间快于当前时间则延当延时
currentTime = (av_gettime() - startTime) / 1000;
if (normalPkt.pts > currentTime){
Sleep(normalPkt.pts - currentTime);
}
if (normalPkt.stream_index == videoIndex)
{
ret = avcodec_send_packet(vDecodeCtx, &normalPkt);
ret = avcodec_receive_frame(vDecodeCtx, deVideoFrame);
av_packet_unref(&normalPkt);
ret = sws_scale(bgrSwsCtx, (const uint8_t* const*)deVideoFrame->data, deVideoFrame->linesize, 0, deVideoFrame->height, bgrFrame.data, bgrFrame.linesize);
srcMat = cv::Mat(bgrFrame.height, bgrFrame.width, CV_8UC3, bgrFrame.data[0]);
//imshow("viceo", srcMat);
//cv::waitKey(10);
mainDlg->drawMatOfPlay(srcMat);
av_frame_unref(deVideoFrame);
}
else if (normalPkt.stream_index == audioIndex)
{
ret = avcodec_send_packet(aDecodeCtx, &normalPkt);
while (1){
ret = avcodec_receive_frame(aDecodeCtx, deAudioFrame);
if (ret != 0){
break;
}
else{
int originAudioDataSize = deAudioFrame->linesize[0] * deAudioFrame->channels << 1;
outAudioBuff = new char[originAudioDataSize];
int outSampleNum = convertAudioFrameToAudioBuff(deAudioFrame, &outAudioBuff, originAudioDataSize);
int finalAudioDataSize = outSampleNum *av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) *deAudioFrame->channels;
tmpAudioQueObj.audioDataArr = outAudioBuff;
tmpAudioQueObj.audioDataSize = finalAudioDataSize;
EnterCriticalSection(&queLock);
outAudioQue.push(tmpAudioQueObj);
if (outAudioQue.size() > 50){
free(outAudioQue.front().audioDataArr);
outAudioQue.front().audioDataSize = 0;
outAudioQue.front().audioDataArr = NULL;
outAudioQue.front().audioDataSize = NULL;
outAudioQue.pop();
}
LeaveCriticalSection(&queLock);
}
av_frame_unref(deAudioFrame);
}
av_packet_unref(&normalPkt);
}
}
av_dict_free(&options);
avformat_close_input(&inFormatCtx);
isRunning = false;
return 0;
}
3.为了能播放声音,需要先打开扬声器,然后把队列中的数据送入扬声器:
//打开扬声器
void fmlp::openSpeaker(){
outWaveform.wFormatTag = WAVE_FORMAT_PCM;
outWaveform.nSamplesPerSec = 44100;
outWaveform.wBitsPerSample = 16;
outWaveform.nChannels = 2;
//waveform.nBlockAlign = (waveform.wBitsPerSample * waveform.nChannels) / 8;
outWaveform.nBlockAlign = (outWaveform.wBitsPerSample*outWaveform.nChannels) >> 3;
outWaveform.nAvgBytesPerSec = outWaveform.nBlockAlign * outWaveform.nSamplesPerSec;
outWaveform.cbSize = 0;
waveOutOpen(&hWaveOut, WAVE_MAPPER, &outWaveform, (DWORD)(speakerCallback), 0L, CALLBACK_FUNCTION);
waveOutSetVolume(hWaveOut, 4 * 0xffffffff);
waveHdrArr = new WAVEHDR[audioDataArrNum];
for (int i = 0; i < audioDataArrNum; i++)
{
waveHdrArr[i].lpData = new char[finalAudioDataSize];
waveHdrArr[i].dwBufferLength = finalAudioDataSize;
waveHdrArr[i].dwBytesRecorded = 0;
waveHdrArr[i].dwUser = 0;
waveHdrArr[i].dwFlags = 0;
waveHdrArr[i].dwLoops = 0;
waveHdrArr[i].lpNext = NULL;
waveHdrArr[i].reserved = 0;
waveOutPrepareHeader(hWaveOut, &waveHdrArr[i], sizeof(WAVEHDR));
}
}
// waveOut callback: releases a buffer once the driver has finished playing it.
// NOTE(review): MSDN warns against calling system/alloc functions from this
// callback; the original already freed here, so the pattern is kept.
DWORD CALLBACK fmlp::speakerCallback(HWAVEOUT hwaveout, UINT uMsg, DWORD dwInstance, DWORD dwParam1, DWORD dwParam2)
{
    switch (uMsg)
    {
    case WOM_OPEN:
        break;
    case WOM_DONE:
    {
        // dwParam1 is the WAVEHDR whose buffer just finished playing
        LPWAVEHDR pwh = (LPWAVEHDR)dwParam1;
        if (pwh->lpData){
            // the buffer was allocated with new char[] — delete[] it
            // (the original used free(), which is undefined behavior)
            delete[] pwh->lpData;
            pwh->lpData = NULL;
            pwh->dwBufferLength = 0;
        }
    }
        break;
    case WOM_CLOSE:
        break;
    default:
        break;
    }
    return 0;
}
// Thread entry point: lpParam carries the fmlp instance whose playAudio loop runs.
DWORD WINAPI fmlp::playAudioThreadProc(LPVOID lpParam){
    fmlp *self = static_cast<fmlp*>(lpParam);
    self->playAudio();
    return 0;
}
int fmlp::playAudio(){
int i = 0;
while (true){
if (outAudioQue.empty()){
Sleep(5);
continue;
}
EnterCriticalSection(&queLock);
if (waveHdrArr[i].dwFlags & WHDR_PREPARED){
waveHdrArr[i].lpData = (LPSTR)outAudioQue.front().audioDataArr;
waveHdrArr[i].dwBufferLength = outAudioQue.front().audioDataSize;
waveOutWrite(hWaveOut, &waveHdrArr[i], sizeof(WAVEHDR));
outAudioQue.pop();
i++;
}
LeaveCriticalSection(&queLock);
if (i >= audioDataArrNum){
i = 0;
}
Sleep(5);
}
}
4.这样一个最简单的既能播放视频也能播放音频的播放器就完成了。