本博主只关注实战系列,Qt的基本语法可参考其他博主的Qt教程
本章内容会讲解音视频播放中所用到的原理及技术,音频播放器是基于QGraphicsView进行画面显示,QAudioOutput进行音频播放、音视频同步、音视频倍速功能,不使用SDL,很多博客讲解都会用到SDL进行音频的播放,本人以为Qt类库可以解决的问题尽量使用Qt类库来实现,当然三方库的扩展和效果可能会更好。
一、项目效果
支持媒体文件和实时流
二、总体流程
音频解码流程
三、总体设计
1:创建总线程
创建总线程,用于音视频流的初始化,对音视频线程进行管理
(1):总线程中对音视频的包进行管理
(2):在音视频各自的解码线程中对包进行解码
// Demuxer thread entry point: reads packets from the opened input and
// dispatches them to the audio/video packet queues consumed by the two
// decode threads, while servicing stop/pause/seek requests.
// Emits the total duration once on entry; on exit waits for the decode
// threads and closes the input context.
// NOTE(review): the closing brace of this function is missing from this
// excerpt of the post.
void Player::run()
{
    // Total media duration in seconds (duration is in AV_TIME_BASE units).
    int nTotalSec = pFormatCtx->duration / (AV_TIME_BASE);
    emit sig_sendDuration(nTotalSec); // report the total duration to the UI
    while (true)
    {
        if (bStop)
        {
            break;
        }
        if (bPause)
        {
            msleep(10);
            continue;
        }
        // Back-pressure: stop reading when either queue is deep enough,
        // so the whole file is not demuxed into memory at once.
        if ((m_nAudioStream >= 0 && pAudioPacketQueue->size() > 100) ||
            (m_nVideoStream >= 0 && pVideoPacketQueue->size() > 100))
        {
            msleep(10);
            continue;
        }
        if (bSeek) // seek requested
        {
            int stream_index = -1;
            // m_nSeek_Time is in seconds; convert to AV_TIME_BASE units first.
            int64_t seek_target = m_nSeek_Time * AV_TIME_BASE;
            if (m_nVideoStream >= 0)
                stream_index = m_nVideoStream;
            else if (m_nAudioStream >= 0)
                stream_index = m_nAudioStream;
            AVRational aVRational = { 1, AV_TIME_BASE };
            if (stream_index >= 0)
            {
                // Rescale the target into the chosen stream's time_base.
                seek_target = av_rescale_q(seek_target, aVRational, pFormatCtx->streams[stream_index]->time_base);
            }
            if (av_seek_frame(pFormatCtx, stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0)
            {
                qDebug() << "error while seeking";
            }
            else
            {
                if (m_nAudioStream >= 0)
                {
                    // Build a "FLUSH" sentinel packet: the audio decode thread
                    // flushes its codec buffers when it pops this packet.
                    AVPacket packet;
                    av_new_packet(&packet, 10);
                    strcpy((char*)packet.data, "FLUSH");
                    // Drop everything queued before the seek point.
                    audioMutex.lock();
                    pAudioPacketQueue->clear();
                    audioMutex.unlock();
                    // Queue the flush sentinel.
                    // NOTE(review): &packet points at a stack local — this
                    // assumes the queue copies the AVPacket by value; confirm.
                    audioMutex.lock();
                    pAudioPacketQueue->push(&packet);
                    audioMutex.unlock();
                }
                if (m_nVideoStream >= 0)
                {
                    // Same "FLUSH" sentinel for the video decode thread.
                    AVPacket packet;
                    av_new_packet(&packet, 10);
                    strcpy((char*)packet.data, "FLUSH");
                    // Drop everything queued before the seek point.
                    videoMutex.lock();
                    pVideoPacketQueue->clear();
                    videoMutex.unlock();
                    // Queue the flush sentinel.
                    videoMutex.lock();
                    pVideoPacketQueue->push(&packet);
                    videoMutex.unlock();
                }
            }
            bSeek = false;
        }
        AVPacket* packet = av_packet_alloc(); // allocate a packet for this read
        int ret = av_read_frame(pFormatCtx, packet); // read the next demuxed packet
        if (ret < 0)
        {
            // Treated as end of stream (could also be a read error).
            qDebug() << "av_read_frame fail.";
            // NOTE(review): av_packet_unref() drops the payload only; the
            // AVPacket struct from av_packet_alloc() leaks here and in the
            // non-A/V branch below — av_packet_free(&packet) would free both.
            av_packet_unref(packet);
            break;
        }
        if (packet->stream_index == m_nAudioStream) // audio packet
        {
            // Hand the packet to the audio decode thread.
            audioMutex.lock();
            pAudioPacketQueue->push(packet);
            audioMutex.unlock();
        }
        else if (packet->stream_index == m_nVideoStream) // video packet
        {
            // Hand the packet to the video decode thread.
            videoMutex.lock();
            pVideoPacketQueue->push(packet);
            videoMutex.unlock();
        }
        else
        {
            // Packet from a stream we do not play (subtitles, data, ...).
            av_packet_unref(packet);
        }
    }
    // Shutdown: wait for an explicit stop request...
    while (!bStop)
    {
        msleep(100);
    }
    // ...then wait for both decode threads to drain and finish.
    while ((m_nAudioStream >= 0 && !bAudioFinished) || (m_nVideoStream >= 0 && !bVideoFinished))
    {
        msleep(10);
    }
    avformat_close_input(&pFormatCtx);
    // NOTE(review): avformat_close_input() already frees the context and
    // nulls pFormatCtx, so this extra call receives NULL (a no-op in FFmpeg).
    avformat_free_context(pFormatCtx);
    bFinished = true;
2:创建音频解码线程
// Audio decode/playback loop (interior of the audio thread's run body —
// the enclosing function signature is outside this excerpt).
// Pops packets from pAudioPacketQueue, decodes them, resamples to S16 PCM
// and writes to the QAudioOutput device; audio drives the master clock.
while (true)
{
    if (bStop) // stop: drain the queue and exit
    {
        audioMutex.lock();
        pAudioPacketQueue->clear();
        audioMutex.unlock();
        bAudioFinished = true; // lets the demuxer thread finish teardown
        break;
    }
    if (bPause) // paused
    {
        msleep(10);
        continue;
    }
    if (pAudioPacketQueue->size() <= 0)
    {
        msleep(1); // queue is just momentarily empty
        continue;
    }
    // Only decode when the audio device can accept another period of PCM.
    if (getFree() > output->periodSize())
    {
        audioMutex.lock();
        AVPacket* pkt = pAudioPacketQueue->pop();
        audioMutex.unlock();
        // "FLUSH" sentinel pushed by the demuxer after a seek: reset the
        // decoder's internal buffers before feeding post-seek packets.
        if (strcmp((char*)pkt->data, "FLUSH") == 0)
        {
            avcodec_flush_buffers(pAudioCodecCtx);
            // NOTE(review): av_packet_unref() frees the payload only; the
            // AVPacket struct itself is never av_packet_free()'d (leak),
            // here and on every path below.
            av_packet_unref(pkt);
            continue;
        }
        if (avcodec_send_packet(pAudioCodecCtx, pkt) != 0)
        {
            qDebug("input AVPacket to decoder failed!\n");
            av_packet_unref(pkt);
            continue;
        }
        av_packet_unref(pkt);
        AVFrame* pFrame = av_frame_alloc();
        // NOTE(review): one packet can yield several frames; only the first
        // is received here — the rest stay buffered in the decoder.
        if (avcodec_receive_frame(pAudioCodecCtx, pFrame) != 0)
        {
            av_frame_free(&pFrame);
            continue;
        }
        // Presentation time of this frame in seconds.
        double audioPts = pFrame->pts * av_q2d(pFormatCtx->streams[m_nAudioStream]->time_base);
        //qDebug() << "audio pts:" << audioPts;
        // After a seek, discard frames that precede the seek target.
        if (audioPts < m_nSeek_Time)
        {
            av_frame_free(&pFrame);
            continue;
        }
        emit sig_sendPlayPosition((int)audioPts); // progress for the UI
        clock->set_clock(audioPts, 0); // audio is the master clock
        uint8_t* data[1];
        data[0] = (uint8_t*)out; // resample destination buffer
        // Resample to the output format; 44100 is the capacity of `out`
        // in samples per channel — assumed large enough for one frame.
        int len = swr_convert(pAudioSwrCtx, data, 44100 , (const uint8_t**)pFrame->data, pFrame->nb_samples);
        if (len > 0)
        {
            // Size in bytes of the converted S16 PCM block.
            len = av_samples_get_buffer_size(NULL, pAudioCodecCtx->channels, pFrame->nb_samples, AV_SAMPLE_FMT_S16, 0);
        }
        av_frame_free(&pFrame);
        // NOTE(review): if swr_convert() failed, len is <= 0 here and is
        // still passed to write() — should be guarded.
        write(out, len);
    }
}
// Teardown: release the resampler and the codec context.
if (pAudioSwrCtx != nullptr)
{
    swr_free(&pAudioSwrCtx);
    pAudioSwrCtx = nullptr;
}
if (pAudioCodecCtx != nullptr)
{
    avcodec_close(pAudioCodecCtx);
    pAudioCodecCtx = nullptr;
}
qDebug() << QStringLiteral("音频播放线程退出了");
3:创建视频解码线程
// Video decode/display loop (interior of the video thread's run body —
// the enclosing function signature is outside this excerpt).
// Pops packets from pVideoPacketQueue, decodes them, syncs against the
// audio master clock, converts to RGB and emits a QImage for display.
while (true)
{
    if (bStop) // stop: drain the queue and exit
    {
        videoMutex.lock();
        pVideoPacketQueue->clear();
        videoMutex.unlock();
        bVideoFinished = true; // lets the demuxer thread finish teardown
        break;
    }
    if (bPause) // paused
    {
        msleep(10);
        continue;
    }
    videoMutex.lock();
    if (pVideoPacketQueue->size() <= 0)
    {
        videoMutex.unlock();
        msleep(1); // queue is just momentarily empty
        continue;
    }
    AVPacket* pkt = pVideoPacketQueue->pop();
    videoMutex.unlock();
    // NOTE(review): unlike the audio thread, the "FLUSH" seek sentinel is
    // not checked here, so it is fed to the decoder as ordinary data and
    // avcodec_flush_buffers() is never called for video.
    if (avcodec_send_packet(pVideoCodecCtx, pkt) != 0)
    {
        qDebug("video input AVPacket to decoder failed!\n");
        av_packet_unref(pkt);
        continue;
    }
    AVFrame* pFrame = av_frame_alloc();
    if (avcodec_receive_frame(pVideoCodecCtx, pFrame) != 0)
    {
        // NOTE(review): pkt is not unref'd on this path — its payload leaks.
        av_frame_free(&pFrame);
        continue;
    }
    av_packet_unref(pkt);
    if (m_nAudioStream >= 0) // A/V sync against the audio master clock
    {
        double videoPts = pFrame->pts * av_q2d(pFormatCtx->streams[m_nVideoStream]->time_base); // seconds
        // After a seek, discard frames that precede the seek target.
        if (videoPts < m_nSeek_Time)
        {
            av_frame_free(&pFrame);
            continue;
        }
        double audioTime = clock->get_clock();
        if (isnan(audioTime))
        {
            // Master clock not set yet: wait about one frame interval, retry.
            qDebug() << "videoPlayer audioTime is nan";
            usleep(1000 * 1000 / dFrameRate); // dFrameRate: frames per second
            av_frame_free(&pFrame);
            continue;
        }
        int DiffTimeMs = videoPts * 1000 - audioTime * 1000;
        if (DiffTimeMs >= 0) // video ahead of the master clock: sleep it off
        {
            /*qDebug() << "videoPlayer fast videoPts:" << videoPts;
            qDebug() << "videoPlayer fast audioTime:" << audioTime;
            qDebug() << "videoPlayer fast:" << DiffTimeMs;*/
            msleep(DiffTimeMs);
        }
        else // video behind the master clock: drop this frame
        {
            //qDebug() << "videoPlayer slowly:" << DiffTimeMs;
            msleep(1);
            av_frame_free(&pFrame);
            continue;
        }
    }
    // Convert the decoded frame into pVideoFrameRGB's RGB buffer.
    sws_scale(pVideoSwsCtx,
        (const unsigned char* const*)pFrame->data,
        pFrame->linesize, 0, pVideoCodecCtx->height, pVideoFrameRGB->data,
        pVideoFrameRGB->linesize);
    // NOTE(review): QImage wraps pVideoFrameRGB's buffer without copying;
    // this assumes the receiver copies/paints the image before the next
    // sws_scale() overwrites the buffer — verify the connection type.
    QImage image((uchar*)pVideoFrameRGB->data[0], pVideoCodecCtx->width, pVideoCodecCtx->height, QImage::Format_RGB32);
    emit sig_sendFrame(image);
    // NOTE(review): av_frame_unref() drops the payload only; the AVFrame
    // struct from av_frame_alloc() leaks each iteration unless freed
    // elsewhere — av_frame_free(&pFrame) would release both.
    av_frame_unref(pFrame);
}
// Teardown: release the RGB frame, the scaler and the codec context.
if (pVideoFrameRGB != nullptr)
{
    av_frame_unref(pVideoFrameRGB);
}
if (pVideoSwsCtx != nullptr)
{
    sws_freeContext(pVideoSwsCtx);
}
if (pVideoCodecCtx != nullptr)
{
    avcodec_close(pVideoCodecCtx);
    pVideoCodecCtx = nullptr;
}
qDebug() << QStringLiteral("视频播放线程退出了");
4:创建时钟(音视频同步)
#include "Clock.h"
// Constructs the clock at normal speed (1.0), unpaused, with an undefined
// pts (NaN) so get_clock() reports "not yet set" until set_clock() is called.
Clock::Clock()
    : m_Speed(1.0)
    , m_Paused(0)
{
    set_clock(NAN, -1);
}
// The clock owns no resources; the compiler-generated destructor suffices.
Clock::~Clock() = default;
// Returns the current clock time in seconds.
// While paused the clock is frozen at the last pts that was set; while
// running it extrapolates from the last anchor point.
// Fix: the original ignored m_Speed, so variable-speed playback (a feature
// this player advertises) would drift out of sync. The elapsed time since
// the last update is now scaled by the playback speed, using the same
// formula as ffplay's get_clock(); behavior is unchanged when m_Speed == 1.0.
double Clock::get_clock()
{
    if (this->m_Paused)
    {
        return this->m_Pts;
    }
    else
    {
        double time = av_gettime_relative() / 1000000.0;
        // m_PtsDrift + time expands to: m_Pts + (time - m_LastUpdated).
        // Subtracting (time - m_LastUpdated) * (1 - speed) makes the clock
        // advance at m_Speed times real time.
        double ret = this->m_PtsDrift + time - (time - this->m_LastUpdated) * (1.0 - this->m_Speed);
        return ret;
    }
}
void Clock::set_clock_at(double pts, int serial, double time)
{
this->m_Pts = pts;
this->m_LastUpdated = time;
this->m_PtsDrift = this->m_Pts - time;
this->m_Serial = serial;
}
// Anchors the clock to `pts` at the current wall-clock time
// (av_gettime_relative() reports microseconds; convert to seconds).
void Clock::set_clock(double pts, int serial)
{
    set_clock_at(pts, serial, av_gettime_relative() / 1000000.0);
}