一、项目描述
基于Qt+FFmpeg+SDL,从零实现一个简单的音视频播放器
完成功能: 1.完成播放器框架和解复用模块开发;
2.完成包队列、帧队列模块设计;
3.实现解码线程模块;
4.实现声音输出模块和视频画面渲染;
5.实现音视频同步效果;
二、具体实现
1.整体框架
2.解复用模块
// demuxthread.h
#ifndef DEMUXTHREAD_H
#define DEMUXTHREAD_H
#include "thread.h"
#ifdef __cplusplus /// FFmpeg is a C library: give its headers C linkage in C++
extern "C"
{
// FFmpeg headers
#include "libavutil/avutil.h"
#include "libavformat/avformat.h"
}
#endif
// Demux thread: opens an input URL, reads AVPackets in a loop and classifies
// them by stream index (audio/video). Thread lifetime is managed by the
// Thread base class (thread_, abort_ are inherited from it).
class DemuxThread : public Thread
{
public:
    DemuxThread();
    ~DemuxThread();
    int Init(const char *url);   // open input, find best audio/video streams; 0 on success
    int Start();                 // spawn the worker thread running Run()
    int Stop();                  // stop the worker and close the input context
    void Run();                  // thread body: av_read_frame loop
private:
    char err2str[256] = {0};     // scratch buffer for av_strerror messages
    std::string url_;            // input file name / URL
    // AVPacketQueue *audio_queue_ = NULL;
    // AVPacketQueue *video_queue_ = NULL;
    AVFormatContext *ifmt_ctx_ = NULL;  // demuxer context, owned; closed in Stop()
    int audio_index_ = -1;       // best audio stream index, -1 when absent
    int video_index_ = -1;       // best video stream index, -1 when absent
};
#endif // DEMUXTHREAD_H
//demuxthread.cpp
#include "demuxthread.h"
#include "log.h"
// Construction only logs; real setup happens in Init().
DemuxThread::DemuxThread()
{
    LogInfo("DemuxThread");
}
// Destructor stops the worker thread if it is still running, so the thread
// never outlives the object it runs on.
DemuxThread::~DemuxThread()
{
    LogInfo("~DemuxThread");
    if(thread_) {
        Stop();
    }
}
int DemuxThread::Init(const char *url)
{
LogInfo("url:%s", url);
int ret = 0;
url_ = url;
ifmt_ctx_ = avformat_alloc_context();
ret = avformat_open_input(&ifmt_ctx_, url_.c_str(), NULL, NULL);
if(ret < 0) {
av_strerror(ret, err2str, sizeof(err2str));
LogError("avformat_open_input failed, ret:%d, err2str:%s", ret, err2str);
return -1;
}
ret = avformat_find_stream_info(ifmt_ctx_, NULL);
if(ret < 0) {
av_strerror(ret, err2str, sizeof(err2str));
LogError("avformat_find_stream_info failed, ret:%d, err2str:%s", ret, err2str);
return -1;
}
av_dump_format(ifmt_ctx_, 0, url_.c_str(), 0);
audio_index_ = av_find_best_stream(ifmt_ctx_, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
video_index_ = av_find_best_stream(ifmt_ctx_, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
LogInfo("audio_index_:%d, video_index_:%d", audio_index_, video_index_);
if(audio_index_ < 0 || video_index_ < 0) {
LogError("no audio or no video");
return -1;
}
LogInfo("Init leave");
}
// Launch the demux worker; Run() is the thread entry point.
int DemuxThread::Start()
{
    thread_ = new std::thread(&DemuxThread::Run, this);
    if(thread_ != nullptr) {
        return 0;
    }
    LogError("new std::thread(&DemuxThread::Run, this) failed");
    return -1;
}
// Stop the worker thread, then release the demuxer context.
int DemuxThread::Stop()
{
    Thread::Stop();                    // base-class stop — presumably sets abort_ and joins; confirm in thread.h
    avformat_close_input(&ifmt_ctx_);  // safe no-op when ifmt_ctx_ is already NULL
    return 0;   // FIX: missing return in a non-void function (UB)
}
// Thread body: read packets from the input until EOF/error or abort_ is set.
// Currently packets are only classified and logged; the queue push is added
// in a later step of the tutorial.
void DemuxThread::Run()
{
    LogInfo("Run into");
    int ret = 0;
    AVPacket pkt;
    while (abort_ != 1) {   // abort_ is inherited from Thread and set by Stop()
        ret = av_read_frame(ifmt_ctx_, &pkt);
        if(ret < 0) {
            // EOF also lands here (AVERROR_EOF); the loop simply ends.
            av_strerror(ret, err2str, sizeof(err2str));
            LogError("av_read_frame failed, ret:%d, err2str:%s", ret, err2str);
            break;
        }
        if(pkt.stream_index == audio_index_) {
            LogInfo("audio pkt");
        } else if(pkt.stream_index == video_index_) {
            LogInfo("video pkt");
        }
        av_packet_unref(&pkt);   // drop the payload; we only own one packet at a time
    }
    LogInfo("Run finish");
}
3.包队列帧队列模块
队列提供的功能:Push、Pop、Front、Abort、Size
//queue.h
#ifndef QUEUE_H
#define QUEUE_H
#include <mutex>
#include <condition_variable>
#include <queue>
// Generic thread-safe blocking queue used for both AVPacket* and AVFrame*.
// Contract: Push/Pop/Front return 0 on success, -1 once Abort() has been
// called, and -2 (Pop/Front) when the queue is empty.
template <typename T>
class Queue
{
public:
    Queue() {}
    ~Queue() {}
    // Make all further operations fail with -1 and wake every blocked Pop.
    void Abort() {
        // FIX: abort_ was written without holding mutex_, racing with the
        // predicate read inside Pop's wait_for.
        std::lock_guard<std::mutex> lock(mutex_);
        abort_ = 1;
        cond_.notify_all();
    }
    // Enqueue a value; wakes one waiting consumer.
    int Push(T val) {
        std::lock_guard<std::mutex> lock(mutex_);
        if(1 == abort_) {
            return -1;
        }
        queue_.push(val);
        cond_.notify_one();
        return 0;
    }
    // Dequeue into val, waiting up to `timeout` milliseconds for data.
    int Pop(T &val, const int timeout = 0) {
        std::unique_lock<std::mutex> lock(mutex_);
        if(queue_.empty()) {
            // Woken by Push/Abort or by the timeout; the predicate guards
            // against spurious wakeups.
            cond_.wait_for(lock, std::chrono::milliseconds(timeout), [this] {
                return !queue_.empty() || abort_;   // FIX: was bitwise '|'
            });
        }
        if(1 == abort_) {
            return -1;
        }
        if(queue_.empty()) {
            return -2;   // timed out with nothing available
        }
        val = queue_.front();
        queue_.pop();
        return 0;
    }
    // Peek at the head element without removing it.
    int Front(T &val) {
        std::lock_guard<std::mutex> lock(mutex_);
        if(1 == abort_) {
            return -1;
        }
        if(queue_.empty()) {
            return -2;
        }
        val = queue_.front();
        return 0;
    }
    // Current element count (snapshot; may be stale by the time it is used).
    int Size() {
        std::lock_guard<std::mutex> lock(mutex_);
        return static_cast<int>(queue_.size());
    }
private:
    int abort_ = 0;                 // 1 once Abort() has been called
    std::mutex mutex_;              // guards queue_ and abort_
    std::condition_variable cond_;  // signals data available / abort
    std::queue<T> queue_;
};
#endif // QUEUE_H
//avpacketqueue.h
#ifndef AVPACKETQUEUE_H
#define AVPACKETQUEUE_H
#include "queue.h"
#ifdef __cplusplus /// FFmpeg is a C library: give its headers C linkage in C++
extern "C"
{
// FFmpeg headers
//#include "libavutil/avutil.h"
//#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
}
#endif
// Thread-safe queue of AVPacket* built on Queue<T>. Push takes ownership of
// the packet's buffers (via move-ref); Pop transfers ownership to the caller,
// who must av_packet_free the result.
class AVPacketQueue
{
public:
    AVPacketQueue();
    ~AVPacketQueue();
    void Abort();                   // drain and abort the underlying queue
    int Size();                     // number of queued packets
    int Push(AVPacket *val);        // move-ref val into a queued packet
    AVPacket *Pop(const int timeout);  // NULL on abort/timeout; caller frees
private:
    void release();                 // free every packet still queued
    Queue<AVPacket *> queue_;
};
#endif // AVPACKETQUEUE_H
//avframequeue.h
#ifndef AVFRAMEQUEUE_H
#define AVFRAMEQUEUE_H
#include "queue.h"
#ifdef __cplusplus /// FFmpeg is a C library: give its headers C linkage in C++
extern "C"
{
// FFmpeg headers
//#include "libavutil/avutil.h"
//#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
}
#endif
// Thread-safe queue of AVFrame* built on Queue<T>. Push takes ownership of
// the frame's buffers (via move-ref); Pop transfers ownership to the caller,
// who must av_frame_free the result. Front peeks without transferring.
class AVFrameQueue
{
public:
    AVFrameQueue();
    ~AVFrameQueue();
    void Abort();                   // drain and abort the underlying queue
    int Push(AVFrame *val);         // move-ref val into a queued frame
    AVFrame *Pop(const int timeout);   // NULL on abort/timeout; caller frees
    AVFrame *Front();               // peek at head; ownership stays with queue
    int Size();                     // number of queued frames
private:
    void release();                 // free every frame still queued
    Queue<AVFrame *> queue_;
};
#endif // AVFRAMEQUEUE_H
//avpacketqueue.cpp
#include "avpacketqueue.h"
#include "log.h"
// Nothing to set up: the underlying Queue is default-constructed.
AVPacketQueue::AVPacketQueue()
{
}
// NOTE(review): destructor does not drain the queue — packets still queued
// at destruction leak unless Abort() was called first; confirm caller order.
AVPacketQueue::~AVPacketQueue()
{
}
// Free every queued packet, then abort the queue so producers/consumers fail.
void AVPacketQueue::Abort()
{
    release();
    queue_.Abort();
}
// Number of packets currently queued.
int AVPacketQueue::Size()
{
    return queue_.Size();   // FIX: the return keyword was missing (UB in a non-void function)
}
// Move the caller's packet into a freshly allocated one and enqueue it.
// On return, val has been reset (its buffers now belong to the queue).
// Returns 0 on success, -1 on allocation failure or when the queue is aborted.
int AVPacketQueue::Push(AVPacket *val)
{
    AVPacket *tmp_pkt = av_packet_alloc();
    if(!tmp_pkt) {                       // FIX: allocation result was unchecked
        return -1;
    }
    av_packet_move_ref(tmp_pkt, val);    // steal val's buffers; val is reset
    int ret = queue_.Push(tmp_pkt);
    if(ret < 0) {
        av_packet_free(&tmp_pkt);        // FIX: packet leaked when the queue was aborted
    }
    return ret;
}
// Dequeue one packet, waiting up to `timeout` ms. Returns NULL on abort or
// timeout; otherwise the caller owns the packet and must av_packet_free it.
AVPacket *AVPacketQueue::Pop(const int timeout)
{
    AVPacket *tmp_pkt = NULL;
    int ret = queue_.Pop(tmp_pkt, timeout);
    if(ret < 0) {
        if(ret == -1)   // -1 = aborted; -2 (timeout/empty) is a normal condition
            LogError("AVPacketQueue::Pop failed");
    }
    return tmp_pkt;
}
// Drain the queue, freeing every packet still pending.
void AVPacketQueue::release()
{
    AVPacket *packet = NULL;
    // Pop with a 1 ms timeout until the queue reports empty or aborted.
    while (queue_.Pop(packet, 1) >= 0) {
        av_packet_free(&packet);
        packet = NULL;
    }
}
//avframequeue.cpp
#include "avframequeue.h"
#include "log.h"
// Nothing to set up: the underlying Queue is default-constructed.
AVFrameQueue::AVFrameQueue()
{
}
// NOTE(review): destructor does not drain the queue — frames still queued
// at destruction leak unless Abort() was called first; confirm caller order.
AVFrameQueue::~AVFrameQueue()
{
}
// Free every queued frame, then abort the queue so producers/consumers fail.
void AVFrameQueue::Abort()
{
    release();
    queue_.Abort();
}
// Move the caller's frame into a freshly allocated one and enqueue it.
// On return, val has been reset (its buffers now belong to the queue).
// Returns 0 on success, -1 on allocation failure or when the queue is aborted.
int AVFrameQueue::Push(AVFrame *val)
{
    AVFrame *tmp_frame = av_frame_alloc();
    if(!tmp_frame) {                       // FIX: allocation result was unchecked
        return -1;
    }
    av_frame_move_ref(tmp_frame, val);     // steal val's buffers; val is reset
    int ret = queue_.Push(tmp_frame);
    if(ret < 0) {
        av_frame_free(&tmp_frame);         // FIX: frame leaked when the queue was aborted
    }
    return ret;
}
// Dequeue one frame, waiting up to `timeout` ms. Returns NULL on abort or
// timeout; otherwise the caller owns the frame and must av_frame_free it.
AVFrame *AVFrameQueue::Pop(const int timeout)
{
    AVFrame *tmp_frame = NULL;
    int ret = queue_.Pop(tmp_frame, timeout);
    if(ret < 0) {
        if(ret == -1)   // -1 = aborted; -2 (timeout/empty) is a normal condition
            LogError("AVFrameQueue::Pop failed");
    }
    return tmp_frame;
}
// Peek at the head frame without removing it. Returns NULL on abort or when
// empty; ownership stays with the queue.
AVFrame *AVFrameQueue::Front()
{
    AVFrame *tmp_frame = NULL;
    int ret = queue_.Front(tmp_frame);
    if(ret < 0) {
        if(ret == -1)
            LogError("AVFrameQueue::Front failed");   // FIX: message wrongly said "Pop failed" (copy-paste)
    }
    return tmp_frame;
}
// Number of frames currently queued.
int AVFrameQueue::Size()
{
    return queue_.Size();
}
// Drain the queue, freeing every frame still pending.
void AVFrameQueue::release()
{
    AVFrame *frame = NULL;
    // Pop with a 1 ms timeout until the queue reports empty or aborted.
    while (queue_.Pop(frame, 1) >= 0) {
        av_frame_free(&frame);
        frame = NULL;
    }
}
4.解码线程模块
//decodethread.h
#ifndef DECODETHREAD_H
#define DECODETHREAD_H
#include "thread.h"
#include "avpacketqueue.h"
#include "avframequeue.h"
// Decode thread: pops compressed packets from packet_queue_, decodes them
// with its own AVCodecContext, and pushes raw frames into frame_queue_.
// One instance per elementary stream (audio or video).
class DecodeThread : public Thread
{
public:
    // Queues are borrowed, not owned; they must outlive this thread.
    DecodeThread(AVPacketQueue *packet_queue, AVFrameQueue *frame_queue);
    ~DecodeThread();
    int Init(AVCodecParameters *par);  // build and open decoder from stream params
    int Start();                       // spawn the worker thread running Run()
    int Stop();                        // stop and join the worker
    void Run();                        // thread body: send_packet / receive_frame loop
private:
    char err2str[256] = {0};           // scratch buffer for av_strerror messages
    AVCodecContext *codec_ctx_ = NULL; // decoder context, owned
    AVPacketQueue *packet_queue_ = NULL;  // input: compressed packets (borrowed)
    AVFrameQueue *frame_queue_ = NULL;    // output: decoded frames (borrowed)
};
#endif // DECODETHREAD_H
//decodethread.cpp
#include "decodethread.h"
#include "log.h"
// Store the borrowed input/output queues; decoder setup happens in Init().
DecodeThread::DecodeThread(AVPacketQueue *packet_queue, AVFrameQueue *frame_queue)
    :packet_queue_(packet_queue), frame_queue_(frame_queue)
{
}
// Stop the worker if it is still running, then release the decoder context.
DecodeThread::~DecodeThread()
{
    if(thread_) {
        Stop();
    }
    if(codec_ctx_)
        // FIX: avcodec_close() only closed the codec and leaked the context
        // allocated by avcodec_alloc_context3; avcodec_free_context closes
        // and frees it (and NULLs the pointer).
        avcodec_free_context(&codec_ctx_);
}
// Build a decoder context from the stream's codec parameters and open it.
// Returns 0 on success, -1 on failure (the context is freed on every error
// path so the destructor never double-handles a half-built context).
int DecodeThread::Init(AVCodecParameters *par)
{
    if(!par) {
        LogError("Init par is null");
        return -1;
    }
    codec_ctx_ = avcodec_alloc_context3(NULL);
    if(!codec_ctx_) {                       // FIX: allocation was unchecked
        LogError("avcodec_alloc_context3 failed");
        return -1;
    }
    int ret = avcodec_parameters_to_context(codec_ctx_, par);
    if(ret < 0) {
        av_strerror(ret, err2str, sizeof(err2str));
        LogError("avcodec_parameters_to_context failed, ret:%d, err2str:%s", ret, err2str);
        avcodec_free_context(&codec_ctx_);  // FIX: context leaked on this path
        return -1;
    }
    AVCodec *codec = avcodec_find_decoder(codec_ctx_->codec_id);
    if(!codec) {
        LogError("avcodec_find_decoder failed");
        avcodec_free_context(&codec_ctx_);  // FIX: context leaked on this path
        return -1;
    }
    ret = avcodec_open2(codec_ctx_, codec, NULL);
    if(ret < 0) {
        av_strerror(ret, err2str, sizeof(err2str));
        LogError("avcodec_open2 failed, ret:%d, err2str:%s", ret, err2str);
        avcodec_free_context(&codec_ctx_);  // FIX: context leaked on this path
        return -1;
    }
    LogInfo("Init finish");
    return 0;
}
// Launch the decode worker; Run() is the thread entry point.
int DecodeThread::Start()
{
    thread_ = new std::thread(&DecodeThread::Run, this);
    if(thread_ != nullptr) {
        return 0;
    }
    LogError("new std::thread(&DecodeThread::Run, this) failed");
    return -1;
}
// Stop and join the decode worker via the base class.
int DecodeThread::Stop()
{
    Thread::Stop();   // presumably sets abort_ and joins — confirm in thread.h
    return 0;         // FIX: missing return in a non-void function (UB)
}
void DecodeThread::Run()
{
AVFrame *frame = av_frame_alloc();
LogInfo("DecodeThread::Run info");
while (abort_ !=1 ) {
//控制读取
if(frame_queue_->Size() > 10) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
continue;
}
AVPacket *pkt = packet_queue_->Pop(10);
if(pkt) {
int ret = avcodec_send_packet(codec_ctx_, pkt);
av_packet_free(&pkt);
LogInfo("ret = %d", ret);
if(ret < 0) {
av_strerror(ret, err2str, sizeof(err2str));
LogError("avcodec_send_packet failed, ret:%d, err2str:%s", ret, err2str);
break;
}
// 读取解码后的frame
while (true) {
ret = avcodec_receive_frame(codec_ctx_, frame);
if(ret == 0) {
frame_queue_->Push(frame);
LogInfo("%s frame queue size %d", codec_ctx_->codec->name, frame_queue_->Size());
continue;
} else if(AVERROR(EAGAIN) == ret) {
break;
} else {
abort_ = 1;
av_strerror(ret, err2str, sizeof(err2str));
LogError("avcodec_receive_frame failed, ret:%d, err2str:%s", ret, err2str);
break;
}
}
} else {
LogInfo("not got packet");
}
}
LogInfo("DecodeThread::Run Finish");
}
5.声音输出模块
//AudioOutput.h
#ifndef AUDIOOUTPUT_H
#define AUDIOOUTPUT_H
#ifdef __cplusplus /// C headers: give them C linkage in C++
extern "C"
{
// FFmpeg / SDL headers
//#include "libavutil/avutil.h"
#include "SDL.h"
#include "libswresample/swresample.h"
}
#endif
#include "avframequeue.h"
// Audio format descriptor shared between the decoder side and SDL output.
typedef struct AudioParams
{
    int freq;                   // sample rate (Hz)
    int channels;               // channel count
    int64_t channel_layout;     // channel layout mask, e.g. stereo
    enum AVSampleFormat fmt;    // sample format
    int frame_size;             // samples per frame
}AudioParams;
// Audio output: opens an SDL audio device and feeds it PCM pulled from the
// frame queue via the fill_audio_pcm callback, resampling when the decoded
// format differs from the device format.
class AudioOutput
{
public:
    AudioOutput(const AudioParams &audio_params, AVFrameQueue *frame_queue);
    ~AudioOutput();
    int Init();    // SDL audio init + open device + start playback
    int DeInit();  // pause and close the SDL audio device
public:
    // Members are public because the C-style SDL callback accesses them.
    AudioParams src_tgt_;           // format of the decoded frames
    AudioParams dst_tgt_;           // format SDL actually plays
    AVFrameQueue *frame_queue_ = NULL;       // decoded-frame source (borrowed)
    struct SwrContext *swr_ctx_ = NULL;      // resampler, lazily created
    uint8_t *audio_buf_ = NULL;     // current PCM buffer being consumed
    uint8_t *audio_buf1_ = NULL;    // resample/copy scratch buffer (av_fast_malloc)
    uint32_t audio_buf_size = 0;    // total bytes in audio_buf_
    uint32_t audio_buf1_size = 0;   // capacity of audio_buf1_
    uint32_t audio_buf_index = 0;   // consumption offset into audio_buf_
};
#endif // AUDIOOUTPUT_H
//audiooutput.cpp
#include "audiooutput.h"
#include "log.h"
// Record the decoded-side format and the borrowed frame queue; the SDL
// device is opened later in Init().
AudioOutput::AudioOutput(const AudioParams &audio_params, AVFrameQueue *frame_queue)
    :src_tgt_(audio_params), frame_queue_(frame_queue)
{
}
// NOTE(review): swr_ctx_ / audio_buf1_ are never freed here — confirm whether
// DeInit or a later tutorial step releases them.
AudioOutput::~AudioOutput()
{
}
// Debug-only PCM dump of everything sent to the device.
FILE *dump_pcm = NULL;
// SDL audio callback: fill `stream` with `len` bytes of PCM.
// 1. Pull decoded frames from the frame queue and (re)fill audio_buf_.
// 2. A frame may be larger than `len`: audio_buf_index tracks how much of the
//    current buffer has already been consumed across callback invocations.
void fill_audio_pcm(void *udata, Uint8 *stream, int len) {
    AudioOutput *is = (AudioOutput *)udata;
    int len1 = 0;
    int audio_size = 0;
    if(!dump_pcm) {
        dump_pcm = fopen("dump.pcm", "wb");
    }
    LogInfo("fill pcm len:%d", len);
    while (len > 0) { // until the SDL stream buffer is full
        if(is->audio_buf_index == is->audio_buf_size) { // current buffer exhausted
            is->audio_buf_index = 0;
            AVFrame *frame = is->frame_queue_->Pop(10);
            if(frame) {
                // Decide whether resampling is needed: any mismatch between
                // the frame format and the device format, and no resampler yet.
                if( ((frame->format != is->dst_tgt_.fmt)            // sample format differs
                     || (frame->sample_rate != is->dst_tgt_.freq)   // sample rate differs
                     || (frame->channel_layout != is->dst_tgt_.channel_layout)) // layout differs
                    && (!is->swr_ctx_)) { // create the resampler once
                    is->swr_ctx_ = swr_alloc_set_opts(NULL,
                                                      is->dst_tgt_.channel_layout,
                                                      (enum AVSampleFormat)is->dst_tgt_.fmt,
                                                      is->dst_tgt_.freq,
                                                      frame->channel_layout,
                                                      (enum AVSampleFormat)frame->format,
                                                      frame->sample_rate,
                                                      0, NULL);
                    if (!is->swr_ctx_ || swr_init(is->swr_ctx_) < 0) { // verify initialization
                        LogError(
                            "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                            frame->sample_rate,
                            av_get_sample_fmt_name((enum AVSampleFormat)frame->format),
                            frame->channels,
                            is->dst_tgt_.freq,
                            av_get_sample_fmt_name((enum AVSampleFormat)is->dst_tgt_.fmt),
                            is->dst_tgt_.channels);
                        swr_free((SwrContext **)(&is->swr_ctx_));
                        return;
                    }
                }
                if(is->swr_ctx_) { // resample into audio_buf1_
                    const uint8_t **in = (const uint8_t **)frame->extended_data;
                    uint8_t **out = &is->audio_buf1_;
                    // Upper bound on output samples (+256 headroom); must not
                    // change the frame's playback duration.
                    int out_samples = frame->nb_samples * is->dst_tgt_.freq/frame->sample_rate + 256;
                    int out_bytes = av_samples_get_buffer_size(NULL, is->dst_tgt_.channels, out_samples, is->dst_tgt_.fmt, 0); // output byte count
                    if(out_bytes <0) {
                        LogError("av_samples_get_buffer_size failed");
                        return;
                    }
                    av_fast_malloc(&is->audio_buf1_, &is->audio_buf1_size, out_bytes);
                    int len2 = swr_convert(is->swr_ctx_, out, out_samples, in, frame->nb_samples); // returns samples per channel
                    if(len2 <0) {
                        LogError("swr_convert failed");
                        return;
                    }
                    is->audio_buf_ = is->audio_buf1_;
                    is->audio_buf_size = av_samples_get_buffer_size(NULL, is->dst_tgt_.channels, len2, is->dst_tgt_.fmt, 1);
                } else { // formats match: plain copy, no resampling
                    audio_size = av_samples_get_buffer_size(NULL, frame->channels, frame->nb_samples, (enum AVSampleFormat)frame->format, 1);
                    av_fast_malloc(&is->audio_buf1_, &is->audio_buf1_size, audio_size);
                    is->audio_buf_ = is->audio_buf1_;
                    is->audio_buf_size = audio_size;
                    // NOTE(review): copies only data[0]; correct for packed
                    // formats, wrong for planar multi-channel — confirm.
                    memcpy(is->audio_buf_, frame->data[0], audio_size);
                }
                av_frame_free(&frame);
            }else {
                // No decoded data available: emit 512 bytes of silence so the
                // callback still makes progress.
                is->audio_buf_ = NULL;
                is->audio_buf_size = 512;
            }
        }
        len1 = is->audio_buf_size - is->audio_buf_index; // bytes left in current buffer
        if(len1 > len)
            len1 = len;
        if(!is->audio_buf_) { // silence
            memset(stream, 0, len1);
        } else {
            // Copy the valid PCM into the SDL stream.
            // NOTE(review): memcpy followed by SDL_MixAudio mixes the same
            // samples onto themselves (amplitude doubling) — likely only one
            // of the two is intended; confirm.
            memcpy(stream, is->audio_buf_ + is->audio_buf_index, len1);
            SDL_MixAudio(stream, is->audio_buf_ + is->audio_buf_index, len1, SDL_MIX_MAXVOLUME/8 );
            fwrite((uint8_t *)is->audio_buf_ + is->audio_buf_index, 1, len1, dump_pcm);
            fflush(dump_pcm);
        }
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
// Initialize SDL audio, open the device with a fixed 2-channel S16 format,
// record the actual output format in dst_tgt_, and start playback.
// Returns 0 on success, -1 on failure.
int AudioOutput::Init()
{
    if(SDL_Init(SDL_INIT_AUDIO) != 0) {
        LogError("SDL_Init failed");
        return -1;
    }
    SDL_AudioSpec wanted_spec, spec;
    wanted_spec.channels = 2;            // only 2-channel output is supported
    wanted_spec.freq = src_tgt_.freq;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.silence = 0;
    wanted_spec.callback = fill_audio_pcm;
    wanted_spec.userdata = this;
    wanted_spec.samples = 1024;          // callback buffer size in samples
    // Passing NULL as obtained spec makes SDL convert to wanted_spec or fail.
    int ret = SDL_OpenAudio(&wanted_spec, NULL);
    if(ret != 0) {
        LogError("SDL_OpenAudio failed");
        return -1;
    }
    // Record the format SDL will actually play; the callback resamples to it.
    dst_tgt_.channels = wanted_spec.channels; //spec.channels;
    dst_tgt_.fmt = AV_SAMPLE_FMT_S16;
    dst_tgt_.freq = wanted_spec.freq ;// spec.freq;
    dst_tgt_.channel_layout = av_get_default_channel_layout(2);
    dst_tgt_.frame_size = 1024;//src_tgt_.frame_size;
    SDL_PauseAudio(0);   // unpause: playback (callback invocations) starts now
    LogInfo("AudioOutput::Init() leave");
    return 0;   // FIX: function previously fell off the end of a non-void function (UB)
}
// Pause playback and close the SDL audio device.
int AudioOutput::DeInit()
{
    SDL_PauseAudio(1);
    SDL_CloseAudio();
    LogInfo("AudioOutput::DeInit() leave");
    return 0;   // FIX: missing return in a non-void function (UB)
}
6.视频画面渲染模块
//videooutput.h
#ifndef VIDEOOUTPUT_H
#define VIDEOOUTPUT_H
#ifdef __cplusplus /// C headers: give them C linkage in C++
extern "C"
{
// SDL headers
//#include "libavutil/avutil.h"
#include "SDL.h"
}
#endif
#include "avframequeue.h"
// Video output: owns the SDL window/renderer/texture and renders YUV frames
// pulled from the frame queue inside an event loop on the main thread.
class VideoOutput
{
public:
    // frame_queue is borrowed; width/height size the window and texture.
    VideoOutput(AVFrameQueue *frame_queue, int video_width, int video_height);
    int Init();       // create SDL window, renderer, IYUV texture, pixel buffer
    int MainLoop();   // blocking event loop; returns on ESC or window close
    void RefreshLoopWaitEvent(SDL_Event *event);  // poll events, refresh frames while idle
private:
    void videoRefresh(double *remaining_time);    // render the next queued frame, if any
    AVFrameQueue *frame_queue_ = NULL;  // decoded-frame source (borrowed)
    SDL_Event event_;           // event scratch
    SDL_Rect rect_;             // destination rectangle for rendering
    SDL_Window *win_ = NULL;
    SDL_Renderer *renderer_ = NULL;
    SDL_Texture *texture_ = NULL;
    int video_width_ = 0;
    int video_height_ = 0;
    uint8_t *yuv_buf_ = NULL;   // scratch pixel buffer (unused so far in this step)
    int yuv_buf_size_ = 0;
    // SDL_mutex mutex_;
};
#endif // VIDEOOUTPUT_H
//videooutput.cpp
#include "videooutput.h"
#include "log.h"
#include <thread>
// Store the borrowed frame queue and target dimensions; SDL objects are
// created later in Init().
VideoOutput::VideoOutput(AVFrameQueue *frame_queue, int video_width, int video_height)
    :frame_queue_(frame_queue), video_width_(video_width), video_height_(video_height)
{
}
// Create the SDL window, renderer, streaming IYUV texture and a YUV scratch
// buffer. Returns 0 on success, -1 on failure.
// NOTE(review): earlier-created SDL objects are not destroyed on later
// failures (the original "faild:" TODO); acceptable for a tutorial since the
// process exits, but flagged for completeness.
int VideoOutput::Init()
{
    if(SDL_Init(SDL_INIT_VIDEO)) {
        LogError("SDL_Init failed");
        return -1;
    }
    win_ = SDL_CreateWindow("player", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                            video_width_, video_height_, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
    if(!win_) {
        LogError("SDL_CreateWindow failed");
        return -1;
    }
    renderer_ = SDL_CreateRenderer(win_, -1, 0);
    if(!renderer_) {
        LogError("SDL_CreateRenderer failed");
        return -1;
    }
    texture_ = SDL_CreateTexture(renderer_, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, video_width_, video_height_);
    if(!texture_) {
        LogError("SDL_CreateTexture failed");   // FIX: message wrongly said "SDL_CreateRenderer failed" (copy-paste)
        return -1;
    }
    // 4:2:0 planar: Y is w*h, U and V are a quarter each -> 1.5 bytes/pixel.
    yuv_buf_size_ = video_width_ * video_height_ * 1.5;
    yuv_buf_ = (uint8_t *)malloc(yuv_buf_size_);
    if(!yuv_buf_) {                             // FIX: malloc result was unchecked
        LogError("malloc yuv_buf_ failed");
        return -1;
    }
    return 0;
}
// Blocking event loop on the calling (main) thread: refresh video while idle
// and react to events. Returns 0 when the user presses ESC or closes the window.
int VideoOutput::MainLoop()
{
    SDL_Event event;
    while (true) {
        // Blocks until an event arrives, refreshing the display meanwhile.
        RefreshLoopWaitEvent(&event);
        switch (event.type) {
        case SDL_KEYDOWN: // key pressed
            if(event.key.keysym.sym == SDLK_ESCAPE) {
                LogInfo("esc key down");
                return 0;
            }
            break;
        case SDL_QUIT:    // window closed
            LogInfo("SDL_QUIT");
            return 0;
            break;
        default:
            break;
        }
    }
}
// Seconds to sleep between refresh attempts while no event is pending.
#define REFRESH_RATE 0.01
// Wait for the next SDL event; while none is queued, periodically try to
// render the next video frame (ffplay-style refresh loop).
void VideoOutput::RefreshLoopWaitEvent(SDL_Event *event)
{
    double remaining_time = 0.0;
    SDL_PumpEvents();
    while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
        if (remaining_time > 0.0)
            std::this_thread::sleep_for(std::chrono::milliseconds(int64_t(remaining_time * 1000.0)));
        remaining_time = REFRESH_RATE;
        // Try to refresh the display; videoRefresh may shorten remaining_time.
        videoRefresh(&remaining_time);
        SDL_PumpEvents();
    }
}
// Render the frame at the head of the queue, if any, then remove and free it.
// remaining_time is currently unused here (no A/V sync in this step yet).
void VideoOutput::videoRefresh(double *remaining_time)
{
    AVFrame *frame = NULL;
    frame = frame_queue_->Front();   // peek; ownership stays with the queue
    if(frame) {
        // A frame is available: upload its planes and present.
        rect_.x = 0;
        rect_.y = 0;
        rect_.w = video_width_;
        rect_.h = video_height_;
        SDL_UpdateYUVTexture(texture_, &rect_, frame->data[0], frame->linesize[0],//Y
                             frame->data[1], frame->linesize[1],//U
                             frame->data[2], frame->linesize[2]);//V
        SDL_RenderClear(renderer_);
        SDL_RenderCopy(renderer_, texture_, NULL, &rect_);
        SDL_RenderPresent(renderer_);
        // Now actually dequeue the frame we just rendered and release it.
        frame = frame_queue_->Pop(1);
        av_frame_free(&frame);
    }
}
7.音视频同步模块
//avsync.h
#ifndef AVSYNC_H
#define AVSYNC_H
#include <chrono>
#include <ctime>
#include <math.h>
using namespace std::chrono;
// Master clock for audio/video synchronization, modeled after ffplay's Clock:
// stores pts_drift_ = pts - wall_time so GetClock() can extrapolate the
// current presentation time from the wall clock without being updated
// continuously.
class AVSync
{
public:
    AVSync() {
    }
    void InitClock() { // initialize the clock
        SetClock(NAN); // NAN marks "no pts set yet" (any comparison with NAN is false)
    }
    // Anchor the clock: pts was current at wall time `time` (seconds).
    void SetClockAt(double pts, double time) {
        pts_ = pts;
        pts_drift_ = pts_ - time;
    }
    // Extrapolated current presentation time in seconds (NAN until set).
    double GetClock() {
        double time = GetMicroseconds() / 1000000.0;
        return pts_drift_ + time;
    }
    // Anchor the clock to pts at the current wall time.
    void SetClock(double pts) {
        double time = GetMicroseconds() / 1000000.0; // us -> s
        SetClockAt(pts, time);
    }
    time_t GetMicroseconds() { // wall-clock time in microseconds since the epoch
        // NOTE(review): system_clock jumps with wall-clock adjustments;
        // steady_clock would be more robust for sync — confirm intent.
        system_clock::time_point time_point_new = system_clock::now();
        system_clock::duration duration = time_point_new.time_since_epoch();
        time_t us = duration_cast<microseconds>(duration).count(); // convert to microseconds
        return us;
    }
    double pts_ = 0;        // last anchored presentation timestamp (seconds)
    double pts_drift_ = 0;  // pts_ minus the wall time it was anchored at
};
#endif // AVSYNC_H
8.main函数实现
//main
#include <iostream>
#include "log.h"
#include "demuxthread.h"
#include "avframequeue.h"
#include "decodethread.h"
#include "audiooutput.h"
#include "videooutput.h"
using namespace std;
#undef main
// Player entry point: wire up queues, demuxer, decoders, audio output and
// video output, then run the SDL event loop until the user quits.
int main(int argc, char *argv[])
{
    int ret = 0;
    cout << "Hello World!" << endl;
    LogInit();
    if(argc < 2) {   // FIX: argv[1] was dereferenced without checking argc
        LogError("usage: player <url>");
        return -1;
    }
    // Packet/frame queues shared between demuxer, decoders and outputs.
    AVPacketQueue audio_packet_queue;
    AVPacketQueue video_packet_queue;
    AVFrameQueue audio_frame_queue;
    AVFrameQueue video_frame_queue;
    AVSync avsync;
    avsync.InitClock();
    // 1. Demuxing
    DemuxThread *demux_thread = new DemuxThread(&audio_packet_queue, &video_packet_queue);
    ret = demux_thread->Init(argv[1]);
    if(ret < 0) {
        LogError("demux_thread.Init failed");
        return -1;
    }
    ret = demux_thread->Start();
    if(ret < 0) {
        LogError("demux_thread.Start() failed");
        return -1;
    }
    // 2. Decode threads (one per stream)
    DecodeThread *audio_decode_thread = new DecodeThread(&audio_packet_queue, &audio_frame_queue);
    ret = audio_decode_thread->Init(demux_thread->AudioCodecParameters());
    if(ret < 0) {
        LogError("audio_decode_thread->Init() failed");
        return -1;
    }
    ret = audio_decode_thread->Start();
    if(ret < 0) {
        LogError("audio_decode_thread->Start() failed");
        return -1;
    }
    DecodeThread *video_decode_thread = new DecodeThread(&video_packet_queue, &video_frame_queue);
    ret = video_decode_thread->Init(demux_thread->VideoCodecParameters());
    if(ret < 0) {
        LogError("video_decode_thread->Init() failed");
        return -1;
    }
    ret = video_decode_thread->Start();
    if(ret < 0) {
        LogError("video_decode_thread->Start() failed");
        return -1;
    }
    // 3. Audio output, fed with the decoded stream's parameters
    AudioParams audio_params = {0};
    memset(&audio_params, 0, sizeof(AudioParams));
    audio_params.channels = demux_thread->AudioCodecParameters()->channels;
    audio_params.channel_layout = demux_thread->AudioCodecParameters()->channel_layout;
    audio_params.fmt = (enum AVSampleFormat) demux_thread->AudioCodecParameters()->format;
    audio_params.freq = demux_thread->AudioCodecParameters()->sample_rate;
    audio_params.frame_size =demux_thread->AudioCodecParameters()->frame_size;
    AudioOutput *audio_output = new AudioOutput(&avsync, demux_thread->AudioStreamTimebase(), audio_params, &audio_frame_queue);
    ret = audio_output->Init();
    if(ret < 0) {
        LogError("audio_output->Init() failed");
        return -1;
    }
    // 4. Video output; MainLoop blocks on the main thread until quit
    VideoOutput *video_output = new VideoOutput(&avsync, demux_thread->VideoStreamTimebase(),
                                                &video_frame_queue, demux_thread->VideoCodecParameters()->width,
                                                demux_thread->VideoCodecParameters()->height);
    ret = video_output->Init();
    if(ret < 0) {
        LogError("video_output->Init() failed");
        return -1;
    }
    video_output->MainLoop();
    // sleep 120 seconds (kept from an earlier tutorial step)
    // std::this_thread::sleep_for(std::chrono::milliseconds(120*1000));
    // 5. Orderly teardown: stop producers before consumers
    LogInfo("demux_thread->Stop");
    demux_thread->Stop();
    LogInfo("delete demux_thread");
    delete demux_thread;
    LogInfo("audio_decode_thread->Stop()");
    audio_decode_thread->Stop();
    LogInfo("delete audio_decode_thread");
    delete audio_decode_thread;
    LogInfo("video_decode_thread->Stop()");
    video_decode_thread->Stop();
    LogInfo("delete video_decode_thread");
    delete video_decode_thread;
    LogInfo("main finish");
    return 0;
}