FFmpeg usage notes

This code implements a Qt-based video player that uses the FFmpeg libraries for decoding. During initialization it registers the formats and decoders, then opens the input stream, finds the best video stream, configures the decoder context, decodes and converts the pixel format, and finally turns the decoded frames into QImage objects that are delivered through an update signal.

A record of things I have used at work.
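Before the full class, here is a condensed sketch of the same open → demux → decode flow described above, with Qt, the RGBA conversion and all error handling left out for brevity (it is an outline of the flow, not the implementation below):

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
}

void decodeSketch(const char *url)
{
    AVFormatContext *fmt = NULL;
    avformat_open_input(&fmt, url, NULL, NULL);                 //open the input and read the header
    avformat_find_stream_info(fmt, NULL);                       //probe the streams
    int vIdx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);

    const AVCodec *dec = avcodec_find_decoder(fmt->streams[vIdx]->codecpar->codec_id);
    AVCodecContext *ctx = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(ctx, fmt->streams[vIdx]->codecpar);
    avcodec_open2(ctx, dec, NULL);

    AVPacket *pkt = av_packet_alloc();
    AVFrame  *frame = av_frame_alloc();
    while (av_read_frame(fmt, pkt) >= 0) {                      //demux
        if (pkt->stream_index == vIdx) {
            avcodec_send_packet(ctx, pkt);                      //feed the decoder
            while (avcodec_receive_frame(ctx, frame) == 0) {    //drain the decoded frames
                //frame->data / frame->linesize now hold one decoded picture
                //(the full code below converts it to RGBA with sws_scale and wraps it in a QImage)
            }
        }
        av_packet_unref(pkt);
    }
    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    avformat_close_input(&fmt);
}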

Qt development environment.
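The class below targets the FFmpeg 3.x/4.x API: av_register_all() and avcodec_register_all() were deprecated in FFmpeg 4.0 and removed in later releases, where those calls can simply be dropped. If in doubt about which libraries are actually linked, the standard version query functions can confirm it at runtime, for example:

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/version.h"
}
#include <QDebug>

//print the linked library versions (major/minor/micro are packed into one unsigned int)
static void printFFmpegVersions()
{
    unsigned v = avformat_version();
    qDebug() << "libavformat" << AV_VERSION_MAJOR(v) << AV_VERSION_MINOR(v) << AV_VERSION_MICRO(v);
    v = avcodec_version();
    qDebug() << "libavcodec" << AV_VERSION_MAJOR(v) << AV_VERSION_MINOR(v) << AV_VERSION_MICRO(v);
}

The player implementation, videoplayer.cpp: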

#include "videoplayer.h"
#include <QDebug>
#include <QTime>

extern "C"
{
#include <libavutil/opt.h>
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libavformat/avformat.h"
}

VideoPlayer::VideoPlayer(int channel, QObject *parent)
    : QThread(parent)
    , m_playerControl(true)
    , m_channel(channel)//channel index
    , m_avDictionary(NULL)
    , m_fmtCtx(NULL)//format context
    , m_decCtx(NULL)//decoder context
{
    //register muxers and demuxers (deprecated since FFmpeg 4.0 and removed in newer releases)
    av_register_all();
    //initialize the network layer (needed to open rtsp/rtmp/http streams)
    avformat_network_init();
    //register the decoders (not needed on FFmpeg 4.0+)
    //avcodec_register_all();
    //allocate and initialize the AVPacket
    m_pkt = av_packet_alloc();
    //allocate and initialize the AVFrame
    m_frame = av_frame_alloc();
}

VideoPlayer::~VideoPlayer()
{
    //release resources
    if(m_avDictionary != NULL) {
        av_dict_free(&m_avDictionary);
    }
    if(m_frame != NULL) {
        av_frame_free(&m_frame);
    }
    if(m_pkt != NULL) {
        av_packet_free(&m_pkt);
    }
    if(m_decCtx != NULL) {
        avcodec_close(m_decCtx);
        avcodec_free_context(&m_decCtx);
    }
    if(m_fmtCtx != NULL) {
        avformat_close_input(&m_fmtCtx);
    }
    avformat_network_deinit();
}

void VideoPlayer::openPlayer(const QString url, int wait)
{
    m_wait = wait;
    if(isRunning()) {
        qDebug() << " VideoPlayer::openPlayer is already started !";
        return;
    }
    if(m_pkt == NULL || m_frame == NULL)
    {//check that the resources allocated in the constructor (and freed in the destructor) are valid
        qDebug() << " VideoPlayer::openPlayer system error !!!!!!!!!!!!!!!!!!!!!!";
        //if even the constructor could not allocate these resources, show a UI tip and fail immediately
        emit sigErrorTips(m_channel, "system error !");
        return;
    }
    m_url = url;
    start();
}

bool VideoPlayer::open()
{
    ///url: keep the QByteArray alive while the char* is in use
    //(taking .data() from the temporary returned by toLocal8Bit() would leave a dangling pointer)
    QByteArray urlBytes = m_url.toLocal8Bit();
    const char *url = urlBytes.constData();

    ///option configuration
    if(m_channel < 4) {
        //x264 presets: ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow, placebo;
        //ultrafast encodes fastest but compresses least (larger files), placebo is the opposite; x264 defaults to medium
        av_dict_set(&m_avDictionary, "preset", "superfast", 0);
        //zerolatency: trade video quality for lower latency
        av_dict_set(&m_avDictionary, "tune", "zerolatency", 0);
        //Baseline profile: basic quality; Extended: extended; Main: mainstream; High: high quality
        av_dict_set(&m_avDictionary, "profile", "baseline", 0);
    }
    //enlarge the receive buffer to reduce frame corruption
    av_dict_set(&m_avDictionary, "buffer_size", "1024000", 0);
    //open the RTSP stream over UDP
    av_dict_set(&m_avDictionary, "rtsp_transport", "udp", 0);
    //socket timeout before disconnecting: 2 seconds, in microseconds
    av_dict_set(&m_avDictionary, "stimeout", "2000000", 0);
    //muxdelay and max_delay both bound the delay: muxdelay is in seconds, max_delay in microseconds; the effect is the same
    av_dict_set(&m_avDictionary, "max_delay", "500000", 0);

    ///open the input stream and read the header
    int ret = avformat_open_input(&m_fmtCtx, url, NULL, &m_avDictionary);
    if (ret != 0) {//on failure m_fmtCtx has already been freed inside FFmpeg, so nothing else needs releasing
        char buff[1024] = { 0 };
        av_strerror(ret, buff, sizeof(buff) - 1);
        if(m_avDictionary) {
            av_dict_free(&m_avDictionary);
            m_avDictionary = NULL;
        }
        qDebug() << "VideoPlayer::open channel" << m_channel << "avformat_open_input" << m_url << " error !" << ret << buff;
        return false;
    }
    //read the stream information
    ret = avformat_find_stream_info(m_fmtCtx, 0);
    //dump detailed stream information
    //av_dump_format(m_fmtCtx, 0, url, 0);
    //get the video stream index, used later to tell video packets from other packets
    m_videoStream = av_find_best_stream(m_fmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (m_videoStream < 0)
    {//no video stream found
        qDebug() << "VideoPlayer::open couldn't find a video stream in" << m_url;
        avformat_close_input(&m_fmtCtx);//release resources
        return false;
    }
    //find a suitable decoder
    const AVCodec *vcodec = avcodec_find_decoder(m_fmtCtx->streams[m_videoStream]->codecpar->codec_id);
    if (!vcodec)
    {
        qDebug() << "VideoPlayer::open couldn't find the codec, codec_id is " << m_fmtCtx->streams[m_videoStream]->codecpar->codec_id;
        avformat_close_input(&m_fmtCtx);//release resources
        return false;
    }
    //allocate the video decoder context (note: m_decCtx must be NULL/freed here, otherwise the old context leaks)
    m_decCtx = avcodec_alloc_context3(vcodec);
    if(m_decCtx == NULL) {//allocation failed
        qDebug() << "VideoPlayer::open avcodec_alloc_context3 error !";
        avformat_close_input(&m_fmtCtx);//release resources
        return false;
    }
    //fill the decoder context parameters (copied from the stream's codec parameters)
    avcodec_parameters_to_context(m_decCtx, m_fmtCtx->streams[m_videoStream]->codecpar);

    m_decCtx->bit_rate = 200000;             //bit rate
    m_decCtx->time_base.num = 1;             //these two lines: 10 frames per second
    m_decCtx->time_base.den = 10;
    m_decCtx->frame_number = 1;              //one video frame per packet
    m_decCtx->thread_count = 8;              //decode with 8 threads
    m_decCtx->thread_type = FF_THREAD_SLICE; //2: slice-based multithreading

    av_opt_set(m_decCtx->priv_data, "preset", "medium", 0);

    //open the video decoder context
    ret = avcodec_open2(m_decCtx, 0, 0);
    if (ret != 0)
    {//failed to open the decoder
        char buff[1024] = { 0 };
        av_strerror(ret, buff, sizeof(buff) - 1);
        qDebug() << "VideoPlayer::open avcodec_open2 error ! " << buff;
        //release resources
        avcodec_free_context(&m_decCtx);
        avformat_close_input(&m_fmtCtx);
        return false;
    }
    qDebug() << "VideoPlayer::open video avcodec_open2 succeed ! channel" << m_channel << QThread::currentThreadId();
    return true;
}

void VideoPlayer::close()
{
    qDebug() << " VideoPlayer::close xxxxxxxxxxxx channel" << m_channel << QThread::currentThreadId();
    if(m_decCtx != NULL) {
        //release decoder resources
        avcodec_close(m_decCtx);
        //free the decoder context and set it to NULL
        avcodec_free_context(&m_decCtx);
    }
    if(m_fmtCtx != NULL) {
        //close the format context and set it to NULL
        avformat_close_input(&m_fmtCtx);
    }
}

void VideoPlayer::run()
{
    msleep(m_wait);
    //pixel format conversion context
    SwsContext *vctx = NULL;
    unsigned char *rgb = NULL;
    while (1)
    {
        /// check whether playback has been paused
        if(!m_playerControl) {
            if(m_fmtCtx != NULL || m_decCtx != NULL) close();//release resources
            sleep(1);
            emit sigPlayerClosed(m_channel);
            break;//leave the loop so the buffers allocated below are freed before the thread exits
        }
        /// open the video
        if(m_fmtCtx == NULL || m_decCtx == NULL) {//check the pointers
            if(!open()) {//opening failed: show a UI tip and retry after 1 second
                emit sigErrorTips(m_channel, "video open error !");
                sleep(1);
                continue;
            }
        }
        /// read a packet to decode
        int ret = av_read_frame(m_fmtCtx, m_pkt);
        if (ret != 0)
        {//no video data available: release resources and reopen after 1 second
            char buff[1024] = { 0 };
            av_strerror(ret, buff, sizeof(buff) - 1);
            qDebug() << QTime::currentTime().toString("hh:mm:ss.zzz") << "channel" << m_channel
                     << "av_read_frame ret =" << ret << "errno =" << errno << ",info:" << buff;
            close();//release resources
            QThread::sleep(1);
            continue;
        }
        if (m_pkt->stream_index != m_videoStream) {
            av_packet_unref(m_pkt);//not a video packet: release the packet's internal buffer
            continue;
        }
        ///decode the video
        //send the packet to the decoder; this returns immediately
        ret = avcodec_send_packet(m_decCtx, m_pkt);
        //release the packet's internal buffer (decrements the reference count)
        av_packet_unref(m_pkt);
        if (ret != 0)
        {//decoding failed
            char buff[1024] = { 0 };
            av_strerror(ret, buff, sizeof(buff) - 1);
            qDebug() << "xxxxxxxxxxxxxxxxx channel" << m_channel << "avcodec_send_packet error, " << buff;
            QThread::msleep(100);
            continue;
        }
        while (1)
        {
            //fetch decoded frames from the decoder; one send may yield several receives
            ret = avcodec_receive_frame(m_decCtx, m_frame);
            if (ret != 0)
            {//no more frames, or an error
                break;
            }
            //pixel format conversion
            //create the conversion context, or reuse the cached one
            vctx = sws_getCachedContext(
                vctx,                               //passing NULL creates a new context
                m_frame->width, m_frame->height,    //input width/height
                (AVPixelFormat)m_frame->format,     //input pixel format
                m_frame->width, m_frame->height,    //output width/height
                AV_PIX_FMT_RGBA,                    //output format: RGBA
                SWS_BILINEAR,                       //scaling algorithm
                0,0,0);
            if (vctx)
            {
                //qDebug() <<  "channel" << m_channel << "SwsContext is ready to scale frame .";
                if (rgb == NULL)
                {
                    rgb = new unsigned char[m_frame->width*m_frame->height * 4];
                }
                uint8_t *data[2] = { 0 };
                data[0] = rgb;
                int lines[2] = { 0 };
                lines[0] = m_frame->width * 4;
                //do the pixel conversion
                sws_scale(
                    vctx,
                    m_frame->data,      //input data
                    m_frame->linesize,  //input line sizes
                    0,
                    m_frame->height,    //input height
                    data,               //output data
                    lines               //output line sizes
                    );
                //QImage wraps the rgb buffer without copying it, and rgb is reused for the next frame,
                //so emit a deep copy to keep the image valid in the GUI thread
                QImage image(data[0], m_frame->width, m_frame->height, QImage::Format_RGBA8888);
                emit sigUpdateFrame(m_channel, image.copy());
            }
            else
            {
                qDebug() << "xxxxxxxxxxxxxxxxx channel" << m_channel << "SwsContext is NULL !!!!!!!!!!!!!!";
            }
        }
    }
    //free the memory allocated inside this thread before it exits
    if(rgb) delete [] rgb;
    sws_freeContext(vctx);
}

void VideoPlayer::playControl(bool play)
{
    m_playerControl = play;
}
The class declaration, videoplayer.h:

#ifndef VIDEOPLAYER_H
#define VIDEOPLAYER_H

#include <QObject>
#include <QImage>
#include <QString>
#include <QThread>

struct AVDictionary;
struct AVFormatContext;
struct AVCodecContext;
struct AVPacket;
struct AVFrame;

class VideoPlayer : public QThread
{
    Q_OBJECT
public:
    explicit VideoPlayer(int channel, QObject *parent = nullptr);
    ~VideoPlayer();
    //open the video
    void openPlayer(const QString url, int wait);

    //thread body
    void run() override;

    //playback control
    void playControl(bool play);

signals:
    //update the picture
    void sigUpdateFrame(int channel, QImage frame);
    //error tip
    void sigErrorTips(int channel, QString info);
    //playback stopped
    void sigPlayerClosed(int channel);

private:
    bool open();
    void close();

private:
    bool                     m_playerControl;//while true keep playing, otherwise stop playing and exit

    int                      m_wait;
    int                      m_channel;
    QString                  m_url;

    int                      m_videoStream;//video stream index
    AVDictionary            *m_avDictionary;
    AVFormatContext         *m_fmtCtx;//format context
    AVCodecContext          *m_decCtx;//decoder context
    AVPacket                *m_pkt;//packet read from the stream
    AVFrame                 *m_frame;//decoded frame
};

#endif // VIDEOPLAYER_H
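Finally, a minimal sketch of how the class might be driven from the UI side. MonitorWindow, the slot names and the RTSP URL are placeholders of mine, not part of the original code; the point is that frames arrive on the GUI thread through a queued signal/slot connection, since VideoPlayer runs in its own thread:

#include <QWidget>
#include <QLabel>
#include <QPixmap>
#include <QDebug>
#include "videoplayer.h"

class MonitorWindow : public QWidget
{
    Q_OBJECT
public:
    explicit MonitorWindow(QWidget *parent = nullptr)
        : QWidget(parent)
        , m_label(new QLabel(this))
        , m_player(new VideoPlayer(0, this))//channel 0
    {
        connect(m_player, &VideoPlayer::sigUpdateFrame, this, &MonitorWindow::onFrame);
        connect(m_player, &VideoPlayer::sigErrorTips, this, &MonitorWindow::onError);
        //start playing immediately (wait = 0 ms before the first open attempt); the URL is a placeholder
        m_player->openPlayer("rtsp://192.168.1.100/stream1", 0);
    }

private slots:
    void onFrame(int channel, QImage frame)
    {
        Q_UNUSED(channel);
        m_label->setPixmap(QPixmap::fromImage(frame));
    }
    void onError(int channel, QString info)
    {
        qDebug() << "channel" << channel << info;
    }

private:
    QLabel      *m_label;
    VideoPlayer *m_player;
};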
