记录 FFmpeg实现文件剪切、合并、BGM添加

说明

代码实现了对原视频文件添加BGM、媒体文件按时间剪切、合并多个媒体文件的功能。虽然实现效果以及功能全面性无法媲美专业软件,但是完全满足基本使用,代码保证实现原理及思路的正确性,二次开发可自行优化完善。

注:合并时应保证各文件的媒体格式完全一致;专业软件处理格式不一致的文件时,应该会先检查格式并将其转换为统一格式,再进行合并。

代码

已封装为线程类,原理不太复杂但是细节处理较多,尤其是时间戳问题。代码如下

头文件

#ifndef TASKTHREAD_H
#define TASKTHREAD_H

#include <QObject>
#include <QThread>
#include <QDebug>
#include <QTimer>
#include "ffmpeghead.h"

// Task kinds dispatched by TaskThread::run().
// Values start at 1; the constructor's default type of 0 means "no task selected".
enum TaskType
{
    Type_BGM = 1,   // add background music to a video
    Type_Cut,       // cut a media file by a time range
    Type_Merge      // merge several media files into one
};

/**
 * Worker thread that performs one media task per start():
 * adding background music, cutting by time range, or merging files.
 * Configure with setTaskType() + the matching set*Parm() before start().
 */
class TaskThread : public QThread
{
    Q_OBJECT
public:
    explicit TaskThread(QObject *parent = nullptr);
    ~TaskThread();

    //public API: select and parameterize the task before calling start()
    void setTaskType(int type);
    void setBgmParm(const QString &source, const QString &bgm);
    void setCutParm(const QString &source, int start, int end);
    void setMergeParm(const QStringList &fileList);
    bool getTaskStatus();

    //add background music: mux the BGM audio into the video (stream copy)
    void doBgmTask();

    //cut: keep packets between cutStart and cutEnd seconds
    void doCutTask();

    //merge: concatenate the files in mergeList (same format required)
    void doMergeTask();

signals:
    void sigTaskFinish(const QString &info);

protected:
    void run();

private:
    int type;            //task type: Type_BGM(1), Type_Cut(2), Type_Merge(3); 0 = none selected
                         //NOTE(review): a previous comment said 0/1/2, but run() dispatches on 1/2/3

    QString bgmSource;   //BGM task: source video file
    QString bgmDst;      //BGM task: audio file muxed in as an extra stream

    QString cutSource;   //cut task: source file
    int cutStart;        //cut task: start time in seconds
    int cutEnd;          //cut task: end time in seconds

    QStringList mergeList;  //merge task: files to concatenate, in list order
};

#endif // TASKTHREAD_H

实现文件

#include "taskthread.h"

#include <cstdint>
#include <string>
#include <vector>

TaskThread::TaskThread(QObject *parent) : QThread(parent)
{
    // Defaults: no task selected (0), empty cut range [0, 0].
    type = 0;
    cutStart = 0;
    cutEnd = 0;
}

TaskThread::~TaskThread()
{
    // NOTE(review): quit() only stops a thread event loop, and run() here
    // never starts one -- so in practice this just wait()s (blocking) until
    // the currently executing task returns.
    quit();
    wait();
}

void TaskThread::setTaskType(int type)
{
    // Select which task run() will execute (see the TaskType enum).
    this->type = type;
}

void TaskThread::setBgmParm(const QString &source, const QString &bgm)
{
    this->bgmSource = source;
    this->bgmDst = bgm;
}

void TaskThread::setCutParm(const QString &source, int start, int end)
{
    this->cutSource = source;
    this->cutStart = start;
    this->cutEnd = end;
}

void TaskThread::setMergeParm(const QStringList &fileList)
{
    this->mergeList = fileList;
}

bool TaskThread::getTaskStatus()
{
    return this->isRunning();
}

/**
 * Mux a background-music audio stream into the source video by stream copy
 * (no re-encoding) and write the result to ./testbgm.mp4.
 *
 * Fixes vs. the original version:
 *  - every early-error path now releases the opened contexts, the avio
 *    handle and the stream-mapping array instead of leaking them;
 *  - the BGM stream's output index is the number of streams actually
 *    mapped from the video file (stream_index), not ifmt_ctx->nb_streams,
 *    which over-counted whenever a source stream was dropped;
 *  - only packets of the first audio stream of the BGM file are written
 *    (packets of other BGM streams used to be muxed under a wrong index);
 *  - maxVideoSec is a double, so the BGM cut-off no longer truncates to
 *    whole seconds;
 *  - the finish signal is still emitted only on success (unchanged).
 */
void TaskThread::doBgmTask()
{
    qDebug() << "doBgmTask";

    // Keep the std::string temporaries alive while the char pointers are used.
    std::string tempVideo = bgmSource.toStdString();
    const char *video_filename = tempVideo.c_str();
    std::string tempAudio = bgmDst.toStdString();
    const char *audio_filename = tempAudio.c_str();

    const char *out_filename = "./testbgm.mp4";

    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ifmt_ctx_bgm = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    AVPacket pktBgm;

    int ret = 0;
    uint i = 0;
    int stream_index = 0;        // next output stream index while mapping
    int *stream_mapping = NULL;  // video-file stream index -> output index (-1 = dropped)
    int stream_mapping_size = 0;
    int bgm_in_index = -1;       // index of the BGM file's first audio stream
    int bgm_out_index = 0;       // output stream index assigned to the BGM audio
    double maxVideoSec = 0;      // highest video timestamp (seconds) seen in the source
    bool ok = false;             // emit the finish signal only on success

    // Open the source video.
    if ((ret = avformat_open_input(&ifmt_ctx, video_filename, 0, 0)) < 0)
    {
        fprintf(stderr, "Could not open input file '%s'\n", video_filename);
        goto cleanup;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
    {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto cleanup;
    }

    // Open the BGM (audio) file.
    if ((ret = avformat_open_input(&ifmt_ctx_bgm, audio_filename, 0, 0)) < 0)
    {
        fprintf(stderr, "Could not open input file '%s'\n", audio_filename);
        goto cleanup;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx_bgm, 0)) < 0)
    {
        fprintf(stderr, "Failed to retrieve input stream information\n");
        goto cleanup;
    }

    // Create the output context (container guessed from the file name).
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx)
    {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto cleanup;
    }

    // Map the video file's streams into the output.
    stream_mapping_size = ifmt_ctx->nb_streams;
    stream_mapping = (int *)av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
    if (!stream_mapping)
    {
        ret = AVERROR(ENOMEM);
        goto cleanup;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        AVStream *out_stream;
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVCodecParameters *in_codecpar = in_stream->codecpar;

        // Only audio/video/subtitle streams are copied.
        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
                in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
                in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE)
        {
            stream_mapping[i] = -1;
            continue;
        }

        stream_mapping[i] = stream_index++;

        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream)
        {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto cleanup;
        }

        ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
        if (ret < 0)
        {
            fprintf(stderr, "Failed to copy codec parameters\n");
            goto cleanup;
        }
        out_stream->codecpar->codec_tag = 0;
    }

    // Add the BGM audio stream right after the mapped source streams.
    // BUGFIX: use the mapped-stream count, not ifmt_ctx->nb_streams.
    bgm_out_index = stream_index;
    for (i = 0; i < ifmt_ctx_bgm->nb_streams; i++)
    {
        AVStream *in_stream = ifmt_ctx_bgm->streams[i];
        AVCodecParameters *in_codecpar = in_stream->codecpar;

        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;

        AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream)
        {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto cleanup;
        }

        ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
        if (ret < 0)
        {
            fprintf(stderr, "Failed to copy codec parameters\n");
            goto cleanup;
        }
        out_stream->codecpar->codec_tag = 0;

        bgm_in_index = i;
        break;  // only the first audio stream is used as BGM
    }

    // Open the output file and write the container header.
    ofmt = ofmt_ctx->oformat;
    if (!(ofmt->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto cleanup;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0)
    {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto cleanup;
    }

    // Pass 1: copy all mapped packets of the source video.
    while (1)
    {
        AVStream *in_stream, *out_stream;
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;  // EOF (or read error): done with the video

        in_stream = ifmt_ctx->streams[pkt.stream_index];
        if (pkt.stream_index >= stream_mapping_size || stream_mapping[pkt.stream_index] < 0)
        {
            av_packet_unref(&pkt);
            continue;
        }

        // Track the video end time so the BGM can be trimmed to it.
        if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            maxVideoSec = pkt.pts * av_q2d(in_stream->time_base);
        }

        pkt.stream_index = stream_mapping[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];

        // Rescale timestamps from the input to the output time base.
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0)
        {
            fprintf(stderr, "Error muxing packet\n");
            av_packet_unref(&pkt);  // defensive; the muxer normally takes ownership
            break;
        }
        av_packet_unref(&pkt);
    }

    // Pass 2: append the BGM audio packets, trimmed to the video length.
    if (bgm_in_index >= 0)
    {
        while (1)
        {
            AVStream *in_stream, *out_stream;
            ret = av_read_frame(ifmt_ctx_bgm, &pktBgm);
            if (ret < 0)
                break;

            // Drop packets of any other stream in the BGM file.
            if (pktBgm.stream_index != bgm_in_index)
            {
                av_packet_unref(&pktBgm);
                continue;
            }

            in_stream = ifmt_ctx_bgm->streams[pktBgm.stream_index];

            // Stop once the BGM would outlast the video.
            if (pktBgm.pts * av_q2d(in_stream->time_base) >= maxVideoSec)
            {
                av_packet_unref(&pktBgm);
                break;
            }

            // Redirect to the output stream created for the BGM.
            pktBgm.stream_index = bgm_out_index;
            out_stream = ofmt_ctx->streams[pktBgm.stream_index];

            // Rescale timestamps from the input to the output time base.
            pktBgm.pts = av_rescale_q_rnd(pktBgm.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pktBgm.dts = av_rescale_q_rnd(pktBgm.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pktBgm.duration = av_rescale_q(pktBgm.duration, in_stream->time_base, out_stream->time_base);
            pktBgm.pos = -1;

            ret = av_interleaved_write_frame(ofmt_ctx, &pktBgm);
            if (ret < 0)
            {
                fprintf(stderr, "Error muxing bgm packet\n");
                av_packet_unref(&pktBgm);  // defensive; the muxer normally takes ownership
                break;
            }
            av_packet_unref(&pktBgm);
        }
    }

    av_write_trailer(ofmt_ctx);
    ok = true;
    qDebug() << "doBgmTask finish";

cleanup:
    // Single exit point: release everything that was opened, in any state.
    avformat_close_input(&ifmt_ctx);
    avformat_close_input(&ifmt_ctx_bgm);

    if (ofmt_ctx && ofmt && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    av_freep(&stream_mapping);

    if (ok)
    {
        QString info = "添加背景音乐结束";
        emit sigTaskFinish(info);
    }
}

/**
 * Stream-copy the packets of cutSource whose timestamps fall in
 * [cutStart, cutEnd] seconds into ./testcut.mp4, re-basing the kept
 * packets' timestamps so the clip starts near zero.
 *
 * Fixes vs. the original version:
 *  - every error path releases the contexts, the avio handle and the
 *    mapping array (they used to leak on early return);
 *  - the "last pts/dts before the cut" accumulators are int64_t, matching
 *    AVPacket::pts -- they were plain int and could truncate.
 *
 * NOTE(review): behavior preserved from the original -- the cut starts at
 * the first packet whose (truncated) second is >= cutStart regardless of
 * keyframes, and the first packet past cutEnd is still written before the
 * loop stops.
 */
void TaskThread::doCutTask()
{
    qDebug() << "doCutTask";

    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret = 0;
    uint i = 0;
    int stream_index = 0;        // next output stream index while mapping
    int *stream_mapping = NULL;  // input stream index -> output index (-1 = dropped)
    int stream_mapping_size = 0;

    int curPacketSec = 0;        // current packet time, truncated to whole seconds
    int64_t lastPtsVideo = 0;    // timestamps of the last packet seen before cutStart,
    int64_t lastDtsVideo = 0;    // per media type; used to shift kept packets to ~0
    int64_t lastPtsAudio = 0;
    int64_t lastDtsAudio = 0;
    bool ok = false;             // emit the finish signal only on success

    // Keep the std::string alive while the char pointer is used.
    std::string temp = cutSource.toStdString();
    const char *in_filename = temp.c_str();
    const char *out_filename = "./testcut.mp4";

    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0)
    {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto cleanup;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
    {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto cleanup;
    }

    // Create the output context (container guessed from the file name).
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx)
    {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto cleanup;
    }

    stream_mapping_size = ifmt_ctx->nb_streams;
    stream_mapping = (int *)av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
    if (!stream_mapping)
    {
        ret = AVERROR(ENOMEM);
        goto cleanup;
    }

    ofmt = ofmt_ctx->oformat;

    // Map the input streams into the output (audio/video/subtitle only).
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        AVStream *out_stream;
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVCodecParameters *in_codecpar = in_stream->codecpar;

        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
                in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
                in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE)
        {
            stream_mapping[i] = -1;
            continue;
        }

        stream_mapping[i] = stream_index++;

        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream)
        {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto cleanup;
        }

        ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
        if (ret < 0)
        {
            fprintf(stderr, "Failed to copy codec parameters\n");
            goto cleanup;
        }
        out_stream->codecpar->codec_tag = 0;
    }

    if (!(ofmt->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto cleanup;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0)
    {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto cleanup;
    }

    while (1)
    {
        AVStream *in_stream, *out_stream;

        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;  // EOF (or read error): stop copying

        in_stream = ifmt_ctx->streams[pkt.stream_index];
        if (pkt.stream_index >= stream_mapping_size || stream_mapping[pkt.stream_index] < 0)
        {
            av_packet_unref(&pkt);
            continue;
        }

        curPacketSec = pkt.pts * av_q2d(in_stream->time_base);

        if (curPacketSec < cutStart)
        {
            // Still before the cut: remember the timestamps so the first
            // kept packet can be shifted back towards zero.
            if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
            {
                lastPtsAudio = pkt.pts;
                lastDtsAudio = pkt.dts;
            }
            else if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            {
                lastPtsVideo = pkt.pts;
                lastDtsVideo = pkt.dts;
            }

            av_packet_unref(&pkt);
            continue;
        }

        // Re-base the kept packet onto the clip's own timeline.
        if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            pkt.pts -= lastPtsAudio;
            pkt.dts -= lastDtsAudio;
        }
        else if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            pkt.pts -= lastPtsVideo;
            pkt.dts -= lastDtsVideo;
        }

        pkt.stream_index = stream_mapping[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];

        // Rescale timestamps from the input to the output time base.
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0)
        {
            fprintf(stderr, "Error muxing packet\n");
            av_packet_unref(&pkt);  // defensive; the muxer normally takes ownership
            break;
        }
        av_packet_unref(&pkt);

        // Stop once the requested end time has been passed.
        if (curPacketSec > cutEnd)
        {
            break;
        }
    }

    av_write_trailer(ofmt_ctx);
    ok = true;
    qDebug() << "doCutTask finish";

cleanup:
    // Single exit point: release everything that was opened, in any state.
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && ofmt && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);

    avformat_free_context(ofmt_ctx);
    av_freep(&stream_mapping);

    if (ok)
    {
        QString info = "剪切文件结束";
        emit sigTaskFinish(info);
    }
}

void TaskThread::doMergeTask()
{
    qDebug() << "start doMergeTask";

    const int mergeNum = mergeList.size();
    const char *out_filename = "testmerge.mp4";
    int ret = 0;

    //1 初始化输入文件
    AVFormatContext *ifmt_ctx[mergeNum] = {NULL};
    for(int i=0; i<mergeNum; i++)
    {
        std::string fileName = QString(mergeList.at(i)).toStdString();
        const char *in_filename = fileName.c_str();

        ifmt_ctx[i] = avformat_alloc_context();
        if ((ret = avformat_open_input(&ifmt_ctx[i], in_filename, 0, 0)) < 0)
        {
            fprintf(stderr, "Could not open input file '%s'", in_filename);
            avformat_close_input(&ifmt_ctx[i]);
            return;
        }

        if ((ret = avformat_find_stream_info(ifmt_ctx[i], 0)) < 0)
        {
            fprintf(stderr, "Failed to retrieve input stream information");
            avformat_close_input(&ifmt_ctx[i]);
            return;
        }
    }

    //2 初始化输出文件
    AVFormatContext *ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx)
    {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        return;
    }

    AVOutputFormat *ofmt = ofmt_ctx->oformat;

    //这里默认用第一个文件参数初始化
    for (uint i=0; i<ifmt_ctx[0]->nb_streams; i++)
    {
        AVStream *in_stream = ifmt_ctx[0]->streams[i];
        AVCodecParameters *in_codecpar = in_stream->codecpar;

        AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream)
        {
            fprintf(stderr, "Failed allocating output stream\n");
            return;
        }

        ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
        if (ret < 0)
        {
            fprintf(stderr, "Failed to copy codec parameters\n");
            return;
        }
        out_stream->codecpar->codec_tag = 0;
    }

    if (!(ofmt->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
                avio_closep(&ofmt_ctx->pb);
        }
    }

    //3 输入输出初始化均完成,先文件写头
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0)
    {
        fprintf(stderr, "Error occurred when opening output file\n");
        return;
    }

    //4 循环写入实际包数据
    int fileIndex = 0;
    AVPacket pkt;
    int lastVideoPts[mergeNum] = {0};
    int lastVideoDts[mergeNum] = {0};
    int lastAudioPts[mergeNum] = {0};

    qDebug() << ">>开始写入文件" << fileIndex+1;
    while (true)
    {
        ret = av_read_frame(ifmt_ctx[fileIndex], &pkt);
        if (ret < 0)
        {
            //循环结束跳出
            fileIndex++;
            if(fileIndex == mergeNum)
            {
                break;
            }

            qDebug() << ">>开始写入文件" << fileIndex+1;
            continue;
        }

        AVStream *in_stream = ifmt_ctx[fileIndex]->streams[pkt.stream_index];
        AVStream *out_stream = ofmt_ctx->streams[pkt.stream_index];

        // int curPacketSec = pkt.pts * av_q2d(in_stream->time_base);
        // qDebug() << "pkt原始数据" << curPacketSec << pkt.pts << pkt.dts << pkt.duration << pkt.stream_index;

        if(in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            if(fileIndex >= 1)
            {
                pkt.pts += lastVideoPts[fileIndex-1];
                pkt.dts += lastVideoPts[fileIndex-1];
            }
            lastVideoPts[fileIndex] = pkt.pts;
            lastVideoDts[fileIndex] = pkt.dts;
        }
        else if(in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            if(fileIndex >= 1)
            {
                pkt.pts += lastAudioPts[fileIndex-1];
                pkt.dts = pkt.pts;
            }
            lastAudioPts[fileIndex] = pkt.pts;
        }

        // qDebug() << "pkt处理数据" << curPacketSec << pkt.pts << pkt.dts << pkt.duration;

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);

        // qDebug() << "pkt转换数据" << curPacketSec << pkt.pts << pkt.dts << pkt.duration;
        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0)
        {
            char err[32];
            av_strerror(ret,err,32);
            fprintf(stderr, "Error muxing packet error:%s\n",err);
            continue;
        }
        av_packet_unref(&pkt);
    }

    av_write_trailer(ofmt_ctx);

    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    qDebug() << "doMergeTask finish";

    QString info = "合并文件结束";
    emit sigTaskFinish(info);
}

void TaskThread::run()
{
    switch (this->type)
    {
    case 1:
        doBgmTask();
        break;
    case 2:
        doCutTask();
        break;
    case 3:
        doMergeTask();
        break;
    default:
        break;
    }
}

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

你是周小哥啊

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值