[FFmpeg] Multimedia File Processing


1. FFmpeg log printing

#include <libavutil/log.h>
av_log_set_level(AV_LOG_DEBUG);
av_log(NULL, AV_LOG_INFO, "...");


AV_LOG_ERROR
AV_LOG_WARNING
AV_LOG_INFO
AV_LOG_DEBUG

A concrete example, ff_log.c:

#include<stdio.h>
#include<libavutil/log.h>

int main(int argc, char *argv[]){
	av_log_set_level(AV_LOG_INFO);
	av_log(NULL, AV_LOG_DEBUG, "Hello world:%s%d\n","aaa",10);
	av_log(NULL, AV_LOG_INFO, "Hello world INFO!\n");
	av_log(NULL, AV_LOG_ERROR, "Hello world ERROR!\n");
	return 0;
}
Compile and link (the file was created above, e.g. with vim ff_log.c):

clang -g -o ff_log ff_log.c -I/usr/local/ffmpeg/include -L/usr/local/ffmpeg/lib -lavutil

or, using pkg-config:

clang -g -o ff_log ff_log.c `pkg-config --cflags --libs libavutil`


./ff_log
Hello world INFO!
Hello world ERROR!

On a Mac, compiling and linking with the second (pkg-config) approach reports the following error:

Package libavutil was not found in the pkg-config search path.
Perhaps you should add the directory containing `libavutil.pc'
to the PKG_CONFIG_PATH environment variable
No package 'libavutil' found
ff_log.c:2:9: fatal error: 'libavutil/log.h' file not found
#include<libavutil/log.h>
        ^~~~~~~~~~~~~~~~~
1 error generated.

The fix is as follows:

vim ~/.bash_profile 
source ~/.bash_profile

Append the last two export lines shown below to the file (the source command reloads it):

# HomeBrew
export HOMEBREW_BOTTLE_DOMAIN=https://mirrors.ustc.edu.cn/homebrew-bottles
export PATH="/usr/local/bin:$PATH"
export PATH="/usr/local/sbin:$PATH"
export PATH="/usr/local/ffmpeg/bin:$PATH"


export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/ffmpeg/lib/pkgconfig
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/ffmpeg/lib
# HomeBrew END

2. FFmpeg file and directory operations

File deletion and renaming:

avpriv_io_delete()

avpriv_io_move()

ffmpeg_file.c:

#include<stdio.h>
#include<libavutil/log.h>
#include<libavformat/avformat.h>

int main(int argc, char* argv[]){
	
	int ret;

	// rename the file
	ret = avpriv_io_move("111.txt", "222.txt");
	if(ret < 0){
		av_log(NULL, AV_LOG_ERROR, "Failed to rename\n");
		return -1;
	}
	av_log(NULL, AV_LOG_INFO, "Success to rename\n");

	// delete the file (the argument is treated as a URL)
	ret = avpriv_io_delete("./mytestfile.txt");
	if(ret < 0){
		av_log(NULL, AV_LOG_ERROR, "Fail to delete file mytestfile.txt\n");
		return -1;
	}
	av_log(NULL, AV_LOG_INFO, "Success to delete mytestfile.txt\n");
	return 0;
}

Compile and run:

clang -g -o ffmpeg_mov_del ffmpeg_file.c `pkg-config --cflags --libs libavutil --libs libavformat`
./ffmpeg_mov_del

3. FFmpeg directory operations

avio_open_dir()

avio_read_dir()

avio_close_dir()

AVIODirContext	the context for directory operations

AVIODirEntry	a directory entry; it holds the file name, file size, and other information 
Implementing a simple ls command:

ffmpeg_file_list.c:

#include<libavutil/log.h>
#include<libavformat/avformat.h>

int main(int argc, char* argv[]){
	int ret;
	AVIODirContext *ctx = NULL;
	AVIODirEntry *entry = NULL;	

	av_log_set_level(AV_LOG_INFO);
	ret = avio_open_dir(&ctx, "./", NULL);
	if(ret < 0){
		av_log(NULL, AV_LOG_ERROR, "Cannot open dir:%s\n", av_err2str(ret));
		goto __fail; // skip the read loop if the directory could not be opened
	}
	
	while(1){
		ret = avio_read_dir(ctx, &entry);
		if(ret < 0){
			av_log(NULL, AV_LOG_ERROR, "Cannot read dir:%s\n", av_err2str(ret));
			goto __fail;
		}
		if(!entry){
			break;
		}

		av_log(NULL, AV_LOG_INFO, "%12"PRId64" %s \n", entry->size, entry->name);
		avio_free_directory_entry(&entry);
	}

__fail:
	avio_close_dir(&ctx);
	return 0;
}

Compile and run:

clang -g -o ffmpeg_list ffmpeg_file_list.c `pkg-config --cflags --libs libavutil --libs libavformat`

./ffmpeg_list
        6148 .DS_Store 
          96 ffmpeg_del.dSYM 
       49848 ffmpeg_del 
           0 222.txt 
         536 ffmpeg_file.c 
         689 ffmpeg_file_list.c 
       50408 ffmpeg_list 
          96 ffmpeg_list.dSYM 

4. Processing stream data with FFmpeg

4.1 Concepts

A multimedia file is really a container, and inside the container there are many streams (also called tracks);
the data read out of a stream is called a packet;
a single packet contains one or more frames.

4.2 Key structures
AVFormatContext	the format context
AVStream	a stream
AVPacket	a packet
4.3 Basic steps for handling stream data with FFmpeg
  • Demux: open the container
  • Get the streams: obtain the streams held inside the container
  • Read packets: pull the data packets out of the streams one by one
  • Release resources: free everything after the processing logic is done
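
To make the flow concrete, here is a minimal skeleton of those four steps (a sketch with error handling trimmed; the input path ./test.mp4 and the choice of the audio stream are just placeholders). The complete programs in section 5 follow the same pattern.

#include <libavformat/avformat.h>

int main(void)
{
	AVFormatContext *fmt_ctx = NULL;
	AVPacket pkt;
	int audio_index;

	// 1. demux: open the container
	if (avformat_open_input(&fmt_ctx, "./test.mp4", NULL, NULL) < 0)
		return -1;

	// 2. get the streams: pick one stream out of the container (audio here)
	audio_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);

	// 3. read packets: pull the packets out of the stream one by one
	av_init_packet(&pkt);
	while (av_read_frame(fmt_ctx, &pkt) >= 0) {
		if (pkt.stream_index == audio_index) {
			// ... process the packet here ...
		}
		av_packet_unref(&pkt); // release the packet's reference each time
	}

	// 4. release resources once the work is done
	avformat_close_input(&fmt_ctx);
	return 0;
}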

5. Hands-on examples

5.1 Printing audio/video information

av_register_all() — older FFmpeg code had to call this API before doing anything else; recent FFmpeg versions have deprecated it.

avformat_open_input() / avformat_close_input()	used as a pair
Open the media file and get back an AVFormatContext; use that format context to read the parameter information, and finally close the media file.

av_dump_format() prints the media file's meta information
vim mediainfo.c

With the following contents:
#include<libavutil/log.h>
#include<libavformat/avformat.h>

int main(int argc, char* argv[])
{
	int ret;
	AVFormatContext *fmt_ctx = NULL; // the format context

	av_log_set_level(AV_LOG_INFO); // set the log level
	
	//av_register_all(); // registers formats/protocols; deprecated in recent FFmpeg versions
	
	ret = avformat_open_input(&fmt_ctx, "./test.mp4", NULL, NULL); // open the media file; the 3rd arg (NULL) lets the format be inferred, by default from the file extension
	if(ret<0){ // on failure, log the error and exit
		av_log(NULL, AV_LOG_ERROR, "Can't open file: %s\n",av_err2str(ret));
		return -1;
	}

	// opened successfully; run the logic below
	av_dump_format(fmt_ctx, 0, "./test.mp4", 0); // dump the media file's meta info; the 2nd arg is a stream index hint (0 is fine), the 4th arg is 0 for an input file, 1 for an output file

	avformat_close_input(&fmt_ctx); // close the media file

	return 0;
}

Compile:

clang -g -o mediainfo mediainfo.c `pkg-config --cflags --libs libavutil --libs libavformat`
There is one warning:
mediainfo.c:11:2: warning: 'avcodec_register_all' is deprecated

Run:

 ./mediainfo
 
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from './test.mp4':
  Metadata:
    major_brand     : isom
    minor_version   : 512
    compatible_brands: isomiso2avc1mp41
    comment         : vid:v0300fa50000c08eg9qcf5vi1hpn9gfg
    encoder         : Lavf58.20.100
  Duration: 00:00:10.94, bitrate: N/A
    Stream #0:0(und): Video: h264 (avc1 / 0x31637661), none, 720x1280, 1174 kb/s, SAR 1:1 DAR 9:16, 29.82 fps, 30 tbr, 15360 tbn (default)
    Metadata:
      handler_name    : VideoHandler
    Stream #0:1(und): Audio: aac (mp4a / 0x6134706D), 44100 Hz, 2 channels, 128 kb/s (default)
    Metadata:
      handler_name    : SoundHandler

5.2 Extracting audio data

av_init_packet() initializes the packet

av_find_best_stream() finds the best stream of the type you want

av_read_frame() / av_packet_unref()  read fetches a packet from the stream so it can be processed (in this example it is written into an AAC file);
every read increases the packet's reference count, and unref releases that reference so FFmpeg can free the packet itself and no memory is leaked.

Extracting audio with the code from the course:

#include<stdio.h>
#include<libavutil/log.h>
#include<libavformat/avformat.h>
#include <libavformat/avio.h>

#define ADTS_HEADER_LEN  7

void adts_header(char *szAdtsHeader, int dataLen){

    int audio_object_type = 2; 
    int sampling_frequency_index = 7;
    int channel_config = 2;
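    // NOTE: sampling_frequency_index must match the input's real sample rate
    // (ADTS index 4 = 44100 Hz, 7 = 22050 Hz); a value that does not match the
    // source is a common reason the extracted .aac sounds wrong or will not play.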

    int adtsLen = dataLen + 7;

    szAdtsHeader[0] = 0xff;         //syncword:0xfff                          high 8 bits
    szAdtsHeader[1] = 0xf0;         //syncword:0xfff                          low 4 bits
    szAdtsHeader[1] |= (0 << 3);    //MPEG Version:0 for MPEG-4, 1 for MPEG-2  1 bit
    szAdtsHeader[1] |= (0 << 1);    //Layer:0                                 2 bits 
    szAdtsHeader[1] |= 1;           //protection absent:1                     1 bit

    szAdtsHeader[2] = (audio_object_type - 1)<<6;            //profile:audio_object_type - 1                      2 bits
    szAdtsHeader[2] |= (sampling_frequency_index & 0x0f)<<2; //sampling frequency index:sampling_frequency_index  4 bits 
    szAdtsHeader[2] |= (0 << 1);                             //private bit:0                                      1 bit
    szAdtsHeader[2] |= (channel_config & 0x04)>>2;           //channel configuration:channel_config               high 1 bit

    szAdtsHeader[3] = (channel_config & 0x03)<<6;     //channel configuration:channel_config     low 2 bits
    szAdtsHeader[3] |= (0 << 5);                      //original:0                               1 bit
    szAdtsHeader[3] |= (0 << 4);                      //home:0                                   1 bit
    szAdtsHeader[3] |= (0 << 3);                      //copyright id bit:0                       1 bit  
    szAdtsHeader[3] |= (0 << 2);                      //copyright id start:0                     1 bit
    szAdtsHeader[3] |= ((adtsLen & 0x1800) >> 11);           //frame length:value   high 2 bits

    szAdtsHeader[4] = (uint8_t)((adtsLen & 0x7f8) >> 3);     //frame length:value    middle 8 bits
    szAdtsHeader[5] = (uint8_t)((adtsLen & 0x7) << 5);       //frame length:value    low 3 bits
    szAdtsHeader[5] |= 0x1f;                                 //buffer fullness:0x7ff high 5 bits
    szAdtsHeader[6] = 0xfc;
}

int main(int argc, char* argv[])
{
	int ret;
	int len;
	int audio_index;
	char* src = NULL;
	char* dst = NULL;
	AVPacket pkt;
	AVFormatContext *fmt_ctx = NULL; // the format context

	av_log_set_level(AV_LOG_INFO); // set the log level

	// 1:read two params from console
	if(argc<3){
		av_log(NULL, AV_LOG_ERROR, "the count of params should be 3.\n");
		return -1;
	}	
	
	src = argv[1];
	dst = argv[2];
	if(!src || !dst){
		av_log(NULL, AV_LOG_ERROR, "src or dst is null!\n");
		return -1;
	}

	ret = avformat_open_input(&fmt_ctx, src, NULL, NULL); // open the media file
	if(ret<0){ // on failure, log the error and exit
		av_log(NULL, AV_LOG_ERROR, "Can't open file: %s\n",av_err2str(ret));
		return -1;
	}

	// opened successfully; run the logic below
	FILE* dst_fd = fopen(dst, "wb");
	if(!dst_fd){
		av_log(NULL, AV_LOG_ERROR, "Can't open out file!\n");
		avformat_close_input(&fmt_ctx); // the input is already open at this point, so it must be closed
		return -1;
	}
	av_dump_format(fmt_ctx, 0, src, 0); // dump the media file's meta info; the 2nd arg is a stream index hint (0 is fine), the 4th arg is 0 for an input file, 1 for an output file

	// 2:get stream
	ret = av_find_best_stream(fmt_ctx/*format context*/, 
				AVMEDIA_TYPE_AUDIO/*type of stream to extract: audio*/, 
				-1/*stream index, unknown here*/,
				-1/*index of a related stream (e.g. the matching video), not needed*/,
				NULL/*decoder, not needed*/,
				0/*flags, not needed*/ );		

	if(ret<0){
		av_log(NULL, AV_LOG_ERROR, "Cannot find the best stream!\n");
		avformat_close_input(&fmt_ctx);
		fclose(dst_fd);
		return -1;
	}
	
	audio_index = ret;

	av_init_packet(&pkt);

	while(av_read_frame(fmt_ctx, &pkt) >= 0){
		if(pkt.stream_index == audio_index){

			// build the ADTS header and write it to the output file
			char adts_header_buf[7];
            adts_header(adts_header_buf, pkt.size);
            fwrite(adts_header_buf, 1, 7, dst_fd);

			// 3:write audio data to aac file
			len = fwrite(pkt.data/*data to write*/, 1/*write one byte at a time*/, pkt.size/*number of bytes to write*/, dst_fd/*destination file*/);
		
			if(len != pkt.size){
				av_log(NULL, AV_LOG_WARNING, "warning, length of data is not equal size of pkt!");
			}
		}
		av_packet_unref(&pkt); // always unref here, whether or not the packet belongs to the selected stream
	}
	
	avformat_close_input(&fmt_ctx); // close the media file
	if(dst_fd){
		fclose(dst_fd);
	}

	return 0;
}
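
Assuming the source above is saved as extract_aac.c (the file name is just an example), it can be compiled and run like the other samples, passing the input file and the output .aac path:

clang -g -o extract_aac extract_aac.c `pkg-config --cflags --libs libavutil libavformat`
./extract_aac test.mp4 out.aac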

Why does the program from the course fail to extract AAC audio for me?
An alternative: implementing the audio extraction with FFmpeg's own muxing APIs

#include <stdio.h>
#include <libavutil/log.h>
#include <libavformat/avio.h>
#include <libavformat/avformat.h>

#define ERROR_STR_SIZE 1024

int main(int argc, char *argv[])
{
    int err_code;
    char errors[1024];

    char *src_filename = NULL;
    char *dst_filename = NULL;

    FILE *dst_fd = NULL;

    int audio_stream_index = -1;
    int len;

    AVFormatContext *ofmt_ctx = NULL;
    AVOutputFormat *output_fmt = NULL;

    AVStream *in_stream = NULL;
    AVStream *out_stream = NULL;

    AVFormatContext *fmt_ctx = NULL;
    //AVFrame *frame = NULL;
    AVPacket pkt;

    av_log_set_level(AV_LOG_DEBUG);

    if(argc < 3){
        av_log(NULL, AV_LOG_DEBUG, "the count of parameters should be more than three!\n");
        return -1;
    }

    src_filename = argv[1];
    dst_filename = argv[2];

    if(src_filename == NULL || dst_filename == NULL){
        av_log(NULL, AV_LOG_DEBUG, "src or dts file is null, plz check them!\n");
        return -1;
    }

    /*register all formats and codec*/
    av_register_all();

    /*open input media file, and allocate format context*/
    if((err_code = avformat_open_input(&fmt_ctx, src_filename, NULL, NULL)) < 0){
        av_strerror(err_code, errors, 1024);
        av_log(NULL, AV_LOG_DEBUG, "Could not open source file: %s, %d(%s)\n",
               src_filename,
               err_code,
               errors);
        return -1;
    }

    /*retrieve audio stream*/
    if((err_code = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_strerror(err_code, errors, 1024);
        av_log(NULL, AV_LOG_DEBUG, "failed to find stream information: %s, %d(%s)\n",
               src_filename,
               err_code,
               errors);
        return -1;
    }

    /*dump input information*/
    av_dump_format(fmt_ctx, 0, src_filename, 0);
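
    /* NOTE: the next statement hard-codes the audio stream as streams[1]; this only
       works when the input file really has its audio at index 1 (and the nb_streams
       check a few lines further down only runs after this access). */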

    in_stream = fmt_ctx->streams[1];
    AVCodecParameters *in_codecpar = in_stream->codecpar;
    if(in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO){
        av_log(NULL, AV_LOG_ERROR, "The Codec type is invalid!\n");
        exit(1);
    }

    //out file
    ofmt_ctx = avformat_alloc_context();
    output_fmt = av_guess_format(NULL, dst_filename, NULL);
    if(!output_fmt){
        av_log(NULL, AV_LOG_DEBUG, "Cloud not guess file format \n");
        exit(1);
    }

    ofmt_ctx->oformat = output_fmt;

    out_stream = avformat_new_stream(ofmt_ctx, NULL);
    if(!out_stream){
        av_log(NULL, AV_LOG_DEBUG, "Failed to create out stream!\n");
        exit(1);
    }

    if(fmt_ctx->nb_streams<2){
        av_log(NULL, AV_LOG_ERROR, "the number of stream is too less!\n");
        exit(1);
    }


    if((err_code = avcodec_parameters_copy(out_stream->codecpar, in_codecpar)) < 0 ){
        av_strerror(err_code, errors, ERROR_STR_SIZE);
        av_log(NULL, AV_LOG_ERROR,
               "Failed to copy codec parameter, %d(%s)\n",
               err_code, errors);
    }

    out_stream->codecpar->codec_tag = 0;

    if((err_code = avio_open(&ofmt_ctx->pb, dst_filename, AVIO_FLAG_WRITE)) < 0) {
        av_strerror(err_code, errors, 1024);
        av_log(NULL, AV_LOG_DEBUG, "Could not open file %s, %d(%s)\n",
               dst_filename,
               err_code,
               errors);
        exit(1);
    }

    /*
    dst_fd = fopen(dst_filename, "wb");
    if (!dst_fd) {
        av_log(NULL, AV_LOG_DEBUG, "Could not open destination file %s\n", dst_filename);
        return -1;
    }
    */


    /*dump output information*/
    av_dump_format(ofmt_ctx, 0, dst_filename, 1);

    /*
    frame = av_frame_alloc();
    if(!frame){
        av_log(NULL, AV_LOG_DEBUG, "Could not allocate frame\n");
        return AVERROR(ENOMEM);
    }
    */

    /*initialize packet*/
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /*find best audio stream*/
    audio_stream_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if(audio_stream_index < 0){
        av_log(NULL, AV_LOG_DEBUG, "Could not find %s stream in input file %s\n",
               av_get_media_type_string(AVMEDIA_TYPE_AUDIO),
               src_filename);
        return AVERROR(EINVAL);
    }

    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        av_log(NULL, AV_LOG_DEBUG, "Error occurred when opening output file");
        exit(1);
    }

    /*read frames from media file*/
    while(av_read_frame(fmt_ctx, &pkt) >=0 ){
        if(pkt.stream_index == audio_stream_index){
            pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.dts = pkt.pts;
            pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
            pkt.pos = -1;
            pkt.stream_index = 0;
            av_interleaved_write_frame(ofmt_ctx, &pkt);
            av_packet_unref(&pkt);
        }
    }

    av_write_trailer(ofmt_ctx);

    /*close input media file*/
    avformat_close_input(&fmt_ctx);
    if(dst_fd) {
        fclose(dst_fd);
    }

    avio_close(ofmt_ctx->pb);

    return 0;
}

5.3 Extracting video data

Start code — a marker placed in front of every frame.
To tell the frames apart you can either prepend each frame with its length, or prepend each frame with a marker/signature (the start code).

SPS / PPS — the parameters needed to decode the video frames; they are inserted before every key frame.

codec->extradata — the SPS and PPS are taken from the codec's extradata, not from the packets themselves.

Goal:
prepend the start-code signature to every frame;
prepend the SPS and PPS (again with start codes) to every key frame.
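
For reference, here is a minimal sketch of this idea that leans on FFmpeg's built-in h264_mp4toannexb bitstream filter instead of building the headers by hand. This is my own illustration, not the course's program: it assumes the input's video stream is H.264, and it uses the av_bsf_* API (declared in libavcodec/bsf.h on FFmpeg 4.3+; older 4.x releases expose it through libavcodec/avcodec.h). The filter inserts a start code before every NALU and copies the SPS/PPS from the codec extradata in front of every key frame, which is exactly the goal described above.

#include <stdio.h>
#include <libavutil/log.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>   // av_bsf_* (libavcodec/bsf.h on newer FFmpeg)

int main(int argc, char *argv[])
{
    AVFormatContext *fmt_ctx = NULL;
    AVBSFContext *bsf_ctx = NULL;
    const AVBitStreamFilter *bsf = NULL;
    AVPacket pkt;
    FILE *out = NULL;
    int video_index;

    if (argc < 3) { // usage: prog input output.h264
        av_log(NULL, AV_LOG_ERROR, "usage: %s input output.h264\n", argv[0]);
        return -1;
    }

    if (avformat_open_input(&fmt_ctx, argv[1], NULL, NULL) < 0)
        return -1;

    video_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_index < 0)
        goto fail;

    // the bitstream filter turns length-prefixed (AVCC) packets into Annex B:
    // start codes before every NALU, SPS/PPS (from extradata) before key frames
    bsf = av_bsf_get_by_name("h264_mp4toannexb");
    if (!bsf)
        goto fail;
    av_bsf_alloc(bsf, &bsf_ctx);
    avcodec_parameters_copy(bsf_ctx->par_in, fmt_ctx->streams[video_index]->codecpar);
    av_bsf_init(bsf_ctx);

    out = fopen(argv[2], "wb");
    av_init_packet(&pkt);

    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == video_index) {
            av_bsf_send_packet(bsf_ctx, &pkt);        // hand the packet to the filter
            while (av_bsf_receive_packet(bsf_ctx, &pkt) == 0) {
                fwrite(pkt.data, 1, pkt.size, out);   // Annex B data
                av_packet_unref(&pkt);
            }
        } else {
            av_packet_unref(&pkt);
        }
    }

    fclose(out);
fail:
    av_bsf_free(&bsf_ctx);
    avformat_close_input(&fmt_ctx);
    return 0;
}

Linking additionally needs libavcodec, e.g. clang -g -o extract_h264 extract_h264.c `pkg-config --cflags --libs libavutil libavformat libavcodec`, and the resulting raw .h264 file can be played with ffplay.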

5.4 Converting MP4 to FLV

avformat_alloc_output_context2() / avformat_free_context()

avformat_new_stream()

avcodec_parameters_copy()

avformat_write_header() writes the multimedia file header

av_write_frame() / av_interleaved_write_frame()

av_write_trailer()

The program below converts, for example, test.mp4 into test.flv.

#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    const char *in_filename, *out_filename;
    int ret, i;
    int stream_index = 0;
    int *stream_mapping = NULL;
    int stream_mapping_size = 0;

    if (argc < 3) {
        printf("usage: %s input output\n"
               "API example program to remux a media file with libavformat and libavcodec.\n"
               "The output format is guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }

    in_filename  = argv[1];
    out_filename = argv[2];


    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }

    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    stream_mapping_size = ifmt_ctx->nb_streams;
    stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
    if (!stream_mapping) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ofmt = ofmt_ctx->oformat;

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *out_stream;
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVCodecParameters *in_codecpar = in_stream->codecpar;

        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            stream_mapping[i] = -1;
            continue;
        }

        stream_mapping[i] = stream_index++;

        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy codec parameters\n");
            goto end;
        }
        out_stream->codecpar->codec_tag = 0;
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when write_header\n");
        goto end;
    }

    while (1) {
        AVStream *in_stream, *out_stream;

        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        if (pkt.stream_index >= stream_mapping_size ||
            stream_mapping[pkt.stream_index] < 0) {
            av_packet_unref(&pkt);
            continue;
        }

        pkt.stream_index = stream_mapping[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        log_packet(ifmt_ctx, &pkt, "in");

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_packet_unref(&pkt);
    }

    av_write_trailer(ofmt_ctx);
end:

    avformat_close_input(&ifmt_ctx);

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    av_freep(&stream_mapping);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;
}
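
Assuming the source above is saved as remux.c (the file name is just an example), it can be built and run like the earlier samples; avcodec_parameters_copy() lives in libavcodec, so that package is listed as well:

clang -g -o remux remux.c `pkg-config --cflags --libs libavutil libavformat libavcodec`
./remux test.mp4 test.flv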

5.5 Cutting a segment out of an MP4

av_seek_frame()

To cut, ./cut 5 15 srcVideo toVideo writes seconds 5 through 15 of srcVideo into toVideo.

#include <stdlib.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

int cut_video(double from_seconds, double end_seconds, const char* in_filename, const char* out_filename) {
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;

    //av_register_all();

    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }

    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    ofmt = ofmt_ctx->oformat;

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codecpar->codec_tag = 0;
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }

    ret = av_seek_frame(ifmt_ctx, -1, from_seconds*AV_TIME_BASE, AVSEEK_FLAG_ANY);
    if (ret < 0) {
        fprintf(stderr, "Error seek\n");
        goto end;
    }
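    /* NOTE: AVSEEK_FLAG_ANY lets the seek land on a non-key frame, which can leave the
       first frames of the cut undecodable; AVSEEK_FLAG_BACKWARD is commonly used instead
       so the seek stops at the preceding key frame. */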

    int64_t *dts_start_from = malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
    memset(dts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);
    int64_t *pts_start_from = malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
    memset(pts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);

    while (1) {
        AVStream *in_stream, *out_stream;

        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];

        log_packet(ifmt_ctx, &pkt, "in");

        if (av_q2d(in_stream->time_base) * pkt.pts > end_seconds) {
            av_packet_unref(&pkt);
            break;
        }

        if (dts_start_from[pkt.stream_index] == 0) {
            dts_start_from[pkt.stream_index] = pkt.dts;
            printf("dts_start_from: %s\n", av_ts2str(dts_start_from[pkt.stream_index]));
        }
        if (pts_start_from[pkt.stream_index] == 0) {
            pts_start_from[pkt.stream_index] = pkt.pts;
            printf("pts_start_from: %s\n", av_ts2str(pts_start_from[pkt.stream_index]));
        }

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts - pts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts - dts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (pkt.pts < 0) {
            pkt.pts = 0;
        }
        if (pkt.dts < 0) {
            pkt.dts = 0;
        }
        if(pkt.pts < pkt.dts) {
            av_packet_unref(&pkt); // skip packets with inconsistent timestamps, but release them first
            continue;
        }
        pkt.duration = (int)av_rescale_q((int64_t)pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");
        printf("\n");

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_packet_unref(&pkt);
    }
    free(dts_start_from);
    free(pts_start_from);

    av_write_trailer(ofmt_ctx);
end:

    avformat_close_input(&ifmt_ctx);

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;
}

int main(int argc, char *argv[]){
    if(argc < 5){
        fprintf(stderr, "Usage: %s starttime endtime srcfile outfile\n", argv[0]);
        return -1;
    }

    double starttime = atof(argv[1]); // atof, so fractional seconds also work
    double endtime = atof(argv[2]);
    cut_video(starttime, endtime, argv[3], argv[4]);

    return 0;
}
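
Assuming the source is saved as cut.c (again, the name is only an example), building and running follow the same pattern:

clang -g -o cut cut.c `pkg-config --cflags --libs libavutil libavformat libavcodec`
./cut 5 15 test.mp4 cut_out.mp4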

5.6 A simple "Xiaokaxiu"-style lip-sync demo

  • Extract the audio from one media file and the video from another

  • Merge the audio track and the video track into a new file

  • Trim the audio and video tracks
