ffmpeg study notes

Common ffmpeg commands

  1. Screen recording

    • List available devices

      ffmpeg -devices
      
      Devices:
       D. = Demuxing supported
       .E = Muxing supported
       --
       D  dshow           DirectShow capture
       D  gdigrab         GDI API Windows frame grabber
       D  lavfi           Libavfilter virtual input device
        E sdl,sdl2        SDL2 output device
       D  vfwcap          VfW video capture
      
    • Capture the screen

      ffmpeg -f gdigrab -i "desktop" -r 30 out.yuv
      
    • Playback

      ffplay -s 1920x1080 -pix_fmt bgra out.yuv
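      
    • Capture directly to an encoded file (an additional variant, not in the original notes; assumes the ffmpeg build includes libx264)

      ffmpeg -f gdigrab -framerate 30 -i desktop -c:v libx264 -pix_fmt yuv420p out.mp4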
      
  2. Demuxing and muxing

    • Container format conversion

      ffmpeg -i video1.mp4 -vcodec copy -acodec copy out.flv
      
    • Extract the video stream

      ffmpeg -i video1.mp4 -vcodec copy -an out.h264
      
    • Extract the audio stream

      ffmpeg -i video1.mp4 -vn -acodec copy out.aac
      
  3. Working with raw data

    • Extract YUV data with ffmpeg

      ffmpeg -i video1.mp4 out.yuv
      
      Playback:
      
      ffplay -s 1024x576 out.yuv
      
    • Extract PCM data with ffmpeg

      ffmpeg -i video1.mp4 -vn -ar 44100 -ac 2 -f s16le out.pcm
      
      Playback:
      
      ffplay -ar 44100 -ac 2 -f s16le out.pcm
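      
    • Wrapping the raw PCM back into WAV (an extra example, not in the original notes; the format flags must match the ones used when extracting)

      ffmpeg -f s16le -ar 44100 -ac 2 -i out.pcm out.wav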

    
    
  4. Filters

    • Crop the video

      ffmpeg -i video1.mp4 -vf crop=in_w-200:in_h-200 out.mp4
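      
    • Scale the video (an additional filter example, not in the original notes)

      ffmpeg -i video1.mp4 -vf scale=640:360 out_small.mp4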
      
  5. Cutting and concatenating

    • Cut a clip

      ffmpeg -i video1.mp4 -ss 00:00:00 -t 10 out.ts
      
    • Concatenate clips

      ffmpeg -f concat -i inputs.txt -c copy out.flv
      
      file '1.ts'
      file '2.ts'
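      
      For reference, one way to produce the 1.ts and 2.ts segments listed above (not part of the original notes):
      
      ffmpeg -i video1.mp4 -ss 00:00:00 -t 10 -c copy 1.ts
      ffmpeg -i video1.mp4 -ss 00:00:10 -t 10 -c copy 2.ts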
      
  6. Converting between video and images

    • Video to images

      ffmpeg -i video1.mp4 -r 1 -f image2 img/image-%3d.jpeg
      
    • Images to video

      ffmpeg -i img/image-%3d.jpeg out.mp4
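      
      When turning images back into video it usually helps to set the input frame rate and encoder explicitly, e.g. (an additional variant, not in the original notes):
      
      ffmpeg -framerate 25 -i img/image-%3d.jpeg -c:v libx264 -pix_fmt yuv420p out25.mp4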
      
  7. Live stream push and pull

    • Push a stream

      ffmpeg -re -i out.mp4 -c copy -f flv rtmp://server/live/streamName
      
    • Pull a stream

      ffmpeg -i rtmp://server/live/streamName -c copy dump.flv
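      
      The published stream can also be previewed directly with ffplay:
      
      ffplay rtmp://server/live/streamName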
      

ffmpeg development

  1. Logging

    
    extern "C"
    {
    #include <libavutil/log.h>
    }
    
    int main()
    {
    	/**
    	* \brief Set the log level
    	* \param level log level
    	*/
    	av_log_set_level(AV_LOG_INFO);
    
    	/**
    	* \brief Print a log message
    	* \param avcl NULL
    	* \param level log level
    	* \param fmt format string, printf-style (%s, %d, ... placeholders)
    	* \param ...
    	*/
    	av_log(nullptr, AV_LOG_INFO, "Hello world\n");
        return 0;
    }
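    
    A custom log sink can also be installed with av_log_set_callback. Below is a minimal sketch (my_log_callback is a name chosen here for illustration); it simply forwards messages at or below the current level to stderr:
    
    extern "C"
    {
    #include <libavutil/log.h>
    }
    #include <cstdio>
    #include <cstdarg>
    
    static void my_log_callback(void* avcl, int level, const char* fmt, va_list vl)
    {
    	(void)avcl; //the context that emitted the message is not used here
    	//ignore messages above the configured level
    	if (level > av_log_get_level())
    		return;
    	vfprintf(stderr, fmt, vl);
    }
    
    int main()
    {
    	av_log_set_level(AV_LOG_DEBUG);
    	av_log_set_callback(my_log_callback);
    	av_log(nullptr, AV_LOG_INFO, "custom sink: %s\n", "Hello world");
    	return 0;
    }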
    
  2. Printing media file information

    extern "C"
    {
    #include <libavutil/log.h>
    #include <libavformat/avformat.h>
    }
    
    int main()
    {
    	/*
    	 * AVFormatContext: the input/output format context
    	 */
    	AVFormatContext* fmt_ctx = nullptr;
    	/** av_register_all
    	* \brief Register all muxers, demuxers, codecs, etc.
    	*/
    	av_register_all();
    	/** avformat_open_input
    	* \brief Open an input file and create its format context
    	* \param ps address of the context pointer
    	* \param url file URL/path
    	* \param fmt input format; if NULL it is probed automatically
    	* \param options NULL here; a dictionary of demuxer options
    	* \return
    	*/
    	int ret = avformat_open_input(&fmt_ctx, "../video1.mp4", nullptr, nullptr);
    	if(ret < 0)
    	{
    		av_log(nullptr, AV_LOG_ERROR, "open file fail\n");
    		return -1;
    	}
    	/** av_dump_format
    	* \brief Print information about the file
    	* \param ic format context
    	* \param index stream index (0 here)
    	* \param url media file name
    	* \param is_output 0 for an input, 1 for an output
    	*/
    	av_dump_format(fmt_ctx, 0, "../video1.mp4", 0);
    	/** avformat_close_input
    	* \brief Close the format context
    	* \param s address of the context pointer
    	*/
    	avformat_close_input(&fmt_ctx);
    	return 0;
    }
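    
    Beyond av_dump_format, the streams can also be inspected programmatically once avformat_find_stream_info has filled in the codec parameters. A minimal sketch (same file path as above):
    
    extern "C"
    {
    #include <libavutil/log.h>
    #include <libavformat/avformat.h>
    }
    
    int main()
    {
    	AVFormatContext* fmt_ctx = nullptr;
    	av_register_all();
    	if (avformat_open_input(&fmt_ctx, "../video1.mp4", nullptr, nullptr) < 0)
    		return -1;
    	//read a little of the file so that per-stream parameters are available
    	if (avformat_find_stream_info(fmt_ctx, nullptr) < 0)
    	{
    		avformat_close_input(&fmt_ctx);
    		return -1;
    	}
    	for (unsigned i = 0; i < fmt_ctx->nb_streams; i++)
    	{
    		AVCodecParameters* par = fmt_ctx->streams[i]->codecpar;
    		av_log(nullptr, AV_LOG_INFO, "stream %u: type=%s, codec_id=%d\n",
    			i, av_get_media_type_string(par->codec_type), (int)par->codec_id);
    	}
    	avformat_close_input(&fmt_ctx);
    	return 0;
    }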
    
    
  3. Extracting audio data with FFmpeg

    • Extract the audio data from a file
    #define _CRT_SECURE_NO_WARNINGS
    extern "C"
    {
    #include <libavformat/avformat.h>
    }
     
    #define ADTS_HEADER_LEN  7
     
    void adts_header_v1(char *szAdtsHeader, int dataLen){
     
    	int audio_object_type = 2;
    	int sampling_frequency_index = 4;  //4: 44100 Hz
    	int channel_config = 2;
     
    	int adtsLen = dataLen + 7;
     
    	szAdtsHeader[0] = 0xff;         //syncword:0xfff                          high 8 bits
    	szAdtsHeader[1] = 0xf0;         //syncword:0xfff                          low 4 bits
    	szAdtsHeader[1] |= (0 << 3);    //MPEG Version:0 for MPEG-4,1 for MPEG-2  1bit
    	szAdtsHeader[1] |= (0 << 1);    //Layer:0                                 2bits 
    	szAdtsHeader[1] |= 1;           //protection absent:1                     1bit
     
    	szAdtsHeader[2] = (audio_object_type - 1) << 6;            //profile:audio_object_type - 1                      2bits
    	szAdtsHeader[2] |= (sampling_frequency_index & 0x0f) << 2; //sampling frequency index:sampling_frequency_index  4bits 
    	szAdtsHeader[2] |= (0 << 1);                             //private bit:0                                      1bit
    	szAdtsHeader[2] |= (channel_config & 0x04) >> 2;           //channel configuration:channel_config               high 1 bit
     
    	szAdtsHeader[3] = (channel_config & 0x03) << 6;     //channel configuration:channel_config      low 2 bits
    	szAdtsHeader[3] |= (0 << 5);                      //original:0                               1bit
    	szAdtsHeader[3] |= (0 << 4);                      //home:0                                   1bit
    	szAdtsHeader[3] |= (0 << 3);                      //copyright id bit:0                       1bit  
    	szAdtsHeader[3] |= (0 << 2);                      //copyright id start:0                     1bit
    	szAdtsHeader[3] |= ((adtsLen & 0x1800) >> 11);           //frame length:value   high 2 bits
     
    	szAdtsHeader[4] = (uint8_t)((adtsLen & 0x7f8) >> 3);     //frame length:value    middle 8 bits
    	szAdtsHeader[5] = (uint8_t)((adtsLen & 0x7) << 5);       //frame length:value    low 3 bits
    	szAdtsHeader[5] |= 0x1f;                                 //buffer fullness:0x7ff high 5 bits
    	szAdtsHeader[6] = 0xfc;
    }
    int main()
    {
    	av_log_set_level(AV_LOG_INFO);
    	av_register_all();
    	AVFormatContext *fmt_ctx = NULL;
    	//open the media file
    	int ret = avformat_open_input(&fmt_ctx, "./1.mp4", NULL, NULL);
    	if (ret < 0)
    	{
    		av_log(NULL, AV_LOG_INFO, "avformat_open_input fail!\n");
    		return -1;
    	}
    	av_dump_format(fmt_ctx, 0, "./1.mp4", 0);
     
    	FILE *file = fopen("./test.aac", "wb");
    	if (file == NULL)
    	{
    		av_log(NULL, AV_LOG_INFO, "fopen fail!\n");
    		goto FMT_ERROR;
    	}
     
    	ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    	if (ret < 0)
    	{
    		av_log(NULL, AV_LOG_INFO, "av_find_best_stream fail!\n");
    		goto FMT_ERROR;
    	}
     
    	//plain declarations (no initializers) so the error gotos above do not jump over an initialization, which C++ forbids
    	int audio_index;
    	int index;
     
    	AVPacket pkt;
    	av_init_packet(&pkt);
    	audio_index = ret;
    	while ((index = av_read_frame(fmt_ctx, &pkt)) >= 0)
    	{
    		if (pkt.stream_index == audio_index)
    		{
    			char adts_header_buf[7];
    			adts_header_v1(adts_header_buf, pkt.size);
    			fwrite(adts_header_buf, 1, 7, file);
     
    			int n = fwrite(pkt.data, 1, pkt.size, file);
    			if (n != pkt.size)
    			{
    				av_log(NULL, AV_LOG_ERROR, "fwrite fail!\n");
    				goto FMT_ERROR;
    			}
    		}
    		av_packet_unref(&pkt);
    	}
     
    FMT_ERROR:
    	avformat_close_input(&fmt_ctx);
    	if (file)
    		fclose(file);
    	return 0;
    }
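    
    The sampling_frequency_index in adts_header_v1 is hard-coded to 4 (44100 Hz). If the input sample rate can vary, the index can be looked up from the standard ADTS table; a small helper sketch (freq_to_adts_index is a name chosen here for illustration; the actual rate is available from fmt_ctx->streams[audio_index]->codecpar->sample_rate):
    
    static int freq_to_adts_index(int sample_rate)
    {
    	//standard ADTS sampling-frequency table (index 0..12)
    	static const int freqs[] = { 96000, 88200, 64000, 48000, 44100, 32000,
    		24000, 22050, 16000, 12000, 11025, 8000, 7350 };
    	for (int i = 0; i < (int)(sizeof(freqs) / sizeof(freqs[0])); i++)
    	{
    		if (freqs[i] == sample_rate)
    			return i;
    	}
    	return -1; //sample rate not representable in an ADTS header
    }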
    
    • Extract the H264 video stream from a file
    #define _CRT_SECURE_NO_WARNINGS
    extern "C"
    {
    #include <libavformat/avformat.h>
    }
     
    #ifndef AV_WB32
    #   define AV_WB32(p, val) do {                 \
    	uint32_t d = (val);                     \
    	((uint8_t*)(p))[3] = (d);               \
    	((uint8_t*)(p))[2] = (d) >> 8;            \
    	((uint8_t*)(p))[1] = (d) >> 16;           \
    	((uint8_t*)(p))[0] = (d) >> 24;           \
    } while (0)
    #endif
     
    #ifndef AV_RB16
    #   define AV_RB16(x)                           \
    	((((const uint8_t*)(x))[0] << 8) | \
    	((const uint8_t*)(x))[1])
    #endif
     
    static int alloc_and_copy(AVPacket *out,
    	const uint8_t *sps_pps, uint32_t sps_pps_size,
    	const uint8_t *in, uint32_t in_size)
    {
    	uint32_t offset = out->size;
    	uint8_t nal_header_size = offset ? 3 : 4;
    	int err;
     
    	err = av_grow_packet(out, sps_pps_size + in_size + nal_header_size);
    	if (err < 0)
    		return err;
    	//copy pps/sps
    	if (sps_pps)
    	{
    		memcpy(out->data + offset, sps_pps, sps_pps_size);
    	}
    	//copy data
    	memcpy(out->data + sps_pps_size + nal_header_size + offset, in, in_size);
    	//copy start code
    	if (!offset) {
    		AV_WB32(out->data + sps_pps_size, 1);
    	}
    	else 
    	{
    		(out->data + offset + sps_pps_size)[0] = 0;
    		(out->data + offset + sps_pps_size)[1] = 0;
    		(out->data + offset + sps_pps_size)[2] = 1;
    	}
     
    	return 0;
    }
     
    int h264_extradata_to_annexb(const uint8_t *codec_extradata, const int codec_extradata_size, AVPacket *out_extradata, int padding)
    {
    	uint16_t unit_size;
    	uint64_t total_size = 0;
    	uint8_t *out = NULL, unit_nb, sps_done = 0,
    		sps_seen = 0, pps_seen = 0, sps_offset = 0, pps_offset = 0;
    	const uint8_t *extradata = codec_extradata + 4;//the first 4 bytes of the extradata are not needed here, skip them
    	static const uint8_t nalu_header[4] = { 0, 0, 0, 1 };
    	int length_size = (*extradata++ & 0x3) + 1; // retrieve length coded size: the number of bytes used to store each NAL unit length
     
    	sps_offset = pps_offset = -1;
     
    	/* retrieve sps and pps unit(s) */
    	unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
    	if (!unit_nb) {
    		goto pps;
    	}
    	else {
    		sps_offset = 0;
    		sps_seen = 1;
    	}
     
    	while (unit_nb--) {
    		int err;
     
    		unit_size = AV_RB16(extradata);
    		total_size += unit_size + 4;
    		if (total_size > INT_MAX - padding) {
    			av_log(NULL, AV_LOG_ERROR,
    				"Too big extradata size, corrupted stream or invalid MP4/AVCC bitstream\n");
    			av_free(out);
    			return AVERROR(EINVAL);
    		}
    		if (extradata + 2 + unit_size > codec_extradata + codec_extradata_size) {
    			av_log(NULL, AV_LOG_ERROR, "Packet header is not contained in global extradata, "
    				"corrupted stream or invalid MP4/AVCC bitstream\n");
    			av_free(out);
    			return AVERROR(EINVAL);
    		}
    		if ((err = av_reallocp(&out, total_size + padding)) < 0)
    			return err;
    		memcpy(out + total_size - unit_size - 4, nalu_header, 4);
    		memcpy(out + total_size - unit_size, extradata + 2, unit_size);
    		extradata += 2 + unit_size;
    	pps:
    		if (!unit_nb && !sps_done++) {
    			unit_nb = *extradata++; /* number of pps unit(s) */
    			if (unit_nb) {
    				pps_offset = total_size;
    				pps_seen = 1;
    			}
    		}
    	}
     
    	if (out)
    		memset(out + total_size, 0, padding);
     
    	if (!sps_seen)
    		av_log(NULL, AV_LOG_WARNING,
    		"Warning: SPS NALU missing or invalid. "
    		"The resulting stream may not play.\n");
     
    	if (!pps_seen)
    		av_log(NULL, AV_LOG_WARNING,
    		"Warning: PPS NALU missing or invalid. "
    		"The resulting stream may not play.\n");
     
    	out_extradata->data = out;
    	out_extradata->size = total_size;
     
    	return length_size;
    }
     
     
    int h264_mp4toannexb(AVFormatContext *fmt_ctx, AVPacket *in, FILE *dst_fd)
    {
    	AVPacket spspps_pkt;
    	uint32_t cumul_size = 0;
    	AVPacket *out = av_packet_alloc();
     
    	const uint8_t *buf		= in->data;
    	int buf_size			= in->size;
    	const uint8_t *buf_end	= in->data + in->size;
    	int ret = -1;
     
    	//declared before the loop so the gotos below do not jump over initializations (required in C++)
    	int32_t nal_size;
    	uint8_t unit_type;
    	int len;
     
    	do {
    		ret = AVERROR(EINVAL);
    		if (buf + 4 /*s->length_size*/ > buf_end)
    			goto fail;
    		nal_size = 0;
    		//the 4-byte NAL length is big-endian: high byte first, e.g. 0x12345678 is stored as buf[0]=0x12, buf[1]=0x34, buf[2]=0x56, buf[3]=0x78
    		for (int i = 0; i < 4; i++)
    			nal_size = (nal_size << 8) | buf[i];
     
    		buf += 4; /*s->length_size;*/
    		//the low 5 bits of a NAL unit's first byte give its type (SPS, PPS, IDR, ...)
    		unit_type = *buf & 0x1f;
     
    		if (nal_size > buf_end - buf || nal_size < 0)
    			goto fail;
    		//IDR frame
    		if (unit_type == 5) 
    		{
    			//key frames need the SPS/PPS prepended
    			h264_extradata_to_annexb(fmt_ctx->streams[in->stream_index]->codec->extradata,
    				fmt_ctx->streams[in->stream_index]->codec->extradata_size,
    				&spspps_pkt,
    				AV_INPUT_BUFFER_PADDING_SIZE);
    			//prepend the start code
    			if ((ret = alloc_and_copy(out, spspps_pkt.data, spspps_pkt.size, buf, nal_size)) < 0)
    				goto fail;
    		}
    		else 
    		{
    			//non-key frames do not need the SPS/PPS
    			if ((ret = alloc_and_copy(out, NULL, 0, buf, nal_size)) < 0)
    				goto fail;
    		}
     
    		len = fwrite(out->data, 1, out->size, dst_fd);
    		if (len != out->size)
    		{
    			av_log(NULL, AV_LOG_DEBUG, "warning: number of bytes written doesn't match pkt.size (%d, %d)\n", len, out->size);
    		}
    		fflush(dst_fd);
     
    	next_nal:
    		buf += nal_size;
    		cumul_size += nal_size + 4;//s->length_size;
    	}
    	while (cumul_size < buf_size);
    fail:
    	av_packet_free(&out);
     
    	return ret;
    }
    int main()
    {
    	av_log_set_level(AV_LOG_INFO);
    	av_register_all();
    	AVFormatContext *fmt_ctx = NULL;
    	//open the media file
    	int ret = avformat_open_input(&fmt_ctx, "./1.mp4", NULL, NULL);
    	if (ret < 0)
    	{
    		av_log(NULL, AV_LOG_INFO, "avformat_open_input fail!\n");
    		return -1;
    	}
    	av_dump_format(fmt_ctx, 0, "./1.mp4", 0);
     
    	FILE *file = fopen("./test.h264", "wb");
    	if (file == NULL)
    	{
    		av_log(NULL, AV_LOG_INFO, "fopen fail!\n");
    		goto FMT_ERROR;
    	}
     
    	ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    	if (ret < 0)
    	{
    		av_log(NULL, AV_LOG_INFO, "av_find_best_stream fail!\n");
    		goto FMT_ERROR;
    	}
     
    	//plain declarations (no initializers) so the error gotos above do not jump over an initialization, which C++ forbids
    	int video_index;
    	int index;
     
    	AVPacket pkt;
    	av_init_packet(&pkt);
    	pkt.data = NULL;
    	pkt.size = 0;
    	video_index = ret;
    	while ((index = av_read_frame(fmt_ctx, &pkt)) >= 0)
    	{
    		if (pkt.stream_index == video_index)
    		{
    			h264_mp4toannexb(fmt_ctx, &pkt, file);
    		}
    		av_packet_unref(&pkt);
    	}
     
    FMT_ERROR:
    	avformat_close_input(&fmt_ctx);
    	if (file)
    		fclose(file);
    	return 0;
    }
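    
    Note that newer FFmpeg versions also ship an "h264_mp4toannexb" bitstream filter that performs the same AVCC-to-Annex-B conversion. A rough sketch of a per-packet helper built on that API is shown below (the function name is chosen here for illustration; in real code the filter context would be created once, outside the read loop, rather than per packet):
    
    static int write_annexb_with_bsf(AVFormatContext* fmt_ctx, int video_index, AVPacket* in, FILE* dst_fd)
    {
    	const AVBitStreamFilter* bsf = av_bsf_get_by_name("h264_mp4toannexb");
    	AVBSFContext* bsf_ctx = nullptr;
    	if (!bsf || av_bsf_alloc(bsf, &bsf_ctx) < 0)
    		return -1;
    	//the filter reads the SPS/PPS from the stream's codec parameters (extradata)
    	avcodec_parameters_copy(bsf_ctx->par_in, fmt_ctx->streams[video_index]->codecpar);
    	if (av_bsf_init(bsf_ctx) < 0)
    	{
    		av_bsf_free(&bsf_ctx);
    		return -1;
    	}
    	AVPacket out;
    	av_init_packet(&out);
    	out.data = nullptr;
    	out.size = 0;
    	if (av_bsf_send_packet(bsf_ctx, in) >= 0)
    	{
    		while (av_bsf_receive_packet(bsf_ctx, &out) >= 0)
    		{
    			fwrite(out.data, 1, out.size, dst_fd);
    			av_packet_unref(&out);
    		}
    	}
    	av_bsf_free(&bsf_ctx);
    	return 0;
    }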
    
    • MP4 to FLV
    
    extern "C"
    {
    #include <libavutil/log.h>
    #include <libavutil/avutil.h>
    #include <libavformat/avformat.h>
    }
    
    int main()
    {
    	av_register_all();
    	AVFormatContext* in_context = nullptr;
    	AVFormatContext* out_context = nullptr;
    	if(avformat_open_input(&in_context, "video1.mp4", nullptr, nullptr)<0)
    	{
    		std::cerr << "avformat_open_input";
    		return -1;
    	}
    	if(avformat_find_stream_info(in_context,0)< 0)
    	{
    		std::cerr << "avformat_find_stream_info";
    		return -1;
    	}
    	if(avformat_alloc_output_context2(&out_context, nullptr, nullptr, "out.flv") < 0)
    	{
    		std::cerr << "avformat_alloc_output_context2";
    		return -1;
    	}
    	int stream_mapping_size = in_context->nb_streams;
    	//int stream_output_size = 0;
    	//int* stream_mapping = (int*)av_malloc_array(stream_mapping_size, sizeof(*stream_mapping));
    	//if(stream_mapping == nullptr)
    	//{
    	//	std::cerr << "stream_mapping == nullptr";
    	//	return -1;
    	//}
    	for(int i=0;i<stream_mapping_size;i++)
    	{
    		AVStream* in_stream = in_context->streams[i];
    		AVCodecParameters* in_codecpar = in_stream->codecpar;
    		//if(in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
    		//	in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
    		//	in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE)
    		//{
    		//	stream_mapping[i] = -1;
    		//	continue;
    		//}
    		//stream_mapping[i] = stream_output_size++;
    		AVStream* out_stream = avformat_new_stream(out_context, nullptr);
    		if(out_stream == nullptr)
    		{
    			std::cerr << "avformat_new_stream fail";
    			return -1;
    		}
    		if(avcodec_parameters_copy(out_stream->codecpar, in_codecpar) < 0)
    		{
    			std::cerr << "avcodec_parameters_copy";
    			return -1;
    		}
    		out_stream->codecpar->codec_tag = 0;
    	}
    	if(avio_open(&out_context->pb, "out.flv", AVIO_FLAG_WRITE) < 0)
    	{
    		std::cerr << "avio_open fail";
    		return -1;
    	}
    	if(avformat_write_header(out_context, nullptr) < 0)
    	{
    		std::cerr << "avformat_write_header";
    		return -1;
    	}
    	while (true)
    	{
    		AVStream* in_stream, *out_stream;
    		AVPacket pkt;
    		if(av_read_frame(in_context, &pkt)<0)
    		{
    			break;
    		}
    		in_stream = in_context->streams[pkt.stream_index];
    		//if(pkt.stream_index >= stream_output_size || stream_mapping[pkt.stream_index] < 0)
    		//{
    		//	av_packet_unref(&pkt);
    		//	continue;
    		//}
    		//pkt.stream_index = stream_mapping[pkt.stream_index];
    		out_stream = out_context->streams[pkt.stream_index];
    
    		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
    		pkt.pos = -1;
    		if(av_interleaved_write_frame(out_context, &pkt)<0)
    		{
    			std::cerr << "av_interleaved_write_frame";
    			return -1;
    		}
    		av_packet_unref(&pkt);
    	}
    	av_write_trailer(out_context);
    	avformat_close_input(&in_context);
    	avio_closep(&out_context->pb);
    	avformat_free_context(out_context);
    	//av_freep(&stream_mapping);
    	return 0;
    }
    
    
    
    • Cut a segment out of an MP4
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <libavutil/timestamp.h>
    #include <libavformat/avformat.h>
     
    static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
    {
        AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
     
        printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
               tag,
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
               pkt->stream_index);
    }
     
    int cut_video(double from_seconds, double end_seconds, const char* in_filename, const char* out_filename) {
        AVOutputFormat *ofmt = NULL;
        AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
        AVPacket pkt;
        int ret, i;
     
        av_register_all();
     
        if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
            fprintf(stderr, "Could not open input file '%s'", in_filename);
            goto end;
        }
     
        if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
            fprintf(stderr, "Failed to retrieve input stream information");
            goto end;
        }
     
        av_dump_format(ifmt_ctx, 0, in_filename, 0);
    	//output format context
        avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
        if (!ofmt_ctx) {
            fprintf(stderr, "Could not create output context\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
     
        ofmt = ofmt_ctx->oformat;
     
        for (i = 0; i < ifmt_ctx->nb_streams; i++) 
    	{
            AVStream *in_stream = ifmt_ctx->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            if (!out_stream) {
                fprintf(stderr, "Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
    		//copy the stream codec parameters
            ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
            if (ret < 0) {
                fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        }
        av_dump_format(ofmt_ctx, 0, out_filename, 1);
     
        if (!(ofmt->flags & AVFMT_NOFILE)) {
            ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
            if (ret < 0) {
                fprintf(stderr, "Could not open output file '%s'", out_filename);
                goto end;
            }
        }
    	//write the container header
        ret = avformat_write_header(ofmt_ctx, NULL);
        if (ret < 0) {
            fprintf(stderr, "Error occurred when opening output file\n");
            goto end;
        }
     
    	//seek to the requested start: seconds * AV_TIME_BASE gives the position where cutting begins
        ret = av_seek_frame(ifmt_ctx, -1, from_seconds*AV_TIME_BASE, AVSEEK_FLAG_ANY);
        if (ret < 0) {
            fprintf(stderr, "Error seek\n");
            goto end;
        }
     
        int64_t *dts_start_from = malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
        memset(dts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);
        int64_t *pts_start_from = malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
        memset(pts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);
     
        while (1) {
            AVStream *in_stream, *out_stream;
     
            ret = av_read_frame(ifmt_ctx, &pkt);
            if (ret < 0)
                break;
     
            in_stream  = ifmt_ctx->streams[pkt.stream_index];
            out_stream = ofmt_ctx->streams[pkt.stream_index];
     
            log_packet(ifmt_ctx, &pkt, "in");
    		//compare the packet time against the end position
            if (av_q2d(in_stream->time_base) * pkt.pts > end_seconds) {
                av_free_packet(&pkt);
                break;
            }
     
            if (dts_start_from[pkt.stream_index] == 0) {
                dts_start_from[pkt.stream_index] = pkt.dts;
                printf("dts_start_from: %s\n", av_ts2str(dts_start_from[pkt.stream_index]));
            }
            if (pts_start_from[pkt.stream_index] == 0) {
                pts_start_from[pkt.stream_index] = pkt.pts;
                printf("pts_start_from: %s\n", av_ts2str(pts_start_from[pkt.stream_index]));
            }
     
        /* copy packet */ //rescale the time base; pts is the presentation time of each frame
            pkt.pts = av_rescale_q_rnd(pkt.pts - pts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            pkt.dts = av_rescale_q_rnd(pkt.dts - dts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            if (pkt.pts < 0) {
                pkt.pts = 0;
            }
            if (pkt.dts < 0) {
                pkt.dts = 0;
            }
            pkt.duration = (int)av_rescale_q((int64_t)pkt.duration, in_stream->time_base, out_stream->time_base);
            pkt.pos = -1;
            log_packet(ofmt_ctx, &pkt, "out");
            printf("\n");
     
            ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
            if (ret < 0) {
                fprintf(stderr, "Error muxing packet\n");
                break;
            }
            av_free_packet(&pkt);
        }
        free(dts_start_from);
        free(pts_start_from);
     
        av_write_trailer(ofmt_ctx);
    end:
     
        avformat_close_input(&ifmt_ctx);
     
        /* close output */
        if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
            avio_closep(&ofmt_ctx->pb);
        avformat_free_context(ofmt_ctx);
     
        if (ret < 0 && ret != AVERROR_EOF) {
            fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
            return 1;
        }
     
        return 0;
    }
     
    int main(int argc, char *argv[]){
        if(argc < 5){
            fprintf(stderr, "Usage: \
                    command startime, endtime, srcfile, outfile");
            return -1;
        }
     
        double startime = atof(argv[1]);
        double endtime = atof(argv[2]);
        cut_video(startime, endtime, argv[3], argv[4]);
     
        return 0;
    }
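    
    A possible invocation, assuming the program above is built as cut_video (the binary name is arbitrary):
    
    ./cut_video 10 20 video1.mp4 cut.mp4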
     
    

FFmpeg intermediate topics

  1. H264 encoding

extern "C"
{
#include <libavutil/log.h>
#include <libavutil/opt.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <assert.h>
}

int main()
{
	avcodec_register_all();
	//1. find the encoder
	AVCodec * codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	assert(codec);
	//2. allocate the encoder context
	AVCodecContext * context = avcodec_alloc_context3(codec);
	assert(context);
	//3. set the encoder parameters: width, height, time base, pixel format
	context->width = 352;
	context->height = 288;
	context->time_base = AVRational{1, 25};
	context->pix_fmt = AV_PIX_FMT_YUV420P;
	//4. open the encoder
	assert(avcodec_open2(context, codec, nullptr) >= 0);
	//5. allocate the frame and set its width, height and format
	AVFrame* frame = av_frame_alloc();
	assert(frame);
	frame->format = context->pix_fmt;
	frame->width = context->width;
	frame->height = context->height;
	//6. av_image_alloc allocates the image buffer for the given width, height and pixel format
	assert(av_image_alloc(frame->data, frame->linesize, context->width,
		context->height, context->pix_fmt, 32) >= 0);

	FILE* f = fopen("out.h264", "wb");
	
	AVPacket pkt;
	for(int i=0;i<25;i++)
	{
		av_init_packet(&pkt);
		pkt.data = nullptr;
		pkt.size = 0;
		int x, y;
		for (y = 0; y < context->height; y++) {
			for (x = 0; x < context->width; x++) {
				frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
			}
		}
		/* Cb and Cr */
		for (y = 0; y < context->height / 2; y++) {
			for (x = 0; x < context->width / 2; x++) {
				frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
				frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
			}
		}
		frame->pts = i;
		int got_output;
		//7. encode
		assert(avcodec_encode_video2(context, &pkt, frame, &got_output) >= 0);
		if(got_output)
		{
			fwrite(pkt.data, 1, pkt.size, f);
			av_packet_unref(&pkt);
		}
	}
	fclose(f);
	avcodec_close(context);
	av_free(context);
	av_freep(&frame->data[0]);
	av_frame_free(&frame);
	return 0;
}
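
avcodec_encode_video2 is deprecated in newer FFmpeg releases in favour of the send/receive API. A minimal sketch of the equivalent inner loop (same context, frame, pkt and f as above; after the last frame, sending a NULL frame and draining the remaining packets flushes the encoder):

	if (avcodec_send_frame(context, frame) >= 0)
	{
		while (avcodec_receive_packet(context, &pkt) >= 0)
		{
			fwrite(pkt.data, 1, pkt.size, f);
			av_packet_unref(&pkt);
		}
	}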

Extension: writing the output to an MP4 file

extern "C"
{
#include <libavutil/log.h>
#include <libavutil/opt.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <assert.h>
}

int main()
{
	avcodec_register_all();
	//1. find the encoder
	AVCodec * codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	assert(codec);
	//2. allocate the encoder context
	AVCodecContext * context = avcodec_alloc_context3(codec);
	assert(context);
	//3. set the encoder parameters: width, height, time base, pixel format
	context->width = 352;
	context->height = 288;
	context->time_base = AVRational{1, 25};
	context->pix_fmt = AV_PIX_FMT_YUV420P;
	//4. open the encoder
	assert(avcodec_open2(context, codec, nullptr) >= 0);
	//5. allocate the frame and set its width, height and format
	AVFrame* frame = av_frame_alloc();
	assert(frame);
	frame->format = context->pix_fmt;
	frame->width = context->width;
	frame->height = context->height;
	//6. av_image_alloc allocates the image buffer for the given width, height and pixel format
	assert(av_image_alloc(frame->data, frame->linesize, context->width,
		context->height, context->pix_fmt, 32) >= 0);

	//7. create the MP4 output context
	AVFormatContext* out_context = nullptr;
	assert(avformat_alloc_output_context2(&out_context, nullptr, nullptr, "out.mp4") >= 0);
	//8. create the output video stream
	AVStream* out_stream = avformat_new_stream(out_context, nullptr);
	out_stream->codecpar->width = context->width;
	out_stream->codecpar->height = context->height;
	out_stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
	out_stream->codecpar->codec_id = AV_CODEC_ID_H264;
	out_stream->codecpar->codec_tag = 0;
	//9. open the output I/O context and write the header
	assert(avio_open(&out_context->pb, "out.mp4", AVIO_FLAG_WRITE) >= 0);
	avformat_write_header(out_context, nullptr);
	
	AVPacket pkt;
	for(int i=0;i<25;i++)
	{
		av_init_packet(&pkt);
		pkt.data = nullptr;
		pkt.size = 0;
		int x, y;
		for (y = 0; y < context->height; y++) {
			for (x = 0; x < context->width; x++) {
				frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
			}
		}
		/* Cb and Cr */
		for (y = 0; y < context->height / 2; y++) {
			for (x = 0; x < context->width / 2; x++) {
				frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
				frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
			}
		}
		frame->pts = i;
		int got_output;
		//10. encode
		assert(avcodec_encode_video2(context, &pkt, frame, &got_output) >= 0);
		if(got_output)
		{
			out_stream = out_context->streams[0];
			
			pkt.pts = av_rescale_q_rnd(pkt.pts, context->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pkt.dts = av_rescale_q_rnd(pkt.dts, context->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pkt.duration = av_rescale_q(pkt.duration, context->time_base, out_stream->time_base);
			pkt.pos = -1;
			
			assert(av_interleaved_write_frame(out_context, &pkt) >= 0);
			av_packet_unref(&pkt);
		}
	}
	av_write_trailer(out_context);
	avio_closep(&out_context->pb);
	avcodec_close(context);
	av_free(context);
	av_freep(&frame->data[0]);
	av_frame_free(&frame);
	return 0;
}
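
One caveat about the loop above: libx264 usually buffers frames internally, so some packets only appear after the encoder is flushed. A sketch of a flush loop that could run before av_write_trailer (same variables as above):

	int got_output = 1;
	while (got_output)
	{
		av_init_packet(&pkt);
		pkt.data = nullptr;
		pkt.size = 0;
		//passing NULL as the frame drains the encoder's internal buffer
		if (avcodec_encode_video2(context, &pkt, nullptr, &got_output) < 0)
			break;
		if (got_output)
		{
			pkt.pts = av_rescale_q_rnd(pkt.pts, context->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pkt.dts = av_rescale_q_rnd(pkt.dts, context->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pkt.pos = -1;
			av_interleaved_write_frame(out_context, &pkt);
			av_packet_unref(&pkt);
		}
	}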


  2. Video to images
// study_ffmpeg.cpp : entry point of the console application.
//

#include <iostream>

#include "stdafx.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C"
{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}
#define INBUF_SIZE 4096

#define WORD uint16_t
#define DWORD uint32_t
#define LONG int32_t

#pragma pack(2)
typedef struct tagBITMAPFILEHEADER {
	WORD  bfType;
	DWORD bfSize;
	WORD  bfReserved1;
	WORD  bfReserved2;
	DWORD bfOffBits;
} BITMAPFILEHEADER, *PBITMAPFILEHEADER;


typedef struct tagBITMAPINFOHEADER {
	DWORD biSize;
	LONG  biWidth;
	LONG  biHeight;
	WORD  biPlanes;
	WORD  biBitCount;
	DWORD biCompression;
	DWORD biSizeImage;
	LONG  biXPelsPerMeter;
	LONG  biYPelsPerMeter;
	DWORD biClrUsed;
	DWORD biClrImportant;
} BITMAPINFOHEADER, *PBITMAPINFOHEADER;

void saveBMP(struct SwsContext *img_convert_ctx, AVFrame *frame, char *filename)
{
	//1. first convert YUV420 => RGB24:
	int w = frame->width;
	int h = frame->height;


	int numBytes = avpicture_get_size(AV_PIX_FMT_BGR24, w, h);
	uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));


	AVFrame *pFrameRGB = av_frame_alloc();
	avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, w, h);

	sws_scale(img_convert_ctx, frame->data, frame->linesize,
		0, h, pFrameRGB->data, pFrameRGB->linesize);

	//2. build the BITMAPINFOHEADER
	BITMAPINFOHEADER header;
	header.biSize = sizeof(BITMAPINFOHEADER);


	header.biWidth = w;
	header.biHeight = h*(-1);
	header.biBitCount = 24;
	header.biCompression = 0;
	header.biSizeImage = 0;
	header.biClrImportant = 0;
	header.biClrUsed = 0;
	header.biXPelsPerMeter = 0;
	header.biYPelsPerMeter = 0;
	header.biPlanes = 1;

	//3. build the BITMAPFILEHEADER (file header)
	BITMAPFILEHEADER bmpFileHeader = { 0, };
	//HANDLE hFile = NULL;
	DWORD dwTotalWriten = 0;
	DWORD dwWriten;

	bmpFileHeader.bfType = 0x4d42; //'BM';
	bmpFileHeader.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + numBytes;
	bmpFileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);

	FILE* pf = fopen(filename, "wb");
	fwrite(&bmpFileHeader, sizeof(BITMAPFILEHEADER), 1, pf);
	fwrite(&header, sizeof(BITMAPINFOHEADER), 1, pf);
	fwrite(pFrameRGB->data[0], 1, numBytes, pf);
	fclose(pf);


	//free resources
	av_free(buffer);
	av_frame_free(&pFrameRGB);
}

static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
	char *filename)
{
	FILE *f;
	int i;

	f = fopen(filename, "w");
	fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
	for (i = 0; i < ysize; i++)
		fwrite(buf + i * wrap, 1, xsize, f);
	fclose(f);
}

static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
	struct SwsContext *img_convert_ctx, AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
	int len, got_frame;
	char buf[1024];

	len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
	if (len < 0) {
		fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
		return len;
	}
	if (got_frame) {
		printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
		fflush(stdout);

		/* the picture is allocated by the decoder, no need to free it */
		snprintf(buf, sizeof(buf), "%s-%d.bmp", outfilename, *frame_count);

		/*
		pgm_save(frame->data[0], frame->linesize[0],
		frame->width, frame->height, buf);
		*/

		saveBMP(img_convert_ctx, frame, buf);

		(*frame_count)++;
	}
	return 0;
}

int main(int argc, char **argv)
{
	int ret;
	const char *filename, *outfilename;

	AVFormatContext *fmt_ctx = NULL;

	const AVCodec *codec;
	AVCodecContext *c = NULL;

	AVStream *st = NULL;
	int stream_index;

	int frame_count;
	AVFrame *frame;

	struct SwsContext *img_convert_ctx;

	AVPacket avpkt;


	filename = "out.mp4";
	outfilename = "";

	/* register all formats and codecs */
	av_register_all();

	/* open input file, and allocate format context */
	if (avformat_open_input(&fmt_ctx, filename, NULL, NULL) < 0) {
		fprintf(stderr, "Could not open source file %s\n", filename);
		exit(1);
	}

	/* retrieve stream information */
	if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
		fprintf(stderr, "Could not find stream information\n");
		exit(1);
	}

	/* dump input information to stderr */
	av_dump_format(fmt_ctx, 0, filename, 0);

	av_init_packet(&avpkt);

	ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if (ret < 0) {
		fprintf(stderr, "Could not find %s stream in input file '%s'\n",
			av_get_media_type_string(AVMEDIA_TYPE_VIDEO), filename);
		return ret;
	}

	stream_index = ret;
	st = fmt_ctx->streams[stream_index];

	/* find decoder for the stream */
	codec = avcodec_find_decoder(st->codecpar->codec_id);
	if (!codec) {
		fprintf(stderr, "Failed to find %s codec\n",
			av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
		return AVERROR(EINVAL);
	}
	c = avcodec_alloc_context3(NULL);
	if (!c) {
		fprintf(stderr, "Could not allocate video codec context\n");
		exit(1);
	}

	/* Copy codec parameters from input stream to output codec context */
	if ((ret = avcodec_parameters_to_context(c, st->codecpar)) < 0) {
		fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
			av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
		return ret;
	}

	/* open it */
	if (avcodec_open2(c, codec, NULL) < 0) {
		fprintf(stderr, "Could not open codec\n");
		exit(1);
	}

	img_convert_ctx = sws_getContext(c->width, c->height,
		c->pix_fmt,
		c->width, c->height,
		AV_PIX_FMT_BGR24,
		SWS_BICUBIC, NULL, NULL, NULL);

	if (img_convert_ctx == NULL)
	{
		fprintf(stderr, "Cannot initialize the conversion context\n");
		exit(1);
	}

	frame = av_frame_alloc();
	if (!frame) {
		fprintf(stderr, "Could not allocate video frame\n");
		exit(1);
	}

	frame_count = 0;
	while (av_read_frame(fmt_ctx, &avpkt) >= 0) {
		if (avpkt.stream_index == stream_index) {
			if (decode_write_frame(outfilename, c, img_convert_ctx, frame, &frame_count, &avpkt, 0) < 0)
				exit(1);
		}

		av_packet_unref(&avpkt);
	}

	avpkt.data = NULL;
	avpkt.size = 0;
	decode_write_frame(outfilename, c, img_convert_ctx, frame, &frame_count, &avpkt, 1);
	avformat_close_input(&fmt_ctx);

	sws_freeContext(img_convert_ctx);
	avcodec_free_context(&c);
	av_frame_free(&frame);

	return 0;
}