Extracting video keyframes with FFmpeg and saving them as images

This code targets the Windows environment. The comments inside the code come from the official FFmpeg example; they have been left in place for reference. Source download: http://download.csdn.net/detail/jw20082009jw/9720626

I recorded a fairly small MP4 clip myself. Since the company's goal is to process short videos, I have not yet investigated how this code behaves on large files.

The program is adapted from the official FFmpeg example demuxing_decoding.c. It demuxes and decodes an MP4 file into a raw video stream and a raw audio stream, and adds code that extracts the MP4 keyframes and saves them as JPEG images.


Two input arguments: the MP4 file to process, and the directory where the generated images and raw stream files are saved.

Output files: mp4.vr (raw video stream), mp4.ar (raw audio stream), and one .jpg file per extracted keyframe, named after that frame's PTS value.

PTS (presentation time stamp): the time at which a video frame should be displayed.
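Since the JPEG files are named after the raw PTS value, turning that value back into seconds requires the stream's time base. A minimal sketch, assuming the same old-style API as the program below (video_stream is the AVStream obtained from the demuxer):

#include <libavformat/avformat.h>

/* Sketch: convert a raw PTS into seconds using the stream time base.
 * With a time base of 1/90000, for example, a PTS of 90000 is 1.0 s. */
static double pts_to_seconds(int64_t pts, const AVStream *st)
{
    return pts * av_q2d(st->time_base); /* av_q2d() turns an AVRational into a double */
}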


Playing the .vr file: use ffplay; the exact command is printed when the program finishes (for my video the command is: ffplay -f rawvideo -pix_fmt yuv420p -video_size 320x240 F:\ffmpeg\ffmpegtest\mp4.vr).

The .ar file is played the same way, and the matching ffplay command is likewise printed at the end (for my audio: ffplay -f f32le -ac 1 -ar 8000 F:\ffmpeg\ffmpegtest\mp4.ar).
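These ffplay parameters must match the stream exactly, because a raw stream carries no headers: ffplay slices the file into frames purely from the options you pass. For yuv420p each frame occupies width × height × 3/2 bytes (320 × 240 × 3/2 = 115200 bytes in the example above). A one-line sketch of how libavutil computes this for any pixel format:

#include <libavutil/imgutils.h>

/* Per-frame byte count of a tightly packed yuv420p frame;
 * for 320x240 this returns 115200 (= 320 * 240 * 3 / 2). */
int frame_bytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, 320, 240, 1);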

The full code follows:

/**
 * @file
 * Demuxing and decoding example.
 *
 * Show how to use the libavformat and libavcodec API to demux and
 * decode audio and video data.
 * @example demuxing_decoding.c
 */

#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static int width, height;
static enum AVPixelFormat pix_fmt;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static char video_dst_filename[50] = {0};
static char audio_dst_filename[50] = {0};
static char *dst_pathname = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;
static FILE *video_dst_filetemp = NULL;

static uint8_t *video_dst_data[4] = {NULL};
static int      video_dst_linesize[4];
static int video_dst_bufsize;

static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;

static AVCodecContext* pCodecCtx = NULL;  
static AVCodec* pCodec = NULL;  
static AVPacket pkt_out;
static char file_name[50] = {0};
/* Enable or disable frame reference counting. You are not supposed to support
 * both paths in your application but pick the one most appropriate to your
 * needs. Look for the use of refcount in this example to see what are the
 * differences of API usage between them. */
static int refcount = 0;

static void save_picture_uninit(FILE *pFile, AVPacket *pkt)
{
	/* dump the encoded JPEG packet to disk, then release both resources */
	fwrite(pkt->data, sizeof(uint8_t), pkt->size, pFile);
	fclose(pFile);
	av_free_packet(pkt);
}

/* Build "<path>\<pts>.jpg", appending a backslash only when the path lacks one. */
static void generate_file_name(char *file_name, const char *file_path, long long pts)
{
	if (file_path[strlen(file_path) - 1] == '\\')
	{
		sprintf(file_name, "%s%lld.jpg", file_path, pts);
	}
	else
	{
		sprintf(file_name, "%s\\%lld.jpg", file_path, pts);
	}
}

static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;
    int decoded = pkt.size;

    *got_frame = 0;

    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            //fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
            return ret;
        }

        if (*got_frame) {

            if (frame->width != width || frame->height != height ||
                frame->format != pix_fmt) {
                /* To handle this change, one could call av_image_alloc again and
                 * decode the following frames into another rawvideo file. */
                fprintf(stderr, "Error: Width, height and pixel format have to be "
                        "constant in a rawvideo file, but the width, height or "
                        "pixel format of the input video changed:\n"
                        "old: width = %d, height = %d, format = %s\n"
                        "new: width = %d, height = %d, format = %s\n",
                        width, height, av_get_pix_fmt_name(pix_fmt),
                        frame->width, frame->height,
                        av_get_pix_fmt_name(frame->format));
                return -1;
            }

            printf("video_frame%s n:%d coded_n:%d\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number);

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          pix_fmt, width, height);

			//key frame: test the AV_PKT_FLAG_KEY bit instead of comparing the whole flags field
			if (pkt.flags & AV_PKT_FLAG_KEY) {
				int got_output = 0;
				av_new_packet(&pkt_out, pCodecCtx->width * pCodecCtx->height * 3);
				ret = avcodec_encode_video2(pCodecCtx, &pkt_out, frame, &got_output);
				if (ret < 0)
				{
					printf("Encode Error.\n");
					avcodec_close(pCodecCtx);
					av_free(pCodecCtx);
					av_free_packet(&pkt_out);
					return 0;
				}
				//the encoder produced a complete JPEG picture
				if (got_output)
				{
					generate_file_name(file_name, dst_pathname, pkt.pts);

					//Method 1
					//save_picture_init(&pFormatCtx,video_st,pCodecCtx,file_name);
					//save_picture_uinit_2(pFormatCtx,pkt_out);
					//Method 2
					video_dst_filetemp = fopen(file_name, "wb");
					if (!video_dst_filetemp) {
						fprintf(stderr, "Could not open destination file %s\n", file_name);
						av_free_packet(&pkt_out);
					}
					else
					{
						save_picture_uninit(video_dst_filetemp, &pkt_out);
						video_dst_filetemp = NULL;
					}
				}
				else
				{
					//no packet was produced; release the buffer from av_new_packet
					av_free_packet(&pkt_out);
				}
			}
            /* write to rawvideo file */
			fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
		
        }
    } else if (pkt.stream_index == audio_stream_idx) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            //fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
            return ret;
        }
        /* Some audio decoders decode only part of the packet, and have to be
         * called again with the remainder of the packet data.
         * Sample: fate-suite/lossless-audio/luckynight-partial.shn
         * Also, some decoders might over-read the packet. */
        decoded = FFMIN(ret, pkt.size);

        if (*got_frame) {
            size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
            /*
			printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
				   */

            /* Write the raw audio data samples of the first plane. This works
             * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
             * most audio decoders output planar audio, which uses a separate
             * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
             * In other words, this code will write only the first audio channel
             * in these cases.
             * You should use libswresample or libavfilter to convert the frame
             * to packed data. */
            fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
        }
    }

    /* If we use frame reference counting, we own the data and need
     * to de-reference it when we don't use it anymore */
    if (*got_frame && refcount)
        av_frame_unref(frame);

    return decoded;
}

static int open_codec_context(int *stream_idx,
                              AVCodecContext **dec_ctx, AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret, stream_index;
    AVStream *st;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        stream_index = ret;
        st = fmt_ctx->streams[stream_index];

        /* find decoder for the stream */
        dec = avcodec_find_decoder(st->codecpar->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }

        /* Allocate a codec context for the decoder */
        *dec_ctx = avcodec_alloc_context3(dec);
        if (!*dec_ctx) {
            fprintf(stderr, "Failed to allocate the %s codec context\n",
                    av_get_media_type_string(type));
            return AVERROR(ENOMEM);
        }

        /* Copy codec parameters from input stream to output codec context */
        if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
            fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
                    av_get_media_type_string(type));
            return ret;
        }

        /* Init the decoders, with or without reference counting */
        av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
        if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
        *stream_idx = stream_index;
    }

    return 0;
}

static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    int i;
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;

    for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        struct sample_fmt_entry *entry = &sample_fmt_entries[i];
        if (sample_fmt == entry->sample_fmt) {
            *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
            return 0;
        }
    }

    fprintf(stderr,
            "sample format %s is not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return -1;
}

int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 3 && argc != 4) {
        fprintf(stderr, "usage: %s [-refcount] input_file output_directory\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, writes the decoded\n"
                "video and audio to raw files (mp4.vr, mp4.ar) inside output_directory,\n"
                "and saves every video keyframe there as a JPEG named after its PTS.\n\n"
                "If the -refcount option is specified, the program uses the\n"
                "reference counting frame system which allows keeping a copy of\n"
                "the data for longer than one decode call.\n"
                "\n", argv[0]);
        exit(1);
    }
    if (argc == 4 && !strcmp(argv[1], "-refcount")) {
        refcount = 1;
        argv++;
    }
    src_filename = argv[1];
	dst_pathname = argv[2];
	sprintf(video_dst_filename,"%s\\mp4.vr",argv[2]);
	sprintf(audio_dst_filename,"%s\\mp4.ar",argv[2]);

    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    if (open_codec_context(&audio_stream_idx, &audio_dec_ctx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

	
	/* find the MJPEG encoder used for the JPEG snapshots */
	pCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
	if (!pCodec) 
	{
		printf("Call avcodec_find_encoder function failed!\n");
		return 0;
	}
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if (!pCodecCtx) 
	{
		printf("Call avcodec_alloc_context3 function failed!\n");
		return 0;
	}
	pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;  
	pCodecCtx->pix_fmt = AV_PIX_FMT_YUVJ420P;  
	//To output a different image size, scale the decoded frame (e.g. with libswscale) first
	pCodecCtx->width = video_dec_ctx->width;
	pCodecCtx->height = video_dec_ctx->height;
	pCodecCtx->time_base.num = 1;
	pCodecCtx->time_base.den = 15;
	//open the MJPEG encoder
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
	{  
		printf("Could not open codec.\n");  
		return 0; 
	}
	av_init_packet(&pkt_out);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_packet_unref(&orig_pkt);
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height,
               video_dst_filename);
    }

    if (audio_stream) {
        enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
        int n_channels = audio_dec_ctx->channels;
        const char *fmt;

        if (av_sample_fmt_is_planar(sfmt)) {
            const char *packed = av_get_sample_fmt_name(sfmt);
            printf("Warning: the sample format the decoder produced is planar "
                   "(%s). This example will output the first channel only.\n",
                   packed ? packed : "?");
            sfmt = av_get_packed_sample_fmt(sfmt);
            n_channels = 1;
        }

        if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
            goto end;

        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, n_channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }

end:
    avcodec_free_context(&video_dec_ctx);
    avcodec_free_context(&audio_dec_ctx);
    avcodec_free_context(&pCodecCtx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_frame_free(&frame);
    av_free(video_dst_data[0]);

    return ret < 0;
}
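
A note on API freshness: avcodec_decode_video2() and avcodec_decode_audio4(), used above, were deprecated in FFmpeg 3.1 in favor of the send/receive API. The following is a minimal sketch of the replacement decode loop, not part of the program above, with error handling reduced to the essentials:

static int decode_packet_modern(AVCodecContext *dec, const AVPacket *packet, AVFrame *out)
{
    /* feed one demuxed packet to the decoder */
    int ret = avcodec_send_packet(dec, packet);
    if (ret < 0)
        return ret;
    /* one packet may yield zero, one, or several frames */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec, out);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;   /* decoder needs more input, or is fully drained */
        if (ret < 0)
            return ret; /* a real decoding error */
        /* ... process one decoded frame here (write raw data, encode a JPEG, ...) ... */
    }
    return 0;
}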




FFmpeg is a powerful audio/video processing tool, and keyframe extraction is one of its important features. A keyframe (I-frame) carries a complete picture rather than a delta against other frames, which is what makes it useful for video editing and for grabbing stills. The same extraction can also be done purely from the command line:

1. Open a command prompt and change into the bin folder of the FFmpeg installation.
2. Run ffplay -i input.mp4 (where input.mp4 is the video to process) to see the basic stream information ffplay prints, such as resolution and frame rate.
3. To inspect frames interactively, pause ffplay with the space key and step frame by frame with the s key. Note that ffplay does not mark which frames are keyframes; use ffprobe in the next step for that.
4. Run ffprobe -select_streams v -show_frames -show_entries frame=pkt_pts_time,pict_type input.mp4 > output.txt to dump per-frame information for the video stream into output.txt. Every frame is listed; the pict_type field tells you whether a frame is a keyframe (I) or not (P or B).
5. Open output.txt in a text editor to locate the keyframes and their timestamps.
6. Extract the keyframes with: ffmpeg -i input.mp4 -vf "select='eq(pict_type,PICT_TYPE_I)',setpts=N/FRAME_RATE/TB" -vsync vfr keyframe_%03d.jpg. Here select='eq(pict_type,PICT_TYPE_I)' keeps only I-frames, setpts=N/FRAME_RATE/TB rewrites the timestamps so the selected frames are evenly spaced, and -vsync vfr enables variable-frame-rate output so frames are not duplicated. The keyframes are saved as keyframe_001.jpg, keyframe_002.jpg, and so on.

Adjust the parameters and paths to your own environment.
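For orientation, the ffprobe dump produced in step 4 is a series of [FRAME] sections, one per frame; the timestamp values below are made up for illustration:

[FRAME]
pkt_pts_time=0.000000
pict_type=I
[/FRAME]
[FRAME]
pkt_pts_time=0.040000
pict_type=P
[/FRAME]

Every entry whose pict_type is I corresponds to a keyframe.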