编译安装:
https://blog.csdn.net/zhouzhenhe2008/article/details/48531355
使用:
https://blog.csdn.net/li_wen01/article/details/62036585
https://blog.csdn.net/qqqq245425070/article/details/87529209
Makefile修改:
https://blog.csdn.net/a161619/article/details/78838108
FFMPEG_LIBS = -lavformat -lavdevice -lavcodec -lavutil -lswresample -lavfilter -lswscale -ldl -lm
直接使用ffmpeg:
https://blog.csdn.net/u011394059/article/details/78728809
ffmpeg -i video_name.mp4 -vf select='eq(pict_type\,I)' -vsync 2 -s 1920x1080 -f image2 core-%02d.jpeg
各个参数解释:
-i :输入文件,这里的话其实就是视频,
-vf:是一个命令行,表示过滤图形的描述, 选择过滤器select会选择帧进行输出:包括过滤器常量
pict_type和对应的类型:PICT_TYPE_I 表示是I帧,即关键帧。
-vsync 2:阻止每个关键帧产生多余的拷贝
-f image2 name_%02d.jpeg:将视频帧写入到图片中,样式的格式一般是:
“%d” 或者 “%0Nd”
-s:分辨率,格式为 宽x高,例如 1920x1080(注意是字母 x,不是 *)
提取MV Block
https://blog.csdn.net/ricky90/article/details/79436422
https://github.com/FFmpeg/FFmpeg/blob/release/4.1/doc/examples/extract_mvs.c
decoder.c
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#define __STDC_CONSTANT_MACROS
#include "avformat.h"
#include "avcodec.h"
#include "swscale.h"
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include "libavutil/log.h"
/*
 * Decode the first video frame of the input file and dump it as a JPEG
 * via writeJPEG().  Uses the legacy (pre-4.x) FFmpeg decode API, matching
 * the rest of this file.
 *
 * argv[1] must be the path of the input video.  Returns 0 on success,
 * -1 on any error.
 */
int main(int argc, char* argv[])
{
    AVFormatContext *pFormatCtx;
    int i, videoindex;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame, *pFrameYUV;
    uint8_t *out_buffer;
    AVPacket *packet;
    int ret, got_picture;
    struct SwsContext *img_convert_ctx;
    int frame_cnt;

    /* writeJPEG is defined later in this file; declare it here so the
     * call below is not an implicit declaration (invalid since C99). */
    int writeJPEG(AVFrame* frame, int width, int height);

    /* Guard against a missing argument — the original read argv[1]
     * unconditionally and crashed when run with no arguments. */
    if (argc < 2) {
        printf("Usage: %s <input video file>\n", argv[0]);
        return -1;
    }
    char* filepath = argv[1];  /* 输入文件路径 */
    printf("%s\n", filepath);

    av_register_all();          /* register muxers/demuxers and codecs */
    avformat_network_init();    /* enable network streams */
    pFormatCtx = avformat_alloc_context();

    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    /* Probe a little data so stream parameters are filled in. */
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        printf("Couldn't find stream information.\n");
        return -1;
    }

    /* Locate the first video stream. */
    videoindex = -1;
    for (i = 0; i < (int)pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    }
    if (videoindex == -1) {
        printf("Didn't find a video stream.\n");
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        return -1;
    }

    /* Print information about the input video.
     * duration and bit_rate are int64_t — printing them with %d was
     * undefined behavior; use PRId64.  nb_streams is unsigned → %u. */
    printf("AVFormatContext AVInputFormat name = %s \n", pFormatCtx->iformat->name);
    printf("Number of elements in AVFormatContext.streams = %u \n", pFormatCtx->nb_streams);
    printf("Duration of the stream, in AV_TIME_BASE fractional = %" PRId64 " \n", pFormatCtx->duration);
    printf("Total stream bitrate in %" PRId64 " bit/s \n", pFormatCtx->bit_rate);
    printf("picture width = %d \n", pCodecCtx->width);
    printf("picture height = %d \n", pCodecCtx->height);

    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    /* Attach out_buffer as the data planes of pFrameYUV. */
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
    packet = (AVPacket *)av_malloc(sizeof(AVPacket));

    printf("--------------- File Information ----------------\n");
    av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");

    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
            pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

    frame_cnt = 0;
    while (av_read_frame(pFormatCtx, packet) >= 0) {
        if (packet->stream_index == videoindex) {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0) {
                printf("Decode Error.\n");
                return -1;
            }
            if (got_picture) {
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                        pFrameYUV->data, pFrameYUV->linesize);
                printf("Decoded frame index: %d\n", frame_cnt);
                writeJPEG(pFrame, pCodecCtx->width, pCodecCtx->height);
                frame_cnt++;
                /* Release the packet before leaving the loop — the
                 * original break skipped av_free_packet and leaked it. */
                av_free_packet(packet);
                break;  /* only the first decoded frame is wanted */
            }
        }
        av_free_packet(packet);
    }

    /* Cleanup — also free packet and out_buffer, which leaked before. */
    av_free(packet);
    av_free(out_buffer);
    sws_freeContext(img_convert_ctx);
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    return 0;
}
/*
 * Encode one raw frame as a single JPEG file ("hello_world.jpg") using the
 * "singlejpeg" muxer and the legacy encode API.
 *
 * frame  — decoded picture to encode (unreferenced on success)
 * width  — picture width in pixels
 * height — picture height in pixels
 * Returns 0 on success, -1 on error.
 */
int writeJPEG(AVFrame* frame, int width, int height)
{
    const char* out_file = "hello_world.jpg";
    int ret = -1;
    AVFormatContext* output_cxt = NULL;

    /* avformat_alloc_output_context2() allocates the context itself —
     * the original's extra avformat_alloc_context() call leaked one. */
    if (avformat_alloc_output_context2(&output_cxt, NULL, "singlejpeg", out_file) < 0 || !output_cxt) {
        av_log(NULL, AV_LOG_ERROR, "不能打开文件 \n");
        return -1;
    }
    /* Open the AVIOContext for the output URL. */
    if (avio_open(&output_cxt->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
        av_log(NULL, AV_LOG_ERROR, "不能打开文件 \n");
        goto fail_free;
    }

    /* Create and configure the single video stream. */
    AVStream* stream = avformat_new_stream(output_cxt, NULL);
    if (stream == NULL) {
        av_log(NULL, AV_LOG_ERROR, "创建AVStream失败 \n");
        goto fail_close;
    }
    AVCodecContext* codec_cxt = stream->codec;
    codec_cxt->codec_id = output_cxt->oformat->video_codec;
    codec_cxt->codec_type = AVMEDIA_TYPE_VIDEO;
    codec_cxt->pix_fmt = AV_PIX_FMT_YUVJ420P;   /* full-range YUV for JPEG */
    codec_cxt->height = height;
    codec_cxt->width = width;
    codec_cxt->time_base.num = 1;
    codec_cxt->time_base.den = 25;

    av_dump_format(output_cxt, 0, out_file, 1);

    AVCodec* codec = avcodec_find_encoder(codec_cxt->codec_id);
    if (!codec) {
        av_log(NULL, AV_LOG_ERROR, "找不到编码器 \n");
        goto fail_close;
    }
    if (avcodec_open2(codec_cxt, codec, NULL) < 0) {
        av_log(NULL, AV_LOG_ERROR, "不能打开编码器 \n");
        goto fail_close;
    }
    avcodec_parameters_from_context(stream->codecpar, codec_cxt);

    avformat_write_header(output_cxt, NULL);

    int size = codec_cxt->width * codec_cxt->height;
    /* BUG FIX: the original declared `AVPacket *packet;` and passed the
     * uninitialized pointer straight to av_new_packet() — undefined
     * behavior / crash.  Use a stack AVPacket and pass its address. */
    AVPacket packet;
    if (av_new_packet(&packet, size * 3) < 0) {
        goto fail_close;
    }

    int got_picture = 0;
    int result = avcodec_encode_video2(codec_cxt, &packet, frame, &got_picture);
    if (result < 0) {
        av_log(NULL, AV_LOG_ERROR, "编码失败 \n");
        av_free_packet(&packet);
        goto fail_close;
    }
    printf("got_picture %d \n", got_picture);
    if (got_picture == 1) {
        /* Write the encoded packet to the output file. */
        result = av_write_frame(output_cxt, &packet);
    }
    av_free_packet(&packet);

    /* Finalize the file. */
    av_write_trailer(output_cxt);
    if (frame) {
        av_frame_unref(frame);
    }
    ret = 0;

fail_close:
    avio_close(output_cxt->pb);
fail_free:
    avformat_free_context(output_cxt);
    return ret;
}
Makefile:
OUT_APP = test
INCLUDE_PATH = /usr/local/include/
INCLUDE = -I$(INCLUDE_PATH)libavutil/ -I$(INCLUDE_PATH)libavdevice/ \
          -I$(INCLUDE_PATH)libavcodec/ -I$(INCLUDE_PATH)libswresample \
          -I$(INCLUDE_PATH)libavfilter/ -I$(INCLUDE_PATH)libavformat \
          -I$(INCLUDE_PATH)libswscale/
FFMPEG_LIBS = -lavformat -lavdevice -lavcodec -lavutil -lswresample -lavfilter -lswscale -ldl -lm
SDL_LIBS =
# Keep a space between the two lists: the original $(FFMPEG_LIBS)$(SDL_LIBS)
# would fuse "-lswscale" with the first SDL flag if SDL_LIBS were ever set.
LIBS = $(FFMPEG_LIBS) $(SDL_LIBS)
COMPILE_OPTS = $(INCLUDE)
C = c
OBJ = o
C_COMPILER = cc
C_FLAGS = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)
LINK = cc -o
LINK_OPTS = -lz -lm -lpthread
LINK_OBJ = decoder.o

.$(C).$(OBJ):
	$(C_COMPILER) -c $(C_FLAGS) $<

$(OUT_APP): $(LINK_OBJ)
	$(LINK) $@ $(LINK_OBJ) $(LIBS) $(LINK_OPTS)

# Mark clean as phony so a file named "clean" can never shadow the target.
.PHONY: clean
clean:
	-rm -rf *.$(OBJ) $(OUT_APP) core *.core *~ include/*~