使用ffmpeg录像,并保存为mp4文件,涉及到的数据结构和函数接口如下:
代码关键部分都有注释,这里不再过多叙述。
/**
用摄像头录像,并保存为mp4文件
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>      /* time(): recording duration (was missing) */
#include <inttypes.h>  /* PRId64: printing AVFrame.pts */
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
/* V4L2 capture source: libavdevice input-format name and the device node. */
const char* input_name= "video4linux2";
const char* file_name = "/dev/video0";
/* Capture size; overwritten in main() with the camera's actual resolution. */
int screen_w= 640;
int screen_h = 480;
/* Frame rate (fps) of the output mp4 video stream. */
#define OUT_STREAM_FRAME_RATE 25
//用于将编码器中剩余的AVPacket输出。
/**
 * Drain frames still buffered inside the encoder of the given output stream
 * and mux the resulting packets.
 *
 * Encoders with AV_CODEC_CAP_DELAY (e.g. x264 with B-frames) hold frames
 * back for reordering; feeding avcodec_encode_video2() a NULL frame makes
 * them emit those delayed packets one by one.
 *
 * @param fmt_context  output format context owning the stream
 * @param stream_index index of the video stream to flush
 * @return 0 on success (or when nothing needs flushing), <0 on error
 */
static int flush_encoder(AVFormatContext *fmt_context, unsigned int stream_index)
{
	AVCodecContext *enc_ctx = fmt_context->streams[stream_index]->codec;
	AVPacket enc_packet;
	int got_picture;
	int flushed = 0;
	int ret = 0;

	/* Encoders without the delay capability buffer nothing: done already. */
	if (!(enc_ctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
		printf("! AV_CODEC_CAP_DELAY\n");
		return 0;
	}

	while (1) {
		av_init_packet(&enc_packet);
		enc_packet.data = NULL; /* encoder allocates the payload */
		enc_packet.size = 0;

		/* NULL frame == flush request: one delayed packet per call. */
		ret = avcodec_encode_video2(enc_ctx, &enc_packet, NULL, &got_picture);
		if (ret < 0) {
			printf("flush_encoder: avcodec_encode_video2 \n");
			break;
		}
		if (!got_picture) {
			/* Fully drained. */
			ret = 0;
			printf("avcodec_encode_video2 got_picture=0\n");
			break;
		}
		/* BUG FIX: the flushed-frame counter was never incremented. */
		printf("flush encoder %d frame! size:%d\n", flushed++, enc_packet.size);

		/* Encoder stamps packets in codec time_base; the muxer expects
		 * stream time_base (see ffmpeg's muxing.c example). */
		av_packet_rescale_ts(&enc_packet, enc_ctx->time_base,
		                     fmt_context->streams[stream_index]->time_base);
		enc_packet.stream_index = stream_index;
		ret = av_write_frame(fmt_context, &enc_packet);
		av_packet_unref(&enc_packet);
		if (ret < 0)
			break;
	}
	return ret;
}
int main(int argc, char * argv[])
{
AVFormatContext *pInFmtContext = NULL;
AVInputFormat *inputFmt;
AVStream *in_stream;
AVCodecContext *pInCodecCtx;
AVCodec *pInCodec;
AVPacket *in_packet;
AVFrame *pInFrame;
AVFormatContext * pOutFmtContext;
AVOutputFormat *outputFmt;
AVStream * out_stream;
AVCodecContext * pOutCodecCtx;
AVCodec *pOutCodec;
AVPacket *out_packet;
AVFrame *pOutFrame;
struct SwsContext * img_convert_ctx;
AVDictionary *param = NULL;
int picture_size = 0;
FILE *fp;
int ret;
const char * out_file = "out.mp4";
int videoindex = -1;
int i;
int got_picture = 0;
int count = 125 * 2;
int framecnt = 1;
int stream_index =0;
unsigned char * out_buffer = NULL;
int start_time = 0, end_time = 0;
av_register_all();
avdevice_register_all();
pInFmtContext = avformat_alloc_context();
pOutFmtContext = avformat_alloc_context();
//解封装,查找video4linux2的视频流输入格式
if( (inputFmt = av_find_input_format (input_name)) == NULL){
printf("av_find_input_format failed\n");
return -1;
}
//根据输出文件来获取输出AVOutputFormat
if( (outputFmt = av_guess_format(NULL, out_file, NULL)) == NULL){
printf("av_guess_format failed\n");
return -1;
}
pOutFmtContext->oformat = outputFmt;//The output container format.
// Open an input stream and read the header, if pInFmtContext is NULL, avformat_open_input will malloc the memory.
if (avformat_open_input ( &pInFmtContext, file_name, inputFmt, NULL) < 0){
printf("avformat_open_input failed\n");
return -1;
}
//print
av_dump_format(pInFmtContext, 0, file_name, 0);
//Create and initialize a AVIOContext for accessing the resource indicated by url.
if( avio_open(&pOutFmtContext->pb, out_file, AVIO_FLAG_READ_WRITE) < 0){
printf("avio_open %s failed\n",out_file);
return -1;
}
if( avformat_find_stream_info(pInFmtContext, NULL) < 0){
printf("avformat_find_stream_info failed\n");
return -1;
}
for(i=0;i<pInFmtContext->nb_streams;i++){
if( pInFmtContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
videoindex = i;
break;
}
}
if( videoindex == -1){
printf("couldn't find a video stream\n");
return -1;
}
in_stream = pInFmtContext->streams[videoindex];
pInCodecCtx = in_stream->codec;
screen_w = pInCodecCtx->width;
screen_h = pInCodecCtx->height;
//Find a registered decoder with a matching codec ID.
pInCodec = avcodec_find_decoder(pInCodecCtx->codec_id);
//Initialize the AVCodecContext to use the given AVCodec.,打开并初始化解码器
if( avcodec_open2( pInCodecCtx, pInCodec,NULL) < 0){
printf("avcodec_open2 failed\n");
return -1;
}
//Add a new stream to a media file.
if( (out_stream = avformat_new_stream(pOutFmtContext, NULL)) == NULL){
printf("avformat_new stream failed\n");
return -1;
}
out_stream->time_base = (AVRational){1, OUT_STREAM_FRAME_RATE};
printf("out_stream:%x,%x\n", out_stream, pOutFmtContext->streams[videoindex]);
pOutCodecCtx = out_stream->codec;
pOutCodecCtx->codec_id = outputFmt->video_codec;//输出码流的编码id,是通过av_guess_format依据输出文件格式获取的。
pOutCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pOutCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
pOutCodecCtx->width = screen_w;// pInCodecCtx->width;
pOutCodecCtx->height = screen_h;// pInCodecCtx->height;
pOutCodecCtx->time_base = out_stream->time_base;
pOutCodecCtx->bit_rate = 400000;
pOutCodecCtx->gop_size = 10;
pOutCodecCtx->qmin = 10;
pOutCodecCtx->qmax = 51;
pOutCodecCtx->max_b_frames = 3;
av_dump_format(pOutFmtContext, 0, out_file, 1);
if( pOutFmtContext->oformat->flags & AVFMT_GLOBALHEADER){
printf("AVFMT_GLOBALHEADER\n");
//pOutCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
//Find a registered encoder with a matching codec ID.
pOutCodec = avcodec_find_encoder(pOutCodecCtx->codec_id);
if(pOutCodec == NULL){
printf("conn't find encoder\n");
return -1;
}
av_dict_set(¶m, "preset", "slow", 0);
av_dict_set(¶m, "tune", "zerolatency", 0);
//Initialize the AVCodecContext to use the given AVCodec.
if( avcodec_open2(pOutCodecCtx, pOutCodec, ¶m) < 0){
printf("avcodec_open2 failed2\n");
return -1;
}
pInFrame = av_frame_alloc();
pOutFrame = av_frame_alloc();
pOutFrame->format = pOutCodecCtx->pix_fmt;
pOutFrame->width = screen_w;
pOutFrame->height = screen_h;
printf("out_stream timebase:%d,%d\n", out_stream->time_base.num,out_stream->time_base.den);
printf("pOutCodecCtx timebase:%d,%d\n", pOutCodecCtx->time_base.num,pOutCodecCtx->time_base.den);
picture_size = av_image_get_buffer_size(pOutCodecCtx->pix_fmt, screen_w, screen_h, 1);
out_buffer = (unsigned char *)av_malloc(picture_size);
if( out_buffer == NULL){
printf("malloc failed\n");
return -1;
}
ret=av_image_fill_arrays((AVPicture *)pOutFrame->data, (AVPicture *)pOutFrame->linesize,
out_buffer, pOutCodecCtx->pix_fmt, screen_w, screen_h, 1);
in_packet = av_packet_alloc(); // in_packet = (AVPacket *)av_malloc(sizeof(AVPacket));
out_packet = av_packet_alloc();
av_new_packet(out_packet, picture_size);
img_convert_ctx = sws_getContext( pInCodecCtx->width, pInCodecCtx->height, pInCodecCtx->pix_fmt,
screen_w, screen_h, pOutCodecCtx->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
printf("pIncodec->framerate:%d, den:%d,%d---\n",pInCodecCtx->framerate,pInCodecCtx->time_base.num,pInCodecCtx->time_base.den);
printf("before avformat_write_header: out_stream timebase8:%d,%d\n", out_stream->time_base.num,out_stream->time_base.den);
avformat_write_header(pOutFmtContext, NULL);
printf("out_stream2:%x,%x\n", out_stream, pOutFmtContext->streams[videoindex]);
printf("after avformat_write_header: out_stream timebase9:%d,%d\n", out_stream->time_base.num,out_stream->time_base.den);
printf("after pOutCodecCtx timebase:%d,%d\n", pOutCodecCtx->time_base.num,pOutCodecCtx->time_base.den);
printf("out_stream index: %d\n", out_stream->index);
start_time = time(NULL);
printf(" start time:%d\n", start_time);
while(count--){
if( av_read_frame( pInFmtContext, in_packet) >= 0){
if( in_packet->stream_index == videoindex){
if( (ret = avcodec_decode_video2(pInCodecCtx, pInFrame, &got_picture, in_packet)) < 0){
printf("avcodec_decode_video2 failed\n");
return -1;
}
if( got_picture == 1){
sws_scale( img_convert_ctx, (const uint8_t * const)pInFrame->data, pInFrame->linesize, 0, pOutCodecCtx->height, pOutFrame->data, pOutFrame->linesize);
printf("pOutFrame:pts:%lld\n", pOutFrame->pts);
pOutFrame->pts = framecnt++;
got_picture = 0;
av_init_packet(out_packet);
if( (ret = avcodec_encode_video2(pOutCodecCtx, out_packet, pOutFrame, &got_picture)) < 0){
printf("avcodec_encode_video2 failed\n");
return -1;
}
if( got_picture == 1){
printf("in_stream timebase:%d/%d; \n", in_stream->time_base.num,in_stream->time_base.den);
printf("pOutCodecCtx timebase:%d/%d; out_stream timebase:%d/%d\n", pOutCodecCtx->time_base.num,pOutCodecCtx->time_base.den, out_stream->time_base.num,out_stream->time_base.den);
av_packet_rescale_ts(out_packet, pOutCodecCtx->time_base, out_stream->time_base);//see muxing_r.c
out_packet->stream_index = out_stream->index;
ret = av_write_frame(pOutFmtContext, out_packet);
//ret = av_interleaved_write_frame(pOutFmtContext, out_packet);
printf("size:%d\n", out_packet->size);
}
av_packet_unref(out_packet);
}
}
}else{
printf("av_read_frame failed\n");
}
av_packet_unref(in_packet);
}
end_time = time(NULL);
printf(" end time:%d,%d\n", end_time, end_time - start_time);
flush_encoder(pOutFmtContext, out_stream->index);
av_write_trailer(pOutFmtContext);
sws_freeContext( img_convert_ctx);
avcodec_close(pInCodecCtx);
avcodec_close(pOutCodecCtx);
avio_close(pOutFmtContext->pb);
av_frame_free(&pInFrame);
av_frame_free(&pOutFrame);
av_packet_free(&in_packet);
av_packet_free(&out_packet);
avformat_close_input(&pInFmtContext);
avformat_free_context(pInFmtContext);
avformat_free_context(pOutFmtContext);
return 0;
}
编译Makefile:
# Build the camera-to-mp4 recorder (main.c) against system FFmpeg.
OUT_APP      = test
INCLUDE_PATH = /usr/local/include/
# Headers are included as "libavformat/...", so the parent directory is
# the only -I path actually needed.
INCLUDE      = -I$(INCLUDE_PATH)

FFMPEG_LIBS = -lavformat -lavutil -lavdevice -lavcodec -lswresample -lavfilter -lswscale -lSDL2 -lSDL2_image
SDL_LIBS    =
# BUG FIX: insert a space between the two variables; the old
# "$(FFMPEG_LIBS)$(SDL_LIBS)" concatenated their expansions directly.
LIBS        = $(FFMPEG_LIBS) $(SDL_LIBS)

COMPILE_OPTS = $(INCLUDE)
C            = c
OBJ          = o
C_COMPILER   = cc
C_FLAGS      = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS) -g

LINK      = cc -g -o
LINK_OPTS = -lm -lpthread
LINK_OBJ  = main.c

# BUG FIX: separate $(LINK) from $@ with a space, and actually pass
# $(C_FLAGS) so the -I include path reaches the compiler (it was defined
# but never used before).
$(OUT_APP): $(LINK_OBJ)
	$(LINK) $@ $(C_FLAGS) $(LINK_OBJ) $(LIBS) $(LINK_OPTS)

clean:
	-rm -rf *.$(OBJ) $(OUT_APP) core *.core *~ *.jpeg
播放:
ffplay out.mp4
参考:FFmpeg 官方示例 doc/examples/muxing.c(代码注释中提到的 muxing_r.c 即以此为蓝本)。