FFmpeg study: recording video from the /dev/video0 device on Ubuntu and saving it as MP4

Reference: Lei Xiaohua's FFmpeg development article series: http://blog.csdn.net/column/details/ffmpeg-devel.html


1. The code below is commented in detail, so I will not write much prose.

2. This code was only just ported today and implements nothing beyond record-and-save; there are still bugs and unhandled details, and updates will follow.

3. This post only carries the latest code and does not keep earlier versions; if you need them, contact me (QQ 1356438802).

4. This demo is essentially a merge of two of Lei's demos:

"Simplest FFmpeg Device (Read Camera)" and "Simplest FFmpeg Video Encoder (YUV to H.264)"



=========================================================================================

Update log:

v1.0-20150915: just finished porting; first upload.

=========================================================================================



v1.0
/**
 * 最简单的基于FFmpeg的AVDevice例子(读取摄像头)
 * Simplest FFmpeg Device (Read Camera)
 *
 * 雷霄骅 Lei Xiaohua
 * leixiaohua1020@126.com
 * 中国传媒大学/数字电视技术
 * Communication University of China / Digital TV Technology
 * http://blog.csdn.net/leixiaohua1020
 *
 * This program captures, decodes and displays data from the local camera.
 * It is the simplest example of FFmpeg's libavdevice library; through it
 * you can learn how libavdevice is used. It is suitable for FFmpeg beginners.
 * On Windows this program can read the camera in 2 ways:
 *  1.vfwcap: the VfW (Video for Windows) capture input device. Note that
 *          the input URL is the capture driver number, ranging from 0 to 9.
 *  2.dshow: use DirectShow. Note that the camera on the author's machine
 *          is named "Integrated Camera"; change this to the name of the
 *          camera device on your own computer.
 * On Linux it uses video4linux2 to read the camera device.
 * 
 */
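
/* A quick sanity check of the v4l2 capture path before building this demo
 * (a suggestion, assuming the ffmpeg command-line tool is installed):
 *
 *     ffmpeg -f video4linux2 -i /dev/video0 -t 5 test.mp4
 *
 * If this records 5 seconds from the camera, the device node and its
 * permissions are fine, and any failure below is in the code itself.
 */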


#include "al_common.h"

#define __STDC_CONSTANT_MACROS	

//Linux...
#ifdef __cplusplus
extern "C"
{
#endif

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
#include <SDL/SDL.h>

#ifdef __cplusplus
};
#endif



static int flush_encoder(AVFormatContext *fmt_context, unsigned int stream_index);

int main(int argc, char* argv[])
{
	int				i;
	
	int				videoindex;			// index of the video stream in the input
	AVFormatContext	*pInFmtContext;		// context describing the input stream
	AVCodecContext	*pInCodecContext;	// codec parameters of the input video stream
	AVCodec			*pInCodec;			// decoder needed for the input video stream
	AVInputFormat 	*input_fmt;			// input (capture device) format
	
	AVFormatContext* pOutFmtContext;		// context describing the output stream
	AVCodecContext* pOutCodecContext;		// codec parameters of the output video stream
	AVCodec* pOutCodec;						// encoder needed for the output video stream
	AVOutputFormat* output_fmt;				// output (container) format
	
	AVStream* out_vd_stream;				// AVStream holds the info of one video/audio stream
	AVPacket out_packet;					// compressed data packet
	
	
	const char* out_file = "luo.mp4";
	
	av_register_all();					// register all codecs, muxers and demuxers
	avformat_network_init();			// initialize network protocols for streaming
	
	// allocate the input format context
	pInFmtContext = avformat_alloc_context();
	
	// allocate the output format context
	pOutFmtContext = avformat_alloc_context();
	
	// initialize the libavdevice library
	avdevice_register_all();

    // find the video4linux2 input format
	input_fmt = av_find_input_format("video4linux2");
	
	// guess the output (container) format from the output file name
	output_fmt = av_guess_format(NULL, out_file, NULL);
	pOutFmtContext->oformat = output_fmt;
	
	/*
	Open the input stream: initialize pInFmtContext from the device file
	"/dev/video0" using input_fmt. In other words, pInFmtContext is the video
	stream read from /dev/video0 in the input_fmt format. The 4th parameter of
	avformat_open_input is an options dictionary for the input format (e.g.
	the resolution); NULL here means no options.
	*/
	if(avformat_open_input(&pInFmtContext, "/dev/video0", input_fmt, NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
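	
	/*
	A sketch (not used in this v1.0 code): capture options such as the
	resolution could be passed through that 4th parameter instead of NULL:
	
		AVDictionary *opts = NULL;
		av_dict_set(&opts, "video_size", "640x480", 0);
		avformat_open_input(&pInFmtContext, "/dev/video0", input_fmt, &opts);
		av_dict_free(&opts);
	
	v2.0 below does exactly this.
	*/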
	
	/*
	Open the output stream out_file (similar to opening the input):
	initialize the output from output_fmt obtained above and the form of
	out_file (which may be RTMP/UDP/TCP/file).
	*/
	if (avio_open(&pOutFmtContext->pb, out_file, AVIO_FLAG_READ_WRITE) < 0){
		printf("Failed to open output file! \n");
		return -1;
	}


	// probe all stream information in the input
	if(avformat_find_stream_info(pInFmtContext,NULL)<0)
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}
	
	// find the video stream in the input and save its index in videoindex
	videoindex=-1;
	for(i=0; i<pInFmtContext->nb_streams; i++) 
		if(pInFmtContext->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
		{
			videoindex=i;
			break;
		}
	if(videoindex==-1)
	{
		printf("Couldn't find a video stream.\n");
		return -1;
	}
	
	// create a new video stream in the output
	out_vd_stream = avformat_new_stream(pOutFmtContext, 0);
	if (out_vd_stream == NULL){
		return -1;
	}
	out_vd_stream->time_base.num = 1; 
	out_vd_stream->time_base.den = 25; 
	
	// take the codec parameters of the input video stream
	pInCodecContext=pInFmtContext->streams[videoindex]->codec;
	LOGI("--line %d--in_w = %d\t in_h = %d\t fmt = %d\n", __LINE__, pInCodecContext->width, pInCodecContext->height, pInCodecContext->pix_fmt);

	// find the decoder matching the codec ID in the stream parameters
	pInCodec=avcodec_find_decoder(pInCodecContext->codec_id);
	if(pInCodec==NULL)
	{
		printf("Codec not found.\n");
		return -1;
	}
	
	// open and initialize the pInCodec decoder
	if(avcodec_open2(pInCodecContext, pInCodec,NULL)<0)
	{
		printf("Could not open codec.\n");
		return -1;
	}
	LOGI("--line %d--in_w = %d\t in_h = %d't fmt = %d\n", __LINE__, pInCodecContext->width, pInCodecContext->height, pInCodecContext->pix_fmt);

	// get the address of the output stream's codec parameters, then fill them in
	pOutCodecContext = out_vd_stream->codec;
	
	pOutCodecContext->codec_id = output_fmt->video_codec;		// encoder ID
	pOutCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;			// stream type: video or audio
	pOutCodecContext->pix_fmt = PIX_FMT_YUV420P;				// pixel format of the video frames
	pOutCodecContext->width = pInCodecContext->width;  			// frame width (same as the input stream)
	pOutCodecContext->height = pInCodecContext->height;			// frame height (same as the input stream)
	pOutCodecContext->time_base.num = 1;  
	pOutCodecContext->time_base.den = 25;  						// set the frame rate to 25
	pOutCodecContext->bit_rate = 400000;						// bit rate  
	pOutCodecContext->gop_size=250;								// GOP size: insert an I-frame every 250 frames
	//H264
	//pOutCodecContext->me_range = 16;
	//pOutCodecContext->max_qdiff = 4;
	//pOutCodecContext->qcompress = 0.6;
	pOutCodecContext->qmin = 10;
	pOutCodecContext->qmax = 51;

	//Optional Param
	// maximum number of B-frames allowed between two non-B frames
	pOutCodecContext->max_b_frames=3;	

	//Show some Information
	// dump the output stream information to the terminal
	av_dump_format(pOutFmtContext, 0, out_file, 1);
	
	// find the encoder matching the codec ID
	pOutCodec = avcodec_find_encoder(pOutCodecContext->codec_id);
	if (!pOutCodec)
	{
		printf("Can not find encoder! \n");
		return -1;
	}
	
	//------------------------------------------------------------------------------
	// set some encoder parameters
	AVDictionary *param = 0;
	//H.264
	if(pOutCodecContext->codec_id == AV_CODEC_ID_H264) {
		av_dict_set(&param, "preset", "slow", 0);
		av_dict_set(&param, "tune", "zerolatency", 0);
		//av_dict_set(&param, "profile", "main", 0);
	}
	//H.265
	if(pOutCodecContext->codec_id == AV_CODEC_ID_H265){
		av_dict_set(&param, "preset", "ultrafast", 0);
		av_dict_set(&param, "tune", "zerolatency", 0);
	}
	// open and initialize the pOutCodec encoder
	if (avcodec_open2(pOutCodecContext, pOutCodec, &param) < 0)
	{
		printf("Failed to open encoder! \n");
		return -1;
	}
	//------------------------------------------------------------------------------
	
	
	/*
	AVFrame stores:
	1. raw data (uncompressed: YUV/RGB for video, PCM for audio)
	2. frame information
	*/
	AVFrame	*pInFrame;				// frame taken from the input video stream
	AVFrame	*pOutFrame;				// frame after conversion to YUV420P
	
	// allocate the frames
	pInFrame = av_frame_alloc();
	pOutFrame = av_frame_alloc();
	
	/*
	Since pInFrame comes from the video stream, the decode function allocates
	its frame data buffers automatically, while pOutFrame is the
	format-converted frame and must be given a buffer in advance.
	**Note:
	Almost all encoders require YUV420P as the source data format, so when
	the frames captured from the video device are not in this format,
	libswscale must convert the format and resolution to YUV420P before
	they can be compressed by the encoder.
	Decoding: AVStream --> AVPacket --> AVFrame (an AVFrame is uncompressed and can be displayed directly)
	Encoding: AVFrame --> AVPacket --> AVStream
	*/
	int buf_size;
	uint8_t* out_buf;
	
	//avpicture_get_size(target format, target width, target height)
	buf_size = avpicture_get_size(pOutCodecContext->pix_fmt, pOutCodecContext->width, pOutCodecContext->height);
	out_buf = (uint8_t *)av_malloc(buf_size);
	avpicture_fill((AVPicture *)pOutFrame, out_buf, pOutCodecContext->pix_fmt, pOutCodecContext->width, pOutCodecContext->height);
	
	
	int ret, got_picture;
	
	/* struct holding compressed encoded data and related info; an AVFrame, by contrast, is uncompressed */
	AVPacket *in_packet=(AVPacket *)av_malloc(sizeof(AVPacket));
	
	// allocate a packet big enough for one frame
	av_new_packet(&out_packet,buf_size);
	
	// write the container header to the output
	avformat_write_header(pOutFmtContext,NULL);
  
	/*
	libswscale is a library for processing picture pixel data; it performs
	pixel format conversion, image scaling and so on.
	sws_getContext():  initialize an SwsContext.
	sws_scale():       process the image data.
	sws_freeContext(): free an SwsContext.
	sws_getContext() can also be replaced by sws_getCachedContext(). It is
	an almost "universal" pixel-processing library.
	
	srcW:      width of the source image
	srcH:      height of the source image
	srcFormat: pixel format of the source image
	dstW:      width of the destination image
	dstH:      height of the destination image
	dstFormat: pixel format of the destination image
	flags:     scaling algorithm to use
	Returns the created SwsContext on success, otherwise NULL.
	*/
	struct SwsContext *img_convert_context;
	
	// img_convert_context describes the conversion between the two formats: pixel format, resolution, scaling algorithm...
	img_convert_context = sws_getContext(pInCodecContext->width, pInCodecContext->height, pInCodecContext->pix_fmt, pOutCodecContext->width, pOutCodecContext->height, pOutCodecContext->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL); 

	int pix_size;
	pix_size = pOutCodecContext->width * pOutCodecContext->height;
	
	//================================================
	int framenum=500;
	int framecnt=0;	//Frames to encode: counter of successfully encoded frames

	for (;;) 
	{
		/*
		Read one video frame (or several audio frames) from the stream. When
		decoding video, each frame first needs a call to av_read_frame() to
		obtain one compressed packet, which can then be decoded (in H.264 one
		compressed frame usually corresponds to one NAL).
		A packet holds compressed data.
		*/
		if(av_read_frame(pInFmtContext, in_packet)>=0)
		{
			// if the packet belongs to the video stream, decode it
			if(in_packet->stream_index == videoindex)
			{
				/*
				Take one uncompressed AVFrame out of the compressed packet:
				input a compressed AVPacket, output a decoded AVFrame.
				*/
				ret = avcodec_decode_video2(pInCodecContext, pInFrame, &got_picture, in_packet);
				if(ret < 0)
				{
					LOGE("Decode Error.\n");
					av_free_packet(in_packet);
					continue;
				}
				
				// successfully decoded one frame from the input video stream
				if(got_picture)
				{				
					// convert the frame format (to YUV420P)
					sws_scale(img_convert_context, (const uint8_t* const*)pInFrame->data, pInFrame->linesize, 0, pInCodecContext->height, pOutFrame->data, pOutFrame->linesize);

					//PTS: frame timestamp
					pOutFrame->pts = framecnt;
					framecnt++;
					if(framecnt > framenum)
					{
						LOGE("framecnt > %d \n", framenum);
						av_free_packet(in_packet);
						break;
					}
					
					// start encoding
					got_picture = 0;
					/*
					Encode the frame into a packet: input one frame, output one packet.
					**Note:
					Almost all encoders require YUV420P as the source data format,
					so frames that are not in this format must first be converted
					with libswscale before they can be compressed.
					Decoding: AVStream --> AVPacket --> AVFrame (an AVFrame is uncompressed and can be displayed directly)
					Encoding: AVFrame --> AVPacket --> AVStream
					*/
					ret = avcodec_encode_video2(pOutCodecContext, &out_packet, pOutFrame, &got_picture);
					if(ret < 0)
					{
						LOGE("Failed to encode! \n");
						av_free_packet(in_packet);
						continue;
					}
					if (got_picture == 1)
					{
						LOGI("Succeed to encode frame: %5d\tsize:%5d \tindex = %d\n", framecnt, out_packet.size, out_vd_stream->index);
												
						out_packet.stream_index = out_vd_stream->index;		// mark which video/audio stream the packet belongs to
						
						// write the video packet to the output stream
						ret = av_write_frame(pOutFmtContext, &out_packet);
						
						// free the packet
						av_free_packet(&out_packet);
					}
				}
			}
			av_free_packet(in_packet);
		}
		else
		{
			
			break;
		}
	}
	
	//=========================================
	// flush the remaining frames out of the encoder into the output file, so no frames are lost
	ret = flush_encoder(pOutFmtContext, 0);
	if (ret < 0) 
	{
		LOGE("Flushing encoder failed\n");
	}
	
	LOGI("av_write_trailer\n");
	//往输出码流中写入tail
	av_write_trailer(pOutFmtContext);

	sws_freeContext(img_convert_context);


	//Clean
	if (out_vd_stream)
	{
		// close the output video stream's encoder
		avcodec_close(pOutCodecContext);
		// free the frame
		av_free(pOutFrame);
		// free the buffer
		av_free(out_buf);
	}
	// close the output file
	avio_close(pOutFmtContext->pb);
	// free the output context
	avformat_free_context(pOutFmtContext);


	//av_free(out_buffer);
	av_free(pInFrame);
	// close the input video stream's decoder
	avcodec_close(pInCodecContext);
	
	// close the input stream
	avformat_close_input(&pInFmtContext);
	return 0;
}


static int flush_encoder(AVFormatContext *fmt_context, unsigned int stream_index)
{
	int ret;
	int got_picture;
	AVPacket enc_packet;
	
	if (!(fmt_context->streams[stream_index]->codec->codec->capabilities & CODEC_CAP_DELAY))
		return 0;
	
	while (1) 
	{
		enc_packet.data = NULL;
		enc_packet.size = 0;
		av_init_packet(&enc_packet);
		
		ret = avcodec_encode_video2 (fmt_context->streams[stream_index]->codec, &enc_packet,
			NULL, &got_picture);
			
		if (ret < 0)
			break;
		if (!got_picture)
		{
			ret=0;
			break;
		}
		LOGI("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_packet.size);
		/* mux encoded frame */
		ret = av_write_frame(fmt_context, &enc_packet);
		if (ret < 0)
			break;
	}
	return ret;
}




=========================================================================================

Update log:

v2.0-20151019: 

1. After two weeks of debugging, recording finally runs stably.

2. The v1.0 source had problems both with freeing allocated memory and with the recorded video's timeline. From my own understanding (not necessarily correct; I am only trying to get to the bottom of things) and from experiment, the allocation and release of some buffers follow fixed pairing rules, especially for the AVFrame and AVPacket structures; see the first sketch after this list.

3. Also, in v1.0 the packets written out by av_write_frame carried no proper timestamps, so the recorded video had no timeline and could not be played back. After studying the official FFmpeg example muxing.c I made the corresponding changes (the second sketch below); video recorded with v2.0 plays normally and the frame count is correct.

4. The remaining changes are small optimizations of the data structures.

5. Because the code was moved back and forth between environments, the character set got mixed up at some point and the v2.0 comments turned into mojibake; the comments below mirror the v1.0 ones.
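
A minimal sketch of the pairing rules item 2 refers to, as I understand them for the FFmpeg version used here (variable names are illustrative):

	AVFrame  *frm = av_frame_alloc();        // pairs with av_frame_free(&frm)
	uint8_t  *buf = av_malloc(buf_size);     // pairs with av_free(buf)
	AVPacket *pkt = av_malloc(sizeof(*pkt)); // the struct itself pairs with av_free(pkt)
	av_new_packet(pkt, buf_size);            // the payload pairs with av_free_packet(pkt)
	// i.e. av_free_packet() releases only the packet's data buffer; an AVPacket
	// struct obtained with av_malloc() still needs its own av_free().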

Source package attached (platform: ubuntu-64). Link: http://pan.baidu.com/s/1jvzPk  password: ht2q
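
A sketch of the timestamp fix from item 3, following the official muxing.c example (names shortened from the v2.0 code below):

	pOutFrame->pts = FrameCount++;          // PTS counted in the encoder time base {1, 25}
	avcodec_encode_video2(pOutCodecContext, out_packet, pOutFrame, &got_picture);
	if (got_picture) {
		// convert PTS/DTS/duration from the encoder time base to the
		// stream time base chosen by the MP4 muxer
		av_packet_rescale_ts(out_packet, pOutCodecContext->time_base, out_vd_stream->time_base);
		out_packet->stream_index = out_vd_stream->index;
		av_write_frame(pOutFmtContext, out_packet);
	}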

=========================================================================================


v2.0
#include "record_camera.h"



//Linux...
#ifdef __cplusplus
extern "C"
{
#endif

#define STREAM_FRAME_RATE 25 // 25 fps



static struct RecordContext *gRecordContext;		

static int printf_array(uint8_t *array, int size);	// defined below

static int flush_encoder(AVFormatContext *fmt_context, unsigned int stream_index)
{
	int ret;
	int got_picture;
	AVPacket encode_packet;
	
	if (!(fmt_context->streams[stream_index]->codec->codec->capabilities & CODEC_CAP_DELAY))
		return 0;
	
	while (1) 
	{
		encode_packet.data = NULL;
		encode_packet.size = 0;
		av_init_packet(&encode_packet);
		
		ret = avcodec_encode_video2 (fmt_context->streams[stream_index]->codec, &encode_packet,
			NULL, &got_picture);
			
		if (ret < 0)
			break;
		if (!got_picture)
		{
			ret=0;
			break;
		}
		LOGI("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", encode_packet.size);
		/* mux encoded frame */
		ret = av_write_frame(fmt_context, &encode_packet);
		if (ret < 0)
			break;
	}
	return ret;
}

int SetShowBufferAddr(uint8 *ShowBuffer)
{
	// save the address of the display (preview) buffer
	if(NULL == ShowBuffer)
	{
		return -1;
	}
	else
	{
		gRecordContext->ShowBuffer = ShowBuffer;
	}

	return 0;
}


int RecordInit(char *RecordFile, int IsShow)
{
	int i;
	int ret = -1;
	static char file[64] = {0};	// static: out_file_name keeps pointing at it after RecordInit returns
	AVDictionary *option = NULL;
	AVDictionary *param = NULL;
	
	gRecordContext = (struct RecordContext *)calloc(1, sizeof(struct RecordContext));
	ERROR(NULL == gRecordContext, err1, "calloc gRecordContext");
	
	//memset(gRecordContext, 0, sizeof(struct RecordContext));
	LOGI("RecordInit start\n");
	
	
	// initialize the output file name
	if(RecordFile == NULL)
	{
		LOGI("create a random file to record video\n");		
		srand((unsigned)time(NULL));
		sprintf(file, "/storage/sdcard0/Download/0917-%d-%d.mp4", rand()%10, rand()%10);
		gRecordContext->out_file_name = file;
	}
	else
	{
		gRecordContext->out_file_name = RecordFile;
	}
	
	gRecordContext->FrameCount = 0;						// counter of successfully encoded frames
	gRecordContext->IsShow = IsShow;
	gRecordContext->device_name = "/dev/video0";
	
	av_register_all();					// register all codecs, muxers and demuxers
	avformat_network_init();			// initialize network protocols for streaming
	
	// allocate the input format context
	gRecordContext->pInFmtContext = avformat_alloc_context();
	
	// allocate the output format context
	gRecordContext->pOutFmtContext = avformat_alloc_context();
	ERROR(((gRecordContext->pInFmtContext == NULL) || (gRecordContext->pOutFmtContext == NULL)), err2, "avformat_alloc_context");

	// initialize the libavdevice library
	avdevice_register_all();

    // find the video4linux2 input format
	gRecordContext->input_fmt = av_find_input_format("video4linux2");
	ERROR((gRecordContext->input_fmt == NULL), err3, "Couldn't av_find_input_format\n");
	
	// guess the output (container) format from the output file name
	gRecordContext->output_fmt = av_guess_format(NULL, gRecordContext->out_file_name, NULL);
	gRecordContext->pOutFmtContext->oformat = gRecordContext->output_fmt;
	
	//------------------------------------------------------------------------------
	// set some options for the input video stream

	av_dict_set(&option, "video_size", "640x480", 0);		// set the capture resolution
	av_dict_set(&option, "pixel_format", "mjpeg", 0);	
	//------------------------------------------------------------------------------	
	/*
	Open the input stream: initialize pInFmtContext from the device file
	"/dev/video0" using input_fmt. The 4th parameter of avformat_open_input
	is the options dictionary (resolution etc.) built above.
	*/
	ret = access(gRecordContext->device_name, F_OK);
	ERROR(ret < 0, err3, "device does not exist!\n");
	
	ret = avformat_open_input(&gRecordContext->pInFmtContext, gRecordContext->device_name, gRecordContext->input_fmt, &option);
	ERROR((ret != 0), err3, "Couldn't open input stream.\n");

	/*
	Open the output stream (similar to opening the input): initialize the
	output from output_fmt obtained above and the form of the output file
	name (which may be RTMP/UDP/TCP/file).
	*/
	ret = avio_open(&gRecordContext->pOutFmtContext->pb, gRecordContext->out_file_name, AVIO_FLAG_READ_WRITE);
	ERROR(ret < 0, err7, "Failed to open output file! \n");


	// probe all stream information in the input
	ret = avformat_find_stream_info(gRecordContext->pInFmtContext,NULL);
	ERROR(ret < 0, err8, "Couldn't find stream information.\n");	
	
	// find the video stream in the input and save its index in videoindex
	gRecordContext->videoindex = -1;
	for(i = 0; i < gRecordContext->pInFmtContext->nb_streams; i++)
	{
		if(gRecordContext->pInFmtContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			gRecordContext->videoindex = i;
			break;
		}		
	}
		
	ERROR((gRecordContext->videoindex == -1), err9, "Couldn't find a video stream.\n");
	
	
	// create a new video stream in the output
	gRecordContext->out_vd_stream = avformat_new_stream(gRecordContext->pOutFmtContext, 0);
	ERROR((gRecordContext->out_vd_stream == NULL), err10, "avformat_new_stream");
	

	gRecordContext->out_vd_stream->time_base = (AVRational){1, STREAM_FRAME_RATE}; 
		
	
	// take the codec parameters of the input video stream
	gRecordContext->pInCodecContext = gRecordContext->pInFmtContext->streams[gRecordContext->videoindex]->codec;
	LOGI("--line %d-- in_w = %d\t in_h = %d\t in_fmt = %d\t in_encode = %d\n",	\
	__LINE__, gRecordContext->pInCodecContext->width, gRecordContext->pInCodecContext->height,	\
	gRecordContext->pInCodecContext->pix_fmt, gRecordContext->pInCodecContext->codec_id);

	// find the decoder matching the codec ID in the stream parameters
	gRecordContext->pInCodec = avcodec_find_decoder(gRecordContext->pInCodecContext->codec_id);
	ERROR((gRecordContext->pInCodec == NULL), err11, "Codec not found.\n");	
	
	// open and initialize the pInCodec decoder
	ret = avcodec_open2(gRecordContext->pInCodecContext, gRecordContext->pInCodec,NULL);
	ERROR(ret < 0, err12, "Could not open input codec.\n");
	
	LOGI("--line %d-- in_w = %d\t in_h = %d\t in_fmt = %d\t in_encode = %d\n",	\
	__LINE__, gRecordContext->pInCodecContext->width, gRecordContext->pInCodecContext->height,	\
	gRecordContext->pInCodecContext->pix_fmt, gRecordContext->pInCodecContext->codec_id);
	
	// get the address of the output stream's codec parameters, then fill them in
	gRecordContext->pOutCodecContext = gRecordContext->out_vd_stream->codec;
	
	gRecordContext->pOutCodecContext->codec_id = gRecordContext->output_fmt->video_codec;		// encoder ID
	gRecordContext->pOutCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;						// stream type: video or audio
	gRecordContext->pOutCodecContext->pix_fmt = PIX_FMT_YUV420P;								// pixel format of the video frames
	gRecordContext->pOutCodecContext->width = gRecordContext->pInCodecContext->width;  		// frame width (same as the input stream)
	gRecordContext->pOutCodecContext->height = gRecordContext->pInCodecContext->height;		// frame height (same as the input stream)
	gRecordContext->pOutCodecContext->time_base = gRecordContext->out_vd_stream->time_base;
	gRecordContext->pOutCodecContext->bit_rate = 400000;							// bit rate
	gRecordContext->pOutCodecContext->gop_size=250;								// GOP size: insert an I-frame every 250 frames
	
	LOGI("--line %d-- out_w = %d\t out_h = %d\t out_fmt = %d\t out_encode = %d\n",	\
	__LINE__, gRecordContext->pOutCodecContext->width, gRecordContext->pOutCodecContext->height,	\
	gRecordContext->pOutCodecContext->pix_fmt, gRecordContext->pOutCodecContext->codec_id);

	//H264
	//pOutCodecContext->me_range = 16;
	//pOutCodecContext->max_qdiff = 4;
	//pOutCodecContext->qcompress = 0.6;
	gRecordContext->pOutCodecContext->qmin = 10;
	gRecordContext->pOutCodecContext->qmax = 51;

	//Optional Param
	// maximum number of B-frames allowed between two non-B frames
	gRecordContext->pOutCodecContext->max_b_frames=3;

	/* Some formats want stream headers to be separate. */
    if (gRecordContext->pOutFmtContext->oformat->flags & AVFMT_GLOBALHEADER)
	{
		LOGI("AVFMT_GLOBALHEADER\n");
		gRecordContext->pOutCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;		
	}
        	

	//Show some Information
	// dump the output stream information to the terminal (last arg: 1 = output, 0 = input)
	av_dump_format(gRecordContext->pOutFmtContext, 0, gRecordContext->out_file_name, 1);
	
	// find the encoder matching the codec ID
	gRecordContext->pOutCodec = avcodec_find_encoder(gRecordContext->pOutCodecContext->codec_id);
	ERROR(!gRecordContext->pOutCodec, err13, "Can not find encoder! \n");
		
	//------------------------------------------------------------------------------
	// set some encoder parameters

	//H.264
	if(gRecordContext->pOutCodecContext->codec_id == AV_CODEC_ID_H264) {
		av_dict_set(&param, "preset", "slow", 0);
		av_dict_set(&param, "tune", "zerolatency", 0);
		av_dict_set(&param, "profile", "main", 0);
	}
	//H.265
	if(gRecordContext->pOutCodecContext->codec_id == AV_CODEC_ID_H265){
		av_dict_set(&param, "preset", "ultrafast", 0);
		av_dict_set(&param, "tune", "zerolatency", 0);
	}
	
	//20150929
	LOGI("extradata_size = %d\n", gRecordContext->pOutCodecContext->extradata_size);
	if(gRecordContext->pOutCodecContext->extradata_size > 0)
	{
		printf_array(gRecordContext->pOutCodecContext->extradata, gRecordContext->pOutCodecContext->extradata_size);
	}
	
	// open and initialize the pOutCodec encoder
	if (avcodec_open2(gRecordContext->pOutCodecContext, gRecordContext->pOutCodec, &param) < 0)
	{
		LOGE("Failed to open encoder! \n");
		return -1;
	}
	
	//20150929
	LOGI("extradata_size = %d\n", gRecordContext->pOutCodecContext->extradata_size);
	if(gRecordContext->pOutCodecContext->extradata_size > 0)
	{
		printf_array(gRecordContext->pOutCodecContext->extradata, gRecordContext->pOutCodecContext->extradata_size);
	}
	//------------------------------------------------------------------------------
	
	// allocate the input frame
	gRecordContext->pInFrame = av_frame_alloc();
	//avpicture_get_size(target format, target width, target height)
	gRecordContext->InFrameBufSize = avpicture_get_size(gRecordContext->pInCodecContext->pix_fmt,	\
	gRecordContext->pInCodecContext->width, gRecordContext->pInCodecContext->height);
	LOGI("ShowBufferSize = InFrameBufSize = %d\n", gRecordContext->InFrameBufSize);
	

	gRecordContext->ShowBufferSize = gRecordContext->InFrameBufSize;

	
	gRecordContext->pOutFrame = av_frame_alloc();
	
	/*
	Since pInFrame comes from the video stream, the decode function allocates
	its frame data buffers automatically, while pOutFrame is the
	format-converted frame and must be given a buffer in advance.
	**Note:
	Almost all encoders require YUV420P as the source data format, so when
	the frames captured from the video device are not in this format,
	libswscale must convert the format and resolution to YUV420P before
	encoding.
	Decoding: AVStream --> AVPacket --> AVFrame (an AVFrame is uncompressed and can be displayed directly)
	Encoding: AVFrame --> AVPacket --> AVStream
	*/	
	//avpicture_get_size(target format, target width, target height)
	gRecordContext->OutFrameBufSize = avpicture_get_size(gRecordContext->pOutCodecContext->pix_fmt,	\
	gRecordContext->pOutCodecContext->width, gRecordContext->pOutCodecContext->height);
	LOGI("OutFrameBufSize = %d\n", gRecordContext->OutFrameBufSize);
	
	gRecordContext->OutFrameBuffer = (uint8_t *)av_malloc(gRecordContext->OutFrameBufSize);
	avpicture_fill((AVPicture *)gRecordContext->pOutFrame, gRecordContext->OutFrameBuffer, gRecordContext->pOutCodecContext->pix_fmt,	\
	gRecordContext->pOutCodecContext->width, gRecordContext->pOutCodecContext->height);
	
		
	/* struct holding compressed encoded data and related info; an AVFrame, by contrast, is uncompressed */
	gRecordContext->in_packet 	= (AVPacket *)av_malloc(sizeof(AVPacket));	
	
	//Be careful here: av_new_packet must be called for out_packet
	gRecordContext->out_packet 	= (AVPacket *)av_malloc(sizeof(AVPacket));	 	//20150918
	av_new_packet(gRecordContext->out_packet, gRecordContext->OutFrameBufSize);	//20150918
	
	// img_convert_context describes the conversion between the two formats: pixel format, resolution, scaling algorithm...
	gRecordContext->img_convert_context = sws_getContext(gRecordContext->pInCodecContext->width,	\
	gRecordContext->pInCodecContext->height, gRecordContext->pInCodecContext->pix_fmt,	\
	gRecordContext->pOutCodecContext->width, gRecordContext->pOutCodecContext->height,	\
	gRecordContext->pOutCodecContext->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL); 

	
	gRecordContext->pix_size = gRecordContext->pOutCodecContext->width * gRecordContext->pOutCodecContext->height;
	
	// write the container header to the output
	avformat_write_header(gRecordContext->pOutFmtContext,NULL);
	
	LOGI("RecordInit end\n");

	return gRecordContext->ShowBufferSize;

err1:
	return -1;

err2:
	LOGI("err2 11111\n");
	FREE(gRecordContext);
	LOGI("err2 22222\n");
	return -1;
err3:
	LOGI("err3 11111\n");
	avformat_free_context(gRecordContext->pOutFmtContext);
	avformat_free_context(gRecordContext->pInFmtContext);
	FREE(gRecordContext);
	
	LOGI("err3 22222\n");
	return -1;
	
err7:
err8:
err9:
err10:
err11:
err12:
err13:
	LOGI("err3 11111\n");
	avformat_close_input(&gRecordContext->pInFmtContext);
	avformat_free_context(gRecordContext->pOutFmtContext);
	avformat_free_context(gRecordContext->pInFmtContext);
	FREE(gRecordContext);
	
	LOGI("err3 22222\n");
	return -1;	
	

}

int Recording(void)
{
	int ret = -1;
	int got_picture = -1;
	
	LOGI("Recording start\n");
	/*
	Read one video frame (or several audio frames) from the stream: each
	decoded video frame first needs a call to av_read_frame() to obtain one
	compressed packet (in H.264 one compressed frame usually corresponds to
	one NAL). A packet holds compressed data.
	*/
	ret = av_read_frame(gRecordContext->pInFmtContext, gRecordContext->in_packet);
	if(ret >= 0)
	{
		LOGI("lines= %d\tfunc = %s, frame count: %5d\n", __LINE__, __func__, gRecordContext->FrameCount);

		// if the packet belongs to the video stream, decode it
		if(gRecordContext->in_packet->stream_index == gRecordContext->videoindex)
		{
			LOGI("lines= %d\tfunc = %s, frame count: %5d\n", __LINE__, __func__, gRecordContext->FrameCount);
			
			/*
			Take one uncompressed AVFrame out of the compressed packet:
			input a compressed AVPacket, output a decoded AVFrame.
			*/
			ret = avcodec_decode_video2(gRecordContext->pInCodecContext, gRecordContext->pInFrame, &got_picture, gRecordContext->in_packet);
			if(ret < 0)
			{
				LOGE("Decode Error.\n");
				av_free_packet(gRecordContext->in_packet);
				return -1;
			}
			LOGI("lines= %d\tfunc = %s, frame count: %5d\n", __LINE__, __func__, gRecordContext->FrameCount);
			
			// successfully decoded one frame from the input video stream
			if(got_picture == 1)
			{				
				LOGI("lines= %d\tfunc = %s, frame count: %5d\n", __LINE__, __func__, gRecordContext->FrameCount);
				
				if(1 == gRecordContext->IsShow)
				{
					// copy the decoded picture into the display buffer used by the app;
					// avpicture_layout() serializes the (possibly planar) frame into
					// the flat buffer (the original memcpy from pInFrame->data copied
					// the plane-pointer array, not the pixels)
					avpicture_layout((const AVPicture *)gRecordContext->pInFrame, gRecordContext->pInCodecContext->pix_fmt,
						gRecordContext->pInCodecContext->width, gRecordContext->pInCodecContext->height,
						gRecordContext->ShowBuffer, gRecordContext->ShowBufferSize);
				}
				
				LOGI("lines= %d\tfunc = %s, frame count: %5d\n", __LINE__, __func__, gRecordContext->FrameCount);
				
				// convert the frame format (to YUV420P)
				sws_scale(gRecordContext->img_convert_context, (const uint8_t* const*)gRecordContext->pInFrame->data, gRecordContext->pInFrame->linesize, 0, gRecordContext->pInCodecContext->height, gRecordContext->pOutFrame->data, gRecordContext->pOutFrame->linesize);
				LOGI("lines= %d\tfunc = %s, frame count: %5d\n", __LINE__, __func__, gRecordContext->FrameCount);
				
				
				//PTS: frame timestamp
				gRecordContext->pOutFrame->pts = gRecordContext->FrameCount;
				gRecordContext->FrameCount++;	
				
				if (gRecordContext->pOutFmtContext->oformat->flags & AVFMT_RAWPICTURE)	
				{
					LOGI("raw picture\n");
				}
				
				// start encoding
				got_picture = 0;
				av_init_packet(gRecordContext->out_packet);
				/*
				Encode the frame into a packet: input one frame, output one packet.
				**Note:
				Almost all encoders require YUV420P as the source data format,
				so frames that are not in this format must first be converted
				with libswscale before they can be compressed.
				Decoding: AVStream --> AVPacket --> AVFrame (an AVFrame is uncompressed and can be displayed directly)
				Encoding: AVFrame --> AVPacket --> AVStream
				*/
				ret = avcodec_encode_video2(gRecordContext->pOutCodecContext, gRecordContext->out_packet, gRecordContext->pOutFrame, &got_picture);
				if(ret < 0)
				{
					LOGE("Failed to encode! \n");
					av_free_packet(gRecordContext->in_packet);
					return -1;
				}
				LOGI("lines= %d\tfunc = %s, frame count: %5d\n", __LINE__, __func__, gRecordContext->FrameCount);

				if (got_picture == 1)
				{
					LOGI("Succeed to encode frame: %5d\tsize:%5d \tindex = %d\n", gRecordContext->FrameCount, gRecordContext->out_packet->size, gRecordContext->out_vd_stream->index);
					
					LOGI("before rescale: PTS = %d\t DTS = %d\n", gRecordContext->out_packet->pts, gRecordContext->out_packet->dts);
					LOGI("before rescale: duration = %d\t convergence_duration = %d\n", gRecordContext->out_packet->duration, gRecordContext->out_packet->convergence_duration);

					//重要!!!!时间戳		
					av_packet_rescale_ts(gRecordContext->out_packet, gRecordContext->pOutCodecContext->time_base, gRecordContext->out_vd_stream->time_base);	
					
					LOGI("after rescale: PTS = %d\t DTS = %d\n", gRecordContext->out_packet->pts, gRecordContext->out_packet->dts);
					LOGI("after rescale: duration = %d\t convergence_duration = %d\n", gRecordContext->out_packet->duration, gRecordContext->out_packet->convergence_duration);
					
					gRecordContext->out_packet->stream_index = gRecordContext->out_vd_stream->index;		// mark which video/audio stream the packet belongs to
					
					// write the video packet to the output stream
					ret = av_write_frame(gRecordContext->pOutFmtContext, gRecordContext->out_packet);
					
					// free the packet
					av_free_packet(gRecordContext->out_packet);
				}
			}
		}
		av_free_packet(gRecordContext->in_packet);
	}

	LOGI("Recording end\n");
	
	return 0;
}

static int printf_array(uint8_t *array, int size)
{
	int i;
	for(i = 0; i < size; i++)
	{
		printf("0x%x  ", *(array + i));
	}
	printf("\n");
	return 0;
}

int RecordUninit(void)
{	
	int ret = -1;

	LOGI("RecordUninit start\n");
	//=========================================
	// flush the remaining frames out of the encoder into the output file, so no frames are lost
	ret = flush_encoder(gRecordContext->pOutFmtContext, gRecordContext->out_vd_stream->index);
	if (ret < 0) 
	{
		LOGE("Flushing encoder failed\n");
	}
	
	LOGI("av_write_trailer\n");
	//������������tail
	av_write_trailer(gRecordContext->pOutFmtContext);

	sws_freeContext(gRecordContext->img_convert_context);


	//Clean
	if (gRecordContext->out_vd_stream)
	{
		// close the output video stream's encoder
		avcodec_close(gRecordContext->pOutCodecContext);
		
		// free the frame
		av_frame_free(&gRecordContext->pOutFrame);		
		
		// free the packet		
		av_free(gRecordContext->out_packet);
		
		// free the buffer
		av_free(gRecordContext->OutFrameBuffer);		
	}
	
	// close the output file
	avio_close(gRecordContext->pOutFmtContext->pb);
	// free the output context
	avformat_free_context(gRecordContext->pOutFmtContext);


	// free the frame	
	av_frame_free(&gRecordContext->pInFrame);
	
	// free the packet struct (its payload was released per-read with av_free_packet)
	av_free(gRecordContext->in_packet);
	
	// close the input video stream's decoder
	avcodec_close(gRecordContext->pInCodecContext);
	
	// close the input stream (this also frees the input context)
	avformat_close_input(&gRecordContext->pInFmtContext);
	
	FREE(gRecordContext);
	
	LOGI("RecordUninit end\n");

	return 0;
}


#ifdef __cplusplus
};
#endif
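
For reference, a hypothetical caller of the v2.0 API above (file name and frame count are arbitrary; error handling is omitted):

#include "record_camera.h"

int main(void)
{
	int i;

	// record to test.mp4 without using the preview buffer
	if (RecordInit("test.mp4", 0) < 0)
		return -1;

	// pull and encode roughly 10 seconds of frames at 25 fps
	for (i = 0; i < 250; i++)
		Recording();

	RecordUninit();
	return 0;
}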


