Using ffmpeg for video decoding (H.264)

This code was adapted from code provided by the ffmpeg project team at 佰锐科技 (Bairui Technology), with additional reference to http://www.rosoo.net/a/201006/9659.html

bool InitH264Codec(AVCodecContext * &av_codec_context, AVFrame * &picture, AVCodec* codec)
{
	av_codec_context = NULL;
	picture = NULL;

	// Allocate the decoder context and the frame that will receive decoded pictures
	av_codec_context = avcodec_alloc_context();
	picture = avcodec_alloc_frame();

	if(!av_codec_context || !picture)
		return false;

	// Open the decoder with the AVCodec passed in by the caller
	if (avcodec_open(av_codec_context, codec) < 0)
	{
		OutputDebugString("could not open codec\n");
		return false;
	}

	//H264Context *h = av_codec_context->priv_data;
	//MpegEncContext *s = &h->s;
	//s->dsp.idct_permutation_type =1;
	//dsputil_init(&s->dsp, av_codec_context);

	return true;
}


 

bool DecodePlayH264(AVCodec* codec, AVCodecContext* av_codec_context, AVFrame *picture, \
	char*data, int len, int nSide, CClientDC * pdc)
{
	//bool bRet = false;

	// Decode one chunk of compressed H.264 data
	int got_picture = 0;
	int consumed_bytes = avcodec_decode_video(av_codec_context, picture, &got_picture, (const uint8_t*)data, len);
	if (consumed_bytes<0)
	{
		OutputDebugString("Error while decoding\n");
		return false;
	}
	// Display the frame if the decoder produced a complete picture
	if(got_picture)
	{
		OutputDebugString("got picture\n");
		H264ShowPic(nSide, picture, av_codec_context, pdc);
	}

	return true;
}
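
For reference, here is a minimal sketch of how the two helpers above might be wired together when a compressed frame arrives, e.g. from the network. The function name OnH264DataReceived, the recv_buf/recv_len parameters and the window source of the CClientDC are hypothetical; only the calling order and the use of the global h264_decoder come from the code in this post.

// Hypothetical caller: decode one received H.264 buffer and paint it on a window DC (nSide 0).
extern AVCodec h264_decoder;

void OnH264DataReceived(CWnd* pWnd, char* recv_buf, int recv_len)
{
	static AVCodecContext* ctx = NULL;
	static AVFrame* frame = NULL;

	if (ctx == NULL)                                  // one-time lazy initialization
	{
		avcodec_init();                               // old-API global init, as in main() below
		if (!InitH264Codec(ctx, frame, &h264_decoder))
			return;
	}

	CClientDC dc(pWnd);                               // draw directly onto the client area
	DecodePlayH264(&h264_decoder, ctx, frame, recv_buf, recv_len, 0, &dc);
}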


 

// Copy the decoded YUV planes out of the AVFrame into contiguous buffers (removing line-stride padding)
void H264SaveYUVData(int nSide, AVFrame * &picture, AVCodecContext * &context)
{
	unsigned char * Y = (nSide==0)?  Y1 : Y2;
	unsigned char * Cr = (nSide==0)?  Cr1 : Cr2;
	unsigned char * Cb = (nSide==0)?  Cb1 : Cb2;

	// Luma plane: full resolution
	for(int i=0; i<context->height; i++)
	{
		memcpy(Y, picture->data[0]+i*picture->linesize[0], context->width);
		Y += context->width;
	}

	// Chroma planes: half resolution in both dimensions for YUV 4:2:0
	for(int i=0; i<context->height/2; i++)
	{
		memcpy(Cr, picture->data[1]+i*picture->linesize[1], context->width/2);
		Cr += context->width/2;
	}

	for(int i=0; i<context->height/2; i++)
	{
		memcpy(Cb, picture->data[2]+i*picture->linesize[2], context->width/2);
		Cb += context->width/2;
	}

	return;
}

 

void H264ShowPic(int nSide, AVFrame * &picture, AVCodecContext * &context, CClientDC * pdc)
{
	unsigned char *Y, *Cr, *Cb, *RGB;

	Y = (nSide==0)?  Y1 : Y2;
	Cr = (nSide==0)?  Cr1 : Cr2;
	Cb = (nSide==0)?  Cb1 : Cb2;
	RGB = (nSide==0)?  RGB1 : RGB2;

	H264SaveYUVData(nSide, picture, context);  // copy the YUV planes into contiguous buffers
	ConvertYUV2RGB(Y, Cb, Cr, RGB, context->width, context->height); // convert the YUV data to an RGB image

	Draw(nSide, context->width, context->height, pdc);
}
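
ConvertYUV2RGB and Draw are not listed in this post. For completeness, below is a minimal sketch of what a ConvertYUV2RGB for planar YUV 4:2:0 could look like, matching the (Y, Cb, Cr, RGB, width, height) argument order used above; it is an integer approximation of the BT.601 formulas and writes 3 bytes per pixel in B,G,R order for a Windows DIB. The original author's implementation may differ.

// Hypothetical YUV 4:2:0 -> 24-bit RGB conversion matching the call above.
// Integer approximation of the BT.601 equations; output is B,G,R per pixel.
static inline unsigned char Clamp255(int v) { return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

void ConvertYUV2RGB(unsigned char* Y, unsigned char* Cb, unsigned char* Cr,
                    unsigned char* RGB, int width, int height)
{
	for (int y = 0; y < height; y++)
	{
		for (int x = 0; x < width; x++)
		{
			int luma = Y[y * width + x];
			int u = Cb[(y / 2) * (width / 2) + (x / 2)] - 128;   // chroma is subsampled 2x2
			int v = Cr[(y / 2) * (width / 2) + (x / 2)] - 128;

			unsigned char* p = RGB + (y * width + x) * 3;
			p[0] = Clamp255(luma + ((454 * u) >> 8));            // B = Y + 1.772*U
			p[1] = Clamp255(luma - ((88 * u + 183 * v) >> 8));   // G = Y - 0.344*U - 0.714*V
			p[2] = Clamp255(luma + ((359 * v) >> 8));            // R = Y + 1.402*V
		}
	}
}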

 

Global variable:

AVCodec h264_decoder = {
 "h264",
 CODEC_TYPE_VIDEO,
 CODEC_ID_H264,
 sizeof(H264Context),
 decode_init,
 NULL,
 decode_end,
 decode_frame,
 /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
 NULL,
 flush_dpb,
 NULL,
 NULL,
 NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
 NULL
};

 

Main function (the original main function is pasted here):
int main(int argc, char **argv)
{
	const char *outfilename = "outrec.txt";
	const char *outrecfilename = "outrec.yuv";
	const char *filename = "test.h264";
	extern AVCodec h264_decoder;
	AVCodec *codec = &h264_decoder;
	AVCodecContext *av_codec_context = NULL;
	int frame, size, got_picture, len;
	FILE *fin, *fout;
	AVFrame *picture;
	uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE], *inbuf_ptr;
	char buf[1024]; 
	DSPContext dsp;

	/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
	memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

	printf("Video decoding\n");

	/* use the H.264 decoder defined statically above */
	avcodec_init();
	av_codec_context = avcodec_alloc_context();
	picture = avcodec_alloc_frame();
	//	 dsputil_init(&dsp, c);

	if(codec->capabilities&CODEC_CAP_TRUNCATED)
		av_codec_context->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

	/* For some codecs, such as msmpeg4 and mpeg4, width and height
	MUST be initialized there because this information is not
	available in the bitstream. */

	/* open it */


	if (avcodec_open(av_codec_context, codec) < 0) {
		fprintf(stderr, "could not open codec\n");
		exit(1);
	}
	{

		H264Context *h = av_codec_context->priv_data;
		MpegEncContext *s = &h->s;
		s->dsp.idct_permutation_type =1;
		dsputil_init(&s->dsp, av_codec_context);
	}
	/* the codec gives us the frame size, in samples */

	fin = fopen(filename, "rb");
	if (!fin) {
		fprintf(stderr, "could not open %s\n", filename);
		exit(1);
	}
	fout = fopen(outfilename, "wb");
	if (!fout) {
		fprintf(stderr, "could not open %s\n", outfilename);
		exit(1);
	}
	fclose(fout);

	fout = fopen(outrecfilename, "wb");
	if (!fout) {
		fprintf(stderr, "could not open %s\n", outrecfilename);
		exit(1);
	}
	fclose(fout);

	frame = 0;
	for(;;) {
		size = fread(inbuf, 1, INBUF_SIZE, fin);
		if (size == 0)
			break;

		/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
		and this is the only method to use them because you cannot
		know the compressed data size before analysing it.

		BUT some other codecs (msmpeg4, mpeg4) are inherently frame
		based, so you must call them with all the data for one
		frame exactly. You must also initialize 'width' and
		'height' before initializing them. */

		/* NOTE2: some codecs allow the raw parameters (frame size,
		sample rate) to be changed at any frame. We handle this, so
		you should also take care of it */

		/* here, we use a stream based decoder (H.264), so we
		feed the decoder and see if it can decode a frame */
		inbuf_ptr = inbuf;
		while (size > 0) {
			len = avcodec_decode_video(av_codec_context, picture, &got_picture,
				inbuf_ptr, size);
			if (len < 0) {
				fprintf(stderr, "Error while decoding frame %d\n", frame);
				exit(1);
			}
			if (got_picture) {
				printf("saving frame %3d\n", frame);
				fflush(stdout);

				/* the picture is allocated by the decoder. no need to
				free it */
				//  snprintf(buf, sizeof(buf), outfilename, frame);
				pgm_save(picture->data[0], picture->linesize[0],
					av_codec_context->width, av_codec_context->height, outfilename, outrecfilename);
				pgm_save(picture->data[1], picture->linesize[1],
					av_codec_context->width/2, av_codec_context->height/2, outfilename, outrecfilename);
				pgm_save(picture->data[2], picture->linesize[2],
					av_codec_context->width/2, av_codec_context->height/2, outfilename, outrecfilename);
				frame++;
			}
			size -= len;
			inbuf_ptr += len;
		}
	}

	/* some codecs, such as MPEG, transmit the I and P frame with a
	latency of one frame. You must do the following to have a
	chance to get the last frame of the video */
#define NOTFOR264
#ifdef NOTFOR264

	//    len = avcodec_decode_video(c, picture, &got_picture,
	//                               NULL, 0);
	len = avcodec_decode_video(av_codec_context, picture, &got_picture,
		inbuf_ptr, 0);
	if (got_picture) {
		printf("saving last frame %3d\n", frame);
		fflush(stdout);

		/* the picture is allocated by the decoder. no need to
		free it */
		//    snprintf(buf, sizeof(buf), outfilename, frame);
		pgm_save(picture->data[0], picture->linesize[0],
			av_codec_context->width, av_codec_context->height, outfilename, outrecfilename);
		pgm_save(picture->data[1], picture->linesize[1],
			av_codec_context->width/2, av_codec_context->height/2, outfilename, outrecfilename);
		pgm_save(picture->data[2], picture->linesize[2],
			av_codec_context->width/2, av_codec_context->height/2, outfilename, outrecfilename);
		frame++;
	}
#endif

	fclose(fin);
	//	 fclose(fout);

	avcodec_close(av_codec_context);
	av_free(av_codec_context);
	av_free(picture);
	printf("\n");
}
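
pgm_save is not shown in the post either; in the stock ffmpeg decoding example it writes one grayscale PGM image per call. A version matching the two-filename signature used above might simply append the raw plane to outrec.yuv and log to outrec.txt. This is only a guess at the missing helper, not the original implementation:

// Hypothetical pgm_save matching the calls in main():
// appends one plane (stride padding removed) to the raw .yuv file and logs it to the .txt file.
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     const char *logfilename, const char *yuvfilename)
{
	FILE *fyuv = fopen(yuvfilename, "ab");       // append raw plane data
	FILE *flog = fopen(logfilename, "a");
	if (!fyuv || !flog) {
		if (fyuv) fclose(fyuv);
		if (flog) fclose(flog);
		return;
	}
	for (int i = 0; i < ysize; i++)
		fwrite(buf + i * wrap, 1, xsize, fyuv);  // drop the linesize padding
	fprintf(flog, "plane %dx%d written\n", xsize, ysize);
	fclose(fyuv);
	fclose(flog);
}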


 
