基于Microsoft Visual Studio2019环境编写ffmpeg视频解码代码

旧代码

  • 旧代码使用了很多过时的API,这些API使用后,vs会报编译器警告 (级别 3) C4996的错误
  • 即 函数被声明为已否决 报 C4996的错误
// test_ffmpeg.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//
#define SDL_MAIN_HANDLED
#define __STDC_CONSTANT_MACROS
#pragma warning(disable: 4996)

#include <iostream>
#include <SDL2/SDL.h>

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}

int main(int argc,char* argv[])
{
	// Legacy FFmpeg decode loop (pre-4.x API style). The calls marked below are
	// deprecated and are what makes MSVC emit warning C4996.
	AVFormatContext *pFormatCtx;
	int i, videoindex;
	AVCodecContext* pCodeCtx;
	AVCodec* pCodec;
	AVFrame* pFrame, *pFrameYUV;	// decoded frame / YUV420P-converted frame
	uint8_t* out_buffer;
	AVPacket* packet;
	//int y_size;
	int ret, got_picture;
	struct SwsContext* img_convert_ctx;
	// Path of the input file
	char filepath[] = "Titanic.ts";
	int frame_cnt;
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();
	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
		std::cout << "Couldn't open input stream." << std::endl;
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		std::cout << "Couldn't find stream information." << std::endl;
		return -1;
	}
	// Find the first video stream.
	videoindex = -1;
	for (i = 0;i < pFormatCtx->nb_streams;i++) {
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {	// AVStream::codec is deprecated (C4996)
			videoindex = i;
			break;
		}
	}
	if (videoindex == -1) {
		std::cout << "Didn't find a video stream." << std::endl;
		return -1;
	}
	pCodeCtx = pFormatCtx->streams[videoindex]->codec;	// deprecated: use codecpar + avcodec_parameters_to_context
	pCodec = avcodec_find_decoder(pCodeCtx->codec_id);
	if (pCodec == NULL) {
		std::cout << "Codec not find!" << std::endl;
		return -1;
	}
	if (avcodec_open2(pCodeCtx, pCodec, NULL) < 0) {
		std::cout << "Could not open codec!" << std::endl;
		return -1;
	}
	/*
	TODO: add code that prints the video information here
	(taken from pFormatCtx, output via std::cout)
	*/
	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (uint8_t*)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodeCtx->width, pCodeCtx->height));	// avpicture_get_size is deprecated
	avpicture_fill((AVPicture*)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodeCtx->width, pCodeCtx->height);	// avpicture_fill is deprecated
	packet = (AVPacket*)av_malloc(sizeof(AVPacket));	// NOTE(review): leaves AVPacket fields uninitialized; av_packet_alloc() is safer
	//Output Info
	std::cout << "--------------------File Information--------------------" << std::endl;
	av_dump_format(pFormatCtx, 0, filepath, 0);
	std::cout << "--------------------------------------------------------" << std::endl;
	// NOTE(review): the source pixel format should be pCodeCtx->pix_fmt.
	// Passing sw_pix_fmt here is what triggers the libswscale
	// "Assertion desc failed" abort described later in this article.
	img_convert_ctx = sws_getContext(pCodeCtx->width, pCodeCtx->height, pCodeCtx->sw_pix_fmt,
		pCodeCtx->width, pCodeCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	frame_cnt = 0;
	while (av_read_frame(pFormatCtx,packet)>=0)
	{
		if (packet->stream_index == videoindex) {
			/*
			TODO: add code that writes the H.264 stream out here
			(taken from packet, via fwrite())
			*/
			ret = avcodec_decode_video2(pCodeCtx, pFrame, &got_picture, packet);	// deprecated: use avcodec_send_packet/avcodec_receive_frame
			if (ret < 0) {
				std::cout << "Decode Frror!" << std::endl;
				return -1;
			}
			if (got_picture) {	// old-API convention: nonzero means a frame was decompressed
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodeCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);
				std::cout << "Decoded frame index"<< frame_cnt << std::endl;
			/*
			TODO: add code that writes the H.264 stream out here
			(taken from packet, via fwrite())
			*/
				frame_cnt++;
			}
		}
		av_free_packet(packet);	// deprecated: use av_packet_unref
	}
	sws_freeContext(img_convert_ctx);
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodeCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}

// 运行程序: Ctrl + F5 或调试 >“开始执行(不调试)”菜单
// 调试程序: F5 或调试 >“开始调试”菜单

// 入门使用技巧: 
//   1. 使用解决方案资源管理器窗口添加/管理文件
//   2. 使用团队资源管理器窗口连接到源代码管理
//   3. 使用输出窗口查看生成输出和其他消息
//   4. 使用错误列表窗口查看错误
//   5. 转到“项目”>“添加新项”以创建新的代码文件,或转到“项目”>“添加现有项”以将现有代码文件添加到项目
//   6. 将来,若要再次打开此项目,请转到“文件”>“打开”>“项目”并选择 .sln 文件

  •  Assertion desc failed at C:\Users\32157\vcpkg\buildtrees\ffmpeg\src\n4.4.1-070f385ab7.clean\libswscale\swscale_internal.h:677
  • c++ - Assertion desc failed at src/libswscale/swscale_internal.h:668 - Stack Overflow
  • 错误原因:在最新版本的 FFmpeg 中,该断言失败说明 outCodecContext->pix_fmt 设置不正确;此外 avpicture_fill 也已弃用,请改用 av_image_fill_arrays。
  • 因此 需要使用新版的API对旧版API进行更新迭代
  • 去除 #pragma warning(disable: 4996) 
  • 显示过时的API如下

对应修改

  • FFmpeg 被声明为已否决 deprecated_Louis_815的博客-CSDN博客
  • api函数替换,在里面搜索ctrl+F,会有英文说明的
  • PIX_FMT_YUV420P -> AV_PIX_FMT_YUV420P
  • 'AVStream::codec': 被声明为已否决:
    • if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
    • =>
    • if(pFormatCtx->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_VIDEO){
  • 'AVStream::codec': 被声明为已否决:
    • pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    • =>
    • pCodecCtx = avcodec_alloc_context3(NULL);
    • avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[videoindex]->codecpar);
  • 'avpicture_get_size': 被声明为已否决:
    • avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height)
    • =>
    • #include "libavutil/imgutils.h"
    • av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1)
  • 'avpicture_fill': 被声明为已否决:
    • avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
    • =>
    • av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
  • 'avcodec_decode_video2': 被声明为已否决:
    • ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet); //got_picture_ptr Zero if no frame could be decompressed
    • =>
    • ret = avcodec_send_packet(pCodecCtx, packet);
    • got_picture = avcodec_receive_frame(pCodecCtx, pFrame); //got_picture = 0 success, a frame was returned
    • //注意:got_picture含义相反
    • 或者:
    • int ret = avcodec_send_packet(aCodecCtx, &pkt);
    • if (ret != 0)
    • {
    • printf("%s\n", "error");
    • return;
    • }
    • while( avcodec_receive_frame(aCodecCtx, &frame) == 0){
    • //读取到一帧音频或者视频
    • //处理解码后音视频 frame
    • }
  • 'av_free_packet': 被声明为已否决:
    • av_free_packet(packet);
    • =>
    • av_packet_unref(packet);
  • 本文代码 未涉及
  • avcodec_decode_audio4:被声明为已否决:
    • int ret = avcodec_send_packet(aCodecCtx, &pkt);
    • if (ret != 0){printf("%s\n", "error");}
    • while( avcodec_receive_frame(aCodecCtx, &frame) == 0){
    • //读取到一帧音频或者视频
    • //处理解码后音视频 frame
    • }

修改后的代码

// test_ffmpeg.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//
#define SDL_MAIN_HANDLED
#define __STDC_CONSTANT_MACROS
#pragma warning(disable: 4819)

#include <iostream>
#include <SDL2/SDL.h>

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
}

int main(int argc,char* argv[])
{
	AVFormatContext *pFormatCtx;
	int i, videoindex;
	AVCodecContext* pCodeCtx;
	AVCodec* pCodec;
	AVFrame* pFrame, *pFrameYUV;
	uint8_t* out_buffer;
	AVPacket* packet;
	//int y_size;
	int ret, got_picture;
	struct SwsContext* img_convert_ctx;
	//输入文件的路径
	char filepath[] = "Titanic.ts";
	int frame_cnt;
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();
	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
		std::cout << "Couldn't open input stream." << std::endl;
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		std::cout << "Couldn't find stream information." << std::endl;
		return -1;
	}
	videoindex = -1;
	for (i = 0;i < pFormatCtx->nb_streams;i++) {
		if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}
	}
	if (videoindex == -1) {
		std::cout << "Didn't find a video stream." << std::endl;
		return -1;
	}
	pCodeCtx = avcodec_alloc_context3(NULL);
	avcodec_parameters_to_context(pCodeCtx, pFormatCtx->streams[videoindex]->codecpar);
	pCodec = avcodec_find_decoder(pCodeCtx->codec_id);
	if (pCodec == NULL) {
		std::cout << "Codec not find!" << std::endl;
		return -1;
	}
	if (avcodec_open2(pCodeCtx, pCodec, NULL) < 0) {
		std::cout << "Could not open codec!" << std::endl;
		return -1;
	}
	/*
	此处添加输出视频信息的代码
	取自于pFormatCtx,使用std::cout输出
	*/
	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodeCtx->width, pCodeCtx->height,1));
	av_image_fill_arrays(pFrameYUV->data,pFrameYUV->linesize,out_buffer,AV_PIX_FMT_YUV420P,pCodeCtx->width,pCodeCtx->height,1);
	packet = (AVPacket*)av_malloc(sizeof(AVPacket));
	//Output Info
	std::cout << "--------------------File Information--------------------" << std::endl;
	av_dump_format(pFormatCtx, 0, filepath, 0);
	std::cout << "--------------------------------------------------------" << std::endl;
	img_convert_ctx = sws_getContext(pCodeCtx->width, pCodeCtx->height, pCodeCtx->sw_pix_fmt,
		pCodeCtx->width, pCodeCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	frame_cnt = 0;
	while (av_read_frame(pFormatCtx,packet)>=0)
	{
		if (packet->stream_index == videoindex) {
			/*
			在此处添加输出H264码流的代码
			取自于packet,使用fwrite()输出
			*/
			ret = avcodec_send_packet(pCodeCtx, packet);
			//注意 got_picture=0 success,a frame was returned 
			got_picture = avcodec_receive_frame(pCodeCtx, pFrame);
			if (ret < 0) {
				std::cout << "Decode Frror!" << std::endl;
				return -1;
			}
			if (got_picture) {
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodeCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);
				std::cout << "Decoded frame index"<< frame_cnt << std::endl;
			/*
			在此处添加输出H264码流的代码
			取自于packet,使用fwrite()输出
			*/
				frame_cnt++;
			}
		}
		av_packet_unref(packet);
	}
	sws_freeContext(img_convert_ctx);
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodeCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}

// 运行程序: Ctrl + F5 或调试 >“开始执行(不调试)”菜单
// 调试程序: F5 或调试 >“开始调试”菜单

// 入门使用技巧: 
//   1. 使用解决方案资源管理器窗口添加/管理文件
//   2. 使用团队资源管理器窗口连接到源代码管理
//   3. 使用输出窗口查看生成输出和其他消息
//   4. 使用错误列表窗口查看错误
//   5. 转到“项目”>“添加新项”以创建新的代码文件,或转到“项目”>“添加现有项”以将现有代码文件添加到项目
//   6. 将来,若要再次打开此项目,请转到“文件”>“打开”>“项目”并选择 .sln 文件

 问题仍未解决

  •  还是同一个错误
  •  Assertion desc failed at C:\Users\32157\vcpkg\buildtrees\ffmpeg\src\n4.4.1-070f385ab7.clean\libswscale\swscale_internal.h:677

再次修改代码

// test_ffmpeg.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//

/*
#define SDL_MAIN_HANDLED
#define __STDC_CONSTANT_MACROS
#pragma warning(disable: 4819)

#include <iostream>
#include <SDL2/SDL.h>

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
}
*/

#pragma warning(disable: 4996)

#include <stdio.h>

#define __STDC_CONSTANT_MACROS

#ifdef _WIN32
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
};
#endif
#endif


int main(int argc, char* argv[])
{
	// Legacy-API version: decodes a video file to output.yuv (raw YUV420P) and
	// dumps the compressed video bitstream to output.h264. The deprecated calls
	// (av_register_all, AVStream::codec, avpicture_*, avcodec_decode_video2,
	// av_free_packet) are silenced by #pragma warning(disable: 4996) above.
	AVFormatContext* pFormatCtx;
	int             i, videoindex;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;
	AVFrame* pFrame, * pFrameYUV;
	uint8_t* out_buffer;
	AVPacket* packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext* img_convert_ctx;

	char filepath[] = "Forrest_Gump_IMAX.mp4";

	// NOTE(review): fopen() results are never checked; the fwrite calls below
	// crash if either output file could not be created.
	FILE* fp_yuv = fopen("output.yuv", "wb+");
	FILE* fp_h264 = fopen("output.h264", "wb+");

	av_register_all();// register all muxers/demuxers/codecs (deprecated since FFmpeg 4.0)
	avformat_network_init();// initialize networking support
	pFormatCtx = avformat_alloc_context();// allocate an AVFormatContext

	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {// open the input video file
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {// read stream information
		printf("Couldn't find stream information.\n");
		return -1;
	}
	// Find the first video stream.
	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}

	if (videoindex == -1) {
		printf("Didn't find a video stream.\n");
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);// find the decoder
	if (pCodec == NULL) {
		printf("Codec not found.\n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {// open the decoder
		printf("Could not open codec.\n");
		return -1;
	}

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (uint8_t*)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture*)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	packet = (AVPacket*)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx, 0, filepath, 0);
	printf("-------------------------------------------------\n");
	// Source format is pix_fmt here (correct); the earlier versions in this
	// article passed sw_pix_fmt, which triggered the libswscale assertion.
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	while (av_read_frame(pFormatCtx, packet) >= 0) {// read one compressed packet
		if (packet->stream_index == videoindex) {

			fwrite(packet->data, 1, packet->size, fp_h264); // write the raw H.264 data to fp_h264

			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);// decode one compressed packet
			if (ret < 0) {
				printf("Decode Error.\n");
				return -1;
			}
			if (got_picture) {
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);

				y_size = pCodecCtx->width * pCodecCtx->height;
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
				printf("Succeed to decode 1 frame!\n");

			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	/* When the av_read_frame() loop exits, the decoder may still hold a few
	   buffered frames; they are drained here by calling avcodec_decode_video2()
	   again without feeding any new data.
	   NOTE(review): this relies on av_free_packet() above having left
	   packet->data == NULL and packet->size == 0 so that `packet` acts as an
	   empty flush packet — confirm against the FFmpeg version in use. */
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
			pFrameYUV->data, pFrameYUV->linesize);

		int y_size = pCodecCtx->width * pCodecCtx->height;
		fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
		fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V

		printf("Flush Decoder: Succeed to decode 1 frame!\n");
	}

	sws_freeContext(img_convert_ctx);

	// close files and release memory
	fclose(fp_yuv);
	fclose(fp_h264);

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}


// 运行程序: Ctrl + F5 或调试 >“开始执行(不调试)”菜单
// 调试程序: F5 或调试 >“开始调试”菜单

// 入门使用技巧: 
//   1. 使用解决方案资源管理器窗口添加/管理文件
//   2. 使用团队资源管理器窗口连接到源代码管理
//   3. 使用输出窗口查看生成输出和其他消息
//   4. 使用错误列表窗口查看错误
//   5. 转到“项目”>“添加新项”以创建新的代码文件,或转到“项目”>“添加现有项”以将现有代码文件添加到项目
//   6. 将来,若要再次打开此项目,请转到“文件”>“打开”>“项目”并选择 .sln 文件

 最后修改代码

// test_ffmpeg.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//
//#pragma warning(disable: 4996)

#include <stdio.h>

#define __STDC_CONSTANT_MACROS

#ifdef _WIN32
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
};
#endif
#include <cerrno>
#endif


int main(int argc, char* argv[])
{
	AVFormatContext* pFormatCtx;
	int             i, videoindex;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;
	AVFrame* pFrame, * pFrameYUV;
	uint8_t* out_buffer;
	AVPacket* packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext* img_convert_ctx;

	char filepath[] = "Forrest_Gump_IMAX.mp4";

	//FILE* fp_yuv = fopen("output.yuv", "wb+");
	errno_t err;
	FILE* fp_yuv = NULL;
	if ((err = fopen_s(&fp_yuv, "output.yuv", "wb+")) != 0) {
		printf("Couldn't open fp_yuv.\n");
	}
	//FILE* fp_h264 = fopen("output.h264", "wb+");
	FILE* fp_h264 = NULL;
	if ((err = fopen_s(&fp_h264, "output.h264", "wb+")) != 0) {
		printf("Couldn't open fp_h264.\n");
	}
	//av_register_all();//注册所有组件
	avformat_network_init();//初始化网络
	pFormatCtx = avformat_alloc_context();//初始化一个AVFormatContext

	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {//打开输入的视频文件
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {//获取视频文件信息
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}

	if (videoindex == -1) {
		printf("Didn't find a video stream.\n");
		return -1;
	}

	//pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	pCodecCtx = avcodec_alloc_context3(NULL);
	avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[videoindex]->codecpar);
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);//查找解码器
	if (pCodec == NULL) {
		printf("Codec not found.\n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {//打开解码器
		printf("Could not open codec.\n");
		return -1;
	}

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height,1));
	//avpicture_fill((AVPicture*)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P,
		pCodecCtx->width, pCodecCtx->height, 1);
	packet = (AVPacket*)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx, 0, filepath, 0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	while (av_read_frame(pFormatCtx, packet) >= 0) {//读取一帧压缩数据
		if (packet->stream_index == videoindex) {

			fwrite(packet->data, 1, packet->size, fp_h264); //把H264数据写入fp_h264文件

			//ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);//解码一帧压缩数据
			ret = avcodec_send_packet(pCodecCtx, packet);
			if (ret < 0) {
				printf("Error sending a packet for decoding.\n");
				return -1;
			}
			//while (ret >= 0) {
				got_picture = avcodec_receive_frame(pCodecCtx, pFrame);
				//缺失错误处理和检测
				if (got_picture==0) {
					sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
						pFrameYUV->data, pFrameYUV->linesize);

					y_size = pCodecCtx->width * pCodecCtx->height;
					fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
					fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
					fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
					printf("Succeed to decode 1 frame!\n");
				}
			//}
		}
		//av_free_packet(packet);
		av_packet_unref(packet);
	}
	//flush decoder
	/*当av_read_frame()循环退出的时候,实际上解码器中可能还包含剩余的几帧数据。
	因此需要通过“flush_decoder”将这几帧数据输出。
   “flush_decoder”功能简而言之即直接调用avcodec_decode_video2()获得AVFrame,而不再向解码器传递AVPacket。*/
	while (1) {
		//ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		ret = avcodec_send_packet(pCodecCtx, packet);
		if (ret < 0)
			break;
		got_picture = avcodec_receive_frame(pCodecCtx, pFrame);
		if (got_picture!=0)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
			pFrameYUV->data, pFrameYUV->linesize);

		int y_size = pCodecCtx->width * pCodecCtx->height;
		fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
		fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V

		printf("Flush Decoder: Succeed to decode 1 frame!\n");
	}

	sws_freeContext(img_convert_ctx);

	//关闭文件以及释放内存
	fclose(fp_yuv);
	fclose(fp_h264);

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}


// 运行程序: Ctrl + F5 或调试 >“开始执行(不调试)”菜单
// 调试程序: F5 或调试 >“开始调试”菜单

// 入门使用技巧: 
//   1. 使用解决方案资源管理器窗口添加/管理文件
//   2. 使用团队资源管理器窗口连接到源代码管理
//   3. 使用输出窗口查看生成输出和其他消息
//   4. 使用错误列表窗口查看错误
//   5. 转到“项目”>“添加新项”以创建新的代码文件,或转到“项目”>“添加现有项”以将现有代码文件添加到项目
//   6. 将来,若要再次打开此项目,请转到“文件”>“打开”>“项目”并选择 .sln 文件

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
以下是使用FFmpeg进行dav转mp4的cpp代码(在Visual Studio环境下): ``` extern "C" { #include <libavcodec/avcodec.h> #include <libavformat/avformat.h> #include <libavutil/avutil.h> #include <libavutil/dict.h> #include <libavutil/error.h> #include <libavutil/mathematics.h> #include <libavutil/opt.h> #include <libavutil/samplefmt.h> #include <libswscale/swscale.h> } int main() { const char* input_file_path = "input.dav"; const char* output_file_path = "output.mp4"; // Open input file AVFormatContext* format_ctx = nullptr; if (avformat_open_input(&format_ctx, input_file_path, nullptr, nullptr) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not open input file %s\n", input_file_path); return -1; } // Get stream info if (avformat_find_stream_info(format_ctx, nullptr) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not find stream information\n"); return -1; } // Find video and audio stream AVCodec* video_codec = nullptr; AVCodec* audio_codec = nullptr; int video_stream_index = -1; int audio_stream_index = -1; for (int i = 0; i < format_ctx->nb_streams; ++i) { if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { video_stream_index = i; video_codec = avcodec_find_decoder(format_ctx->streams[i]->codecpar->codec_id); } else if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { audio_stream_index = i; audio_codec = avcodec_find_decoder(format_ctx->streams[i]->codecpar->codec_id); } } if (video_stream_index == -1 && audio_stream_index == -1) { av_log(nullptr, AV_LOG_ERROR, "Could not find any video or audio stream\n"); return -1; } // Open video codec AVCodecContext* video_codec_ctx = nullptr; if (video_codec != nullptr) { video_codec_ctx = avcodec_alloc_context3(video_codec); if (avcodec_parameters_to_context(video_codec_ctx, format_ctx->streams[video_stream_index]->codecpar) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not copy video codec parameters to context\n"); return -1; } if (avcodec_open2(video_codec_ctx, video_codec, nullptr) < 0) { av_log(nullptr, 
AV_LOG_ERROR, "Could not open video codec\n"); return -1; } } // Open audio codec AVCodecContext* audio_codec_ctx = nullptr; if (audio_codec != nullptr) { audio_codec_ctx = avcodec_alloc_context3(audio_codec); if (avcodec_parameters_to_context(audio_codec_ctx, format_ctx->streams[audio_stream_index]->codecpar) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not copy audio codec parameters to context\n"); return -1; } if (avcodec_open2(audio_codec_ctx, audio_codec, nullptr) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not open audio codec\n"); return -1; } } // Open output file AVFormatContext* output_format_ctx = nullptr; if (avformat_alloc_output_context2(&output_format_ctx, nullptr, nullptr, output_file_path) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not create output context\n"); return -1; } // Add video stream to output file AVStream* video_stream = nullptr; if (video_codec_ctx != nullptr) { video_stream = avformat_new_stream(output_format_ctx, video_codec); if (video_stream == nullptr) { av_log(nullptr, AV_LOG_ERROR, "Could not create video stream\n"); return -1; } if (avcodec_parameters_copy(video_stream->codecpar, format_ctx->streams[video_stream_index]->codecpar) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not copy video codec parameters to output stream\n"); return -1; } if (avcodec_parameters_to_context(video_stream->codec, video_stream->codecpar) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not copy video codec parameters to output stream context\n"); return -1; } video_stream->codec->codec_tag = 0; video_stream->time_base = format_ctx->streams[video_stream_index]->time_base; } // Add audio stream to output file AVStream* audio_stream = nullptr; if (audio_codec_ctx != nullptr) { audio_stream = avformat_new_stream(output_format_ctx, audio_codec); if (audio_stream == nullptr) { av_log(nullptr, AV_LOG_ERROR, "Could not create audio stream\n"); return -1; } if (avcodec_parameters_copy(audio_stream->codecpar, format_ctx->streams[audio_stream_index]->codecpar) < 
0) { av_log(nullptr, AV_LOG_ERROR, "Could not copy audio codec parameters to output stream\n"); return -1; } if (avcodec_parameters_to_context(audio_stream->codec, audio_stream->codecpar) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not copy audio codec parameters to output stream context\n"); return -1; } audio_stream->codec->codec_tag = 0; audio_stream->time_base = format_ctx->streams[audio_stream_index]->time_base; } // Open output file for writing if (!(output_format_ctx->oformat->flags & AVFMT_NOFILE)) { if (avio_open(&output_format_ctx->pb, output_file_path, AVIO_FLAG_WRITE) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not open output file %s\n", output_file_path); return -1; } } // Write header to output file if (avformat_write_header(output_format_ctx, nullptr) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not write header to output file\n"); return -1; } // Convert video frames AVFrame* video_frame = av_frame_alloc(); AVFrame* video_frame_rgb = av_frame_alloc(); if (video_codec_ctx != nullptr) { SwsContext* sws_ctx = sws_getContext(video_codec_ctx->width, video_codec_ctx->height, video_codec_ctx->pix_fmt, video_codec_ctx->width, video_codec_ctx->height, AV_PIX_FMT_RGB24, SWS_BILINEAR, nullptr, nullptr, nullptr); if (sws_ctx == nullptr) { av_log(nullptr, AV_LOG_ERROR, "Could not create SwsContext\n"); return -1; } av_image_alloc(video_frame_rgb->data, video_frame_rgb->linesize, video_codec_ctx->width, video_codec_ctx->height, AV_PIX_FMT_RGB24, 1); AVPacket packet; av_init_packet(&packet); while (av_read_frame(format_ctx, &packet) >= 0) { if (packet.stream_index == video_stream_index) { if (avcodec_send_packet(video_codec_ctx, &packet) == 0) { while (avcodec_receive_frame(video_codec_ctx, video_frame) == 0) { sws_scale(sws_ctx, video_frame->data, video_frame->linesize, 0, video_codec_ctx->height, video_frame_rgb->data, video_frame_rgb->linesize); video_frame_rgb->pts = video_frame->pts; if (avcodec_send_frame(video_stream->codec, video_frame_rgb) < 0) { 
av_log(nullptr, AV_LOG_ERROR, "Could not send video frame to output stream\n"); return -1; } while (avcodec_receive_packet(video_stream->codec, &packet) == 0) { if (av_write_frame(output_format_ctx, &packet) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not write video packet to output file\n"); return -1; } av_packet_unref(&packet); } } } } av_packet_unref(&packet); } avcodec_send_frame(video_stream->codec, nullptr); while (avcodec_receive_packet(video_stream->codec, &packet) == 0) { if (av_write_frame(output_format_ctx, &packet) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not write video packet to output file\n"); return -1; } av_packet_unref(&packet); } sws_freeContext(sws_ctx); } // Convert audio frames AVFrame* audio_frame = av_frame_alloc(); if (audio_codec_ctx != nullptr) { AVPacket packet; av_init_packet(&packet); while (av_read_frame(format_ctx, &packet) >= 0) { if (packet.stream_index == audio_stream_index) { if (avcodec_send_packet(audio_codec_ctx, &packet) == 0) { while (avcodec_receive_frame(audio_codec_ctx, audio_frame) == 0) { audio_frame->pts = audio_frame->best_effort_timestamp; if (avcodec_send_frame(audio_stream->codec, audio_frame) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not send audio frame to output stream\n"); return -1; } while (avcodec_receive_packet(audio_stream->codec, &packet) == 0) { if (av_write_frame(output_format_ctx, &packet) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not write audio packet to output file\n"); return -1; } av_packet_unref(&packet); } } } } av_packet_unref(&packet); } avcodec_send_frame(audio_stream->codec, nullptr); while (avcodec_receive_packet(audio_stream->codec, &packet) == 0) { if (av_write_frame(output_format_ctx, &packet) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not write audio packet to output file\n"); return -1; } av_packet_unref(&packet); } } // Write trailer to output file if (av_write_trailer(output_format_ctx) < 0) { av_log(nullptr, AV_LOG_ERROR, "Could not write trailer to output file\n"); 
return -1; } // Close output file if (!(output_format_ctx->oformat->flags & AVFMT_NOFILE)) { avio_close(output_format_ctx->pb); } // Free resources avcodec_free_context(&video_codec_ctx); avcodec_free_context(&audio_codec_ctx); avformat_close_input(&format_ctx); avformat_free_context(output_format_ctx); av_frame_free(&video_frame); av_frame_free(&video_frame_rgb); av_frame_free(&audio_frame); return 0; } ``` 这段代码使用了FFmpeg的多个库来进行视频和音频的解码、转换和编码,具体实现细节可以参考FFmpeg的官方文档。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值