This post on audio/video decoding with FFmpeg is something I spent a long time studying; I'm sharing it here and hope to exchange ideas with everyone.
Prebuilt FFmpeg libraries can be downloaded from https://ffmpeg.zeranoe.com/builds/. The complete example below opens a media file, dumps the raw H.264 stream, decodes the video stream to YUV420P and the audio stream to 16-bit stereo PCM, and writes everything to disk.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48 kHz 32-bit audio
#define __STDC_CONSTANT_MACROS
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/imgutils.h"
#include "libswresample/swresample.h"
};
int main(int argc, char* argv[]) {
AVFormatContext* pFormatCtx;// demuxer (container) context: holds all information about the input file's container format
AVCodecContext* pCodecCtx, * pAudiocodec;// codec contexts for the video and audio streams
AVPacket* packet;
unsigned char* out_buffer, * out_buffer1;
int ret, got_picture;
int64_t in_channel_layout;
struct SwrContext* au_convert_ctx;
av_register_all();// register all demuxers, muxers and codecs
avformat_network_init();// initialize networking support
pFormatCtx = avformat_alloc_context();// allocate the format context
char filepath[]="E:\\cuc_ieschool.flv";
//char filepath[500] = { 0 };
//strcpy(filepath, argv[1]);
avdevice_register_all();
//AVInputFormat *ifmt=av_find_input_format("vfwcap");
//if(avformat_open_input(&pFormatCtx,"0",ifmt,NULL)!=0) {printf("Couldn't open input stream./dev/video0\n"); return -1; }
if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0)// open the input file and read its header
{
printf("Couldn't open input stream.\n");
return -1;
}
struct SwsContext* img_convert_ctx;// used to convert decoded video frames to YUV420P
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)// read packets to fill in stream information
{
printf("Couldn't find stream information.\n");
return -1;
}
AVCodec* pcodec, * pAudio;
AVFrame* pFrame, * pFrameYUV;
// this example assumes stream 0 is the video stream and stream 1 is the audio stream
pcodec = avcodec_find_decoder(pFormatCtx->streams[0]->codec->codec_id);// find the decoder for the video stream
pAudio = avcodec_find_decoder(pFormatCtx->streams[1]->codec->codec_id);// find the decoder for the audio stream
avcodec_open2(pFormatCtx->streams[0]->codec, pcodec, NULL);// open the video decoder
avcodec_open2(pFormatCtx->streams[1]->codec, pAudio, NULL);// open the audio decoder
packet = (AVPacket*)av_malloc(sizeof(AVPacket));// allocate one reusable packet
// allocate AVFrames: pFrame holds the decoded frame, pFrameYUV the frame converted to YUV420P
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();
pCodecCtx = pFormatCtx->streams[0]->codec;
pAudiocodec = pFormatCtx->streams[1]->codec;
printf("%d\n", pFormatCtx->streams[0]->codec->width);
// av_image_get_buffer_size(): size in bytes needed to store one image with the given pixel format and dimensions
out_buffer = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
// av_image_fill_arrays(): point pFrameYUV's data pointers and linesizes into out_buffer
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
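// With align = 1 and YUV420P, the planes end up laid out in out_buffer like this (for a w x h frame):
//   pFrameYUV->data[0] = out_buffer             (Y), linesize[0] = w
//   pFrameYUV->data[1] = out_buffer + w*h       (U), linesize[1] = w/2
//   pFrameYUV->data[2] = out_buffer + w*h*5/4   (V), linesize[2] = w/2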
// sws_getContext(): initialize a SwsContext for converting decoded frames to YUV420P
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
// audio processing
// output audio parameters
uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
// nb_samples per frame: AAC = 1024, MP3 = 1152
int out_nb_samples = pAudiocodec->frame_size;
AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
int out_sample_rate = 44100;
int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
//Out Buffer Size
int out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);
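// For example, with the parameters above and an AAC input stream:
//   out_nb_samples = 1024 samples per frame
//   out_channels   = 2 (stereo)
//   2 bytes per sample (AV_SAMPLE_FMT_S16)
//   out_buffer_size = 1024 * 2 * 2 = 4096 bytes per decoded audio frame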
out_buffer1 = (uint8_t*)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
AVFrame *pFrame1 = av_frame_alloc();
// FIX: some codecs' context information is missing, so derive the input channel layout from the channel count
in_channel_layout = av_get_default_channel_layout(pAudiocodec->channels);
// set up the software resampler (SwrContext)
au_convert_ctx = swr_alloc();
au_convert_ctx = swr_alloc_set_opts(au_convert_ctx, out_channel_layout, out_sample_fmt, out_sample_rate,
in_channel_layout, pAudiocodec->sample_fmt, pAudiocodec->sample_rate, 0, NULL);
swr_init(au_convert_ctx);
FILE* fp_264 = fopen("cjh264.h264", "wb");
FILE* fp_yuv = fopen("output.yuv", "wb");
FILE* fp_yuv1 = fopen("output1.yuv", "wb");
FILE* pFile = fopen("output.pcm", "wb");
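// Output files:
//   cjh264.h264 : raw H.264 elementary stream (packet data copied as-is)
//   output.yuv  : decoded video as planar YUV420P
//   output1.yuv : Y (luma) plane only
//   output.pcm  : decoded audio, 16-bit little-endian stereo PCM at 44100 Hz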
while (av_read_frame(pFormatCtx, packet) >= 0)// read one compressed packet (H.264 or AAC) at a time
{
if (packet->stream_index == 1) {// audio packet
ret = avcodec_decode_audio4(pAudiocodec, pFrame1, &got_picture, packet);
if (ret < 0) {
printf("Error in decoding audio frame.\n");
return -1;
}
if (got_picture > 0) {
swr_convert(au_convert_ctx, &out_buffer1, MAX_AUDIO_FRAME_SIZE, (const uint8_t**)pFrame1->data, pFrame1->nb_samples);
//Write PCM
fwrite(out_buffer1, 1, out_buffer_size, pFile);
}
}
if (packet->stream_index == 0) {// video packet
fwrite(packet->data, 1, packet->size, fp_264);// dump the raw H.264 stream
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);// decode into a raw video frame
if (ret < 0) {
printf("Decode Error.\n");
return -1;
}
if (got_picture) {
// sws_scale(): convert the decoded frame to YUV420P
sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
pFrameYUV->data, pFrameYUV->linesize);
// write the YUV420P data plane by plane
// for a frame of width w and height h, one YUV420P frame occupies w*h*3/2 bytes:
// the first w*h bytes are Y, the next w*h/4 bytes are U, and the last w*h/4 bytes are V
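// For example, for a 1280x720 frame:
//   Y plane: 1280*720       = 921600 bytes
//   U plane: 1280*720 / 4   = 230400 bytes
//   V plane: 1280*720 / 4   = 230400 bytes
//   total  : 1280*720 * 3/2 = 1382400 bytes per frame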
int y_size = pCodecCtx->width * pCodecCtx->height;
fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv1); // Y plane only, written to output1.yuv
fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
printf("Succeed to decode 1 frame!\n");
}
}
av_free_packet(packet);
}
fclose(fp_264);
fclose(fp_yuv);
fclose(fp_yuv1);
fclose(pFile);// don't forget the PCM dump
printf("%s\n", pcodec->name);
printf("%lld\n", (long long)pFormatCtx->duration);// duration in AV_TIME_BASE (microsecond) units
printf("%d\n", pFormatCtx->nb_streams);
printf("%s\n", pFormatCtx->iformat->long_name);
printf("%d\n", pFormatCtx->streams[0]->codec->height);
printf("%s\n", avcodec_configuration());
sws_freeContext(img_convert_ctx);
swr_free(&au_convert_ctx);
av_frame_free(&pFrame);
av_frame_free(&pFrameYUV);
av_frame_free(&pFrame1);
avformat_close_input(&pFormatCtx);
printf("cjh!!!\n");
return 0;
}
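Note that avcodec_decode_video2() and avcodec_decode_audio4() are deprecated in newer FFmpeg releases (3.1 and later). Below is a minimal sketch of the same video-decoding step using the send/receive API; decode_video_packet() is just an illustrative helper name, and the sws_scale()/fwrite() part is assumed to be the same code as in the loop above.
// Minimal sketch of the newer send/receive decoding API (FFmpeg >= 3.1).
int decode_video_packet(AVCodecContext* ctx, AVPacket* pkt, AVFrame* frame)
{
    int ret = avcodec_send_packet(ctx, pkt);// feed one compressed packet to the decoder
    if (ret < 0)
        return ret;
    while (ret >= 0) {
        ret = avcodec_receive_frame(ctx, frame);// one packet may yield zero or more frames
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;// decoder needs more input, or is fully drained
        if (ret < 0)
            return ret;// real decoding error
        // ...convert with sws_scale() and fwrite() the planes here, as in the loop above...
    }
    return 0;
}
Either decoding path produces the same dumps, which can be checked with ffplay, e.g. ffplay -f rawvideo -pixel_format yuv420p -video_size <width>x<height> output.yuv for the video and ffplay -f s16le -ar 44100 -ac 2 output.pcm for the audio.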