#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/frame.h>
#include <libswscale/swscale.h>
/*
*
*/
/*
 * Dump one RGB24 frame to disk as a binary PPM ("P6") image.
 *
 * pFrame  - frame whose data[0]/linesize[0] hold packed RGB24 pixels
 * width   - image width in pixels (each row is width*3 bytes of payload)
 * height  - number of rows to write
 * iFrame  - frame index used to build the filename "1/frame<N>.ppm"
 *
 * Silently returns if the file cannot be opened (e.g. the "1/" output
 * directory does not exist).
 */
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame);
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
    FILE *pFile;
    char szFilename[32];
    int y;

    /* BUG FIX: use snprintf — the original sprintf could overflow the
     * fixed 32-byte name buffer for large/negative iFrame values. */
    snprintf(szFilename, sizeof szFilename, "1/frame%d.ppm", iFrame);

    pFile = fopen(szFilename, "wb");
    if (pFile == NULL) {
        return;
    }

    /* PPM header: magic, dimensions, maximum sample value. */
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    /* Write the pixel rows one at a time: each row carries width*3 bytes
     * of payload, but rows in the frame buffer are linesize[0] bytes
     * apart (linesize may include alignment padding).
     * BUG FIX: stop on a short write (disk full / I/O error) instead of
     * silently continuing. */
    for (y = 0; y < height; y++) {
        if (fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1,
                   (size_t)width * 3, pFile) != (size_t)width * 3) {
            break;
        }
    }

    fclose(pFile);
}
/*
*
*/
/*Main Function*/
int main(int argc, char *argv[])
{
AVFormatContext *pFormatCtx;
int i, videoStreamIdx;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVFrame *pFrameRGB;
AVPacket packet;
int frameFinished;
int numBytes;
uint8_t *buffer;
static struct SwsContext *img_convert_ctx;
if(argc < 2){
printf("Please provide a video file\n");
return -1;
}
// /*注册所有可用的格式和编解码器*/
av_register_all();
//Open video file
/*以输入方式打开一个媒体文件,也即源文件,
codecs并没有打开,只读取了文件的头信息*/
if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
{return -1;} //Couldn't open file
//Retrieve stream information
/*通过读取媒体文件中的包来获取媒体文件中的流信息,对于没有头信息的文件如(mpeg)是非常有用的,
// 该函数通常重算类似mpeg-2帧模式的真实帧率,该函数并未改变逻辑文件的position.
*/
if(avformat_find_stream_info(pFormatCtx, NULL) < 0)
{return -1;} // Couldn't find stream information
// Dump information about file onto standard error
//该函数的作用就是检查下初始化过程中设置的参数是否符合规范
av_dump_format(pFormatCtx, 0, argv[1], 0);
//Find the first video stream
videoStreamIdx=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
{
if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{ videoStreamIdx=i;
break;}
}
if(videoStreamIdx==-1)
return -1;// Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStreamIdx]->codec;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL)
{
fprintf(stderr, "Unsupported codec!\n");
return -1; //Codec not found
}
//Open codec //使用给定的AVCodec初始化AVCodecContext
if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{return -1;} // Could not open codec
//Allocate video frame
pFrame = av_frame_alloc();
//Allocate an AVFrame structure
pFrameRGB = av_frame_alloc();
if(pFrameRGB==NULL)
{return -1;}
//Determine required buffer size and allocate buffer
numBytes = avpicture_get_size(AV_PIX_FMT_RGB24,
pCodecCtx->width,
pCodecCtx->height);
buffer = (uint8_t *) av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
int w = pCodecCtx->width;
int h = pCodecCtx->height;
img_convert_ctx = sws_getContext(w, h, pCodecCtx->pix_fmt,
w, h, AV_PIX_FMT_RGB24,
SWS_LANCZOS, NULL, NULL, NULL);
//Read frames and save first five frames to disk
i=0;
while((av_read_frame(pFormatCtx, &packet)>=0) && (i<50))
{
// Is this a packet from the video stream?
if(packet.stream_index==videoStreamIdx)
{
//Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
//Did we get a video frame?
if(frameFinished)
{
i++;
i = i % 15;
// Convert the image from its native format to RGB
sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data,
pFrame->linesize, 0, pCodecCtx->height,
pFrameRGB->data, pFrameRGB->linesize);
// Save the frame to disk
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
}
}
//Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
//Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
//Free the YUV frame
av_free(pFrame);
sws_freeContext(img_convert_ctx);
//Close the codec
avcodec_close(pCodecCtx);
//Close the video file
avformat_close_input(&pFormatCtx);
/*FINE PROGRAMMA*/
return 0;
}
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/frame.h>
#include <libswscale/swscale.h>
/*
*
*/
/* NOTE(review): byte-for-byte duplicate of the SaveFrame() defined near the
 * top of this file. C does not allow two external definitions of the same
 * function, so the file cannot compile/link as-is — the duplicated second
 * half of the file (repeated #includes, SaveFrame, main) should be deleted.
 *
 * Dumps one RGB24 frame as a binary PPM ("P6") file named "1/frame<N>.ppm".
 * Silently returns if the file cannot be opened (e.g. the "1/" output
 * directory does not exist).
 */
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame);
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
FILE *pFile;
char szFilename[32];
int y;
// Open file (sprintf into a fixed 32-byte buffer — no bounds check)
sprintf(szFilename, "1/frame%d.ppm", iFrame);
pFile=fopen(szFilename, "wb");
if(pFile==NULL)
{return; }
//Write PPM header: magic, dimensions, max sample value
fprintf(pFile, "P6\n%d %d\n255\n", width, height);
//Write pixel data: width*3 payload bytes per row; rows in the frame
//buffer are linesize[0] bytes apart (linesize may include padding)
for(y=0; y<height; y++)
{
fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);
}
//Close file
fclose(pFile);
}
/*
*
*/
/*Main Function*/
/* NOTE(review): byte-for-byte duplicate of the main() defined earlier in
 * this file. Two definitions of main() cannot link; one copy of the
 * duplicated second half of the file must be removed.
 *
 * Decodes a video file (pre-4.0 FFmpeg API), converts each frame to RGB24
 * with libswscale and saves frames as PPM files via SaveFrame().
 */
int main(int argc, char *argv[])
{
// NOTE(review): pFormatCtx is never initialized; avformat_open_input()
// requires either NULL or a context from avformat_alloc_context(), so
// passing an indeterminate pointer here is undefined behavior.
AVFormatContext *pFormatCtx;
int i, videoStreamIdx;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVFrame *pFrameRGB;
AVPacket packet;
int frameFinished;
int numBytes;
uint8_t *buffer;
static struct SwsContext *img_convert_ctx;
if(argc < 2){
printf("Please provide a video file\n");
return -1;
}
// Register all available formats and codecs (needed before FFmpeg 4.0)
av_register_all();
//Open video file
/* Opens the media file for input: only the header is read here; the
codecs themselves are not opened yet. */
if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
{return -1;} //Couldn't open file
//Retrieve stream information
/* Reads packets to discover stream information; useful for formats with
no header (e.g. MPEG). May recompute the real frame rate (e.g. MPEG-2
frame mode) and does not change the logical file position.
*/
if(avformat_find_stream_info(pFormatCtx, NULL) < 0)
{return -1;} // Couldn't find stream information
// Dump information about file onto standard error
// (also a sanity check of the parameters set during initialization)
av_dump_format(pFormatCtx, 0, argv[1], 0);
//Find the first video stream
videoStreamIdx=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
{
if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{ videoStreamIdx=i;
break;}
}
if(videoStreamIdx==-1)
return -1;// Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStreamIdx]->codec;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL)
{
fprintf(stderr, "Unsupported codec!\n");
return -1; //Codec not found
}
//Open codec: initialize the AVCodecContext with the given AVCodec
if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{return -1;} // Could not open codec
//Allocate video frame
// NOTE(review): pFrame is never NULL-checked (pFrameRGB below is).
pFrame = av_frame_alloc();
//Allocate an AVFrame structure
pFrameRGB = av_frame_alloc();
if(pFrameRGB==NULL)
{return -1;}
//Determine required buffer size and allocate buffer
// NOTE(review): the av_malloc() result below is not checked for NULL.
numBytes = avpicture_get_size(AV_PIX_FMT_RGB24,
pCodecCtx->width,
pCodecCtx->height);
buffer = (uint8_t *) av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
int w = pCodecCtx->width;
int h = pCodecCtx->height;
img_convert_ctx = sws_getContext(w, h, pCodecCtx->pix_fmt,
w, h, AV_PIX_FMT_RGB24,
SWS_LANCZOS, NULL, NULL, NULL);
//Read frames and save first five frames to disk
// NOTE(review): "i = i % 15" below keeps i in 0..14, so "i < 50" never
// fails — the loop decodes the WHOLE file while cycling over only 15
// output filenames, contradicting both the comment and the condition.
i=0;
while((av_read_frame(pFormatCtx, &packet)>=0) && (i<50))
{
// Is this a packet from the video stream?
if(packet.stream_index==videoStreamIdx)
{
//Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
//Did we get a video frame?
if(frameFinished)
{
i++;
i = i % 15;
// Convert the image from its native format to RGB
sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data,
pFrame->linesize, 0, pCodecCtx->height,
pFrameRGB->data, pFrameRGB->linesize);
// Save the frame to disk
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
}
}
//Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
//Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
//Free the YUV frame
// NOTE(review): av_frame_free() is the proper way to release AVFrames in
// this API generation; av_free() does not free attached data buffers.
av_free(pFrame);
sws_freeContext(img_convert_ctx);
//Close the codec
avcodec_close(pCodecCtx);
//Close the video file
avformat_close_input(&pFormatCtx);
/* END OF PROGRAM ("FINE PROGRAMMA") */
return 0;
}