配置为ubuntu11.10,ffmpeg版本为0.8.3。
由于ffmpeg官网上的avcodec_sample.c例子好多年没人更新过了,现在ffmpeg已更新了好多,编译出来N多函数找不到,错误百出,结果在以下网站找到2009年有人更新过这个例子(google上搜avcodec_sample可搜到):
http://www.google.com.hk/url?sa=t&source=web&cd=1&ved=0CBoQFjAA&url=http%3A%2F%2Fweb.me.com%2Fdhoerl%2FHome%2FTech_Blog%2FEntries%2F2009%2F1%2F22_Revised_avcodec_sample.c_files%2Favcodec_sample.0.5.0.c&rct=j&q=avcodec_sample&ei=bppwTubbLeioiAeypvWgBg&usg=AFQjCNGzwZJZbXQR76ugoiLMInowNcdYug&cad=rjt
注意事项:
1.用g++编译,需要对使用的ffmpeg头文件进行extern "C"处理;
2.用g++编译,需要加上对UINT64_C的宏定义;
3.链接时要注意链接顺序,不然会出错,一般顺序为:-lavformat -lavcodec -lavutil -lswscale -lz -lm -lpthread
============================================================================
略改2009年的版本例子,即可编译通过,例子如下
============================================================================
// avcodec_sample.0.5.0.c
// A small sample program that shows how to use libavformat and libavcodec to
// read video from a file.
//
// This version is for the 0.4.9+ release of ffmpeg. This release adds the
// av_read_frame() API call, which simplifies the reading of video frames
// considerably.
//
// Use
//
// gcc -o avcodec_sample.0.5.0 avcodec_sample.0.5.0.c -lavformat -lavcodec -lavutil -lswscale -lz -lbz2
//
// to build (assuming libavformat, libavcodec, libavutil, and swscale are correctly installed on
// your system).
//
// Run using
//
// avcodec_sample.0.5.0 myvideofile.mpg
//
// to decode "myvideofile.mpg" and write decoded frames (after skipping the
// first 8000 — see the SaveFrame call below) to disk in PPM format.
// UINT64_C/INT64_C are C99 integer-constant macros (e.g. UINT64_C(5) ->
// 5ULL) that the ffmpeg headers rely on.  Pre-C++11 g++ only exposes them
// from <stdint.h> when __STDC_CONSTANT_MACROS is defined first; redefining
// UINT64_C as a type name (`uint64_t`) breaks preprocessor arithmetic and
// constant expressions, so request the real macros the standard way.
#define __STDC_CONSTANT_MACROS
#include <stdint.h>
// ffmpeg is a C library: disable C++ name mangling for its headers.
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame);
// Decode the video file named on the command line using libavformat /
// libavcodec (ffmpeg 0.8.x API), convert each decoded frame to RGB24 with
// libswscale, and dump frames past the first 8000 to disk as PPM files via
// SaveFrame().  Returns 0 on success, -1 on any setup failure.
int main(int argc, const char * argv[])
{
    // MUST start out NULL: avformat_open_input() treats a non-NULL *ps as a
    // caller-allocated context, so an uninitialized pointer here is UB.
    AVFormatContext *pFormatCtx = NULL;
    unsigned int i;
    int videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameRGB;
    AVPacket packet;
    int frameFinished;
    int numBytes;
    uint8_t *buffer;
    struct SwsContext *img_convert_ctx = NULL; // created lazily on first frame

    // Guard against a missing filename argument (argv[1] would be NULL).
    if (argc < 2)
    {
        fprintf(stderr, "usage: %s <videofile>\n", argv[0]);
        return -1;
    }
    // Register all formats and codecs
    av_register_all();
    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
    {
        return -1; // Couldn't open file
    }
    // Retrieve stream information
    if (av_find_stream_info(pFormatCtx) < 0)
    {
        return -1; // Couldn't find stream information
    }
    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], false);
    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
    {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoStream = i;
            break;
        }
    }
    if (-1 == videoStream)
    {
        return -1; // Didn't find a video stream
    }
    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (NULL == pCodec)
    {
        return -1; // Codec not found
    }
    // Open codec
    if (avcodec_open(pCodecCtx, pCodec) < 0)
    {
        return -1; // Could not open codec
    }
    // Hack to correct wrong frame rates that seem to be generated by some codecs
    if (pCodecCtx->time_base.num > 1000 && pCodecCtx->time_base.den == 1)
    {
        pCodecCtx->time_base.den = 1000;
    }
    // Allocate the decode frame and the RGB conversion frame.  Both can
    // fail; the original only checked pFrameRGB.
    pFrame = avcodec_alloc_frame();
    pFrameRGB = avcodec_alloc_frame();
    if (pFrame == NULL || pFrameRGB == NULL)
    {
        return -1; // Out of memory
    }
    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
    buffer = (uint8_t *) malloc(numBytes);
    if (buffer == NULL)
    {
        return -1; // Out of memory
    }
    // Assign appropriate parts of buffer to image planes in pFrameRGB
    avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);
    // Read packets; convert each complete decoded frame to RGB24 and,
    // past the first 8000 frames, save it to disk.
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream)
        {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            // Did we get a complete video frame?
            if (frameFinished)
            {
                // Create the software-scaler context once, on first use.
                if (img_convert_ctx == NULL)
                {
                    img_convert_ctx = sws_getContext(
                        pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                        pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24,
                        SWS_BICUBIC, NULL, NULL, NULL);
                    if (img_convert_ctx == NULL)
                    {
                        fprintf(stderr, "Cannot initialize the conversion context!\n");
                        exit(1);
                    }
                }
                // Convert the image from its native pixel format to RGB24.
                sws_scale(img_convert_ctx, pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                // Skip the first 8000 frames, then save every one (the
                // original upstream sample saved the first five instead).
                if (i++ >= 8000)
                {
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
                }
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }
    // Release the scaler context (previously leaked).
    if (img_convert_ctx != NULL)
    {
        sws_freeContext(img_convert_ctx);
    }
    // Free the RGB image
    free(buffer);
    av_free(pFrameRGB);
    // Free the YUV frame
    av_free(pFrame);
    // Close the codec
    avcodec_close(pCodecCtx);
    // Close the video file
    av_close_input_file(pFormatCtx);
    return 0;
}
// Write one RGB24 frame to disk as a binary PPM ("P6") image named
// frame<iFrame>.ppm in the current directory.
//
// pFrame  - frame whose data[0]/linesize[0] hold packed RGB24 pixels
// width   - frame width in pixels (3 bytes per pixel are written per row)
// height  - number of rows to write
// iFrame  - frame number used to build the output filename
//
// Silently returns if the output file cannot be opened.
static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
    FILE *pFile;
    char szFilename[32];
    int y;
    // Build the output name; snprintf bounds the write (sprintf could
    // overflow the 32-byte buffer for extreme frame numbers).
    snprintf(szFilename, sizeof szFilename, "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
    {
        return;
    }
    // PPM header: magic number, dimensions, max channel value.
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);
    // Rows may be padded in memory (linesize[0] >= width*3), so emit
    // exactly width*3 bytes per row rather than one bulk write.
    for (y = 0; y < height; y++)
    {
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);
    }
    // Close file
    fclose(pFile);
}