声明:版权归dranger大神,参考地址为:http://dranger.com/ffmpeg/tutorial01.html, 我只是将教程中不适合0.7版本ffmpeg的api的代码改为可在0.7版本ffmpeg下编译运行的程序,
编译环境为:centos 5.9, ffmpeg版本:0.7.15, x264版本:20110627
代码如下:
// tutorial01.c
// Code based on a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
// A small sample program that shows how to use libavformat and libavcodec to
// read video from a file.
//
// Use
//
// gcc -o tutorial01 tutorial01.c -lavformat -lavcodec -lz
//
// to build (assuming libavformat and libavcodec are correctly installed
// your system).
//
// Run using
//
// tutorial01 myvideofile.mpg
//
// to write the first five frames from "myvideofile.mpg" to disk in PPM
// format.
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <SDL/SDL.h>
// Scaler algorithm passed to sws_getContext(); bicubic is a reasonable
// quality/speed trade-off for one-shot frame conversion.
static int sws_flags = SWS_BICUBIC;
// Write a single RGB24 frame to disk as a binary PPM (P6) file named
// frame<iFrame>.ppm.  Rows are copied one at a time because
// pFrame->linesize[0] may be larger than width*3 (stride padding).
// Silently returns if the file cannot be opened.
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
    FILE *pFile;
    char szFilename[32];
    int y;

    // FIX: snprintf instead of sprintf guards against buffer overflow.
    snprintf(szFilename, sizeof(szFilename), "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    // PPM header: magic number, dimensions, maximum color value.
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    // Pixel data, one row at a time (skips any stride padding).
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    fclose(pFile);
}
int main(int argc, char *argv[]) {
AVFormatContext *pFormatCtx;
int i, videoStream;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVFrame *pFrameRGB;
AVPacket packet;
int frameFinished;
int numBytes;
uint8_t *buffer;
if(argc < 2) {
printf("Please provide a movie file\n");
return -1;
}
// Register all formats and codecs
av_register_all();
// Open video file
if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
{
printf("error to open the file\n");
return -1; // Couldn't open file
}
printf("1\n");
// Retrieve stream information
if(av_find_stream_info(pFormatCtx)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
dump_format(pFormatCtx, 0, argv[1], 0);
// Find the first video stream
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
videoStream=i;
break;
}
if(videoStream==-1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if(avcodec_open(pCodecCtx, pCodec)<0)
return -1; // Could not open codec
// Allocate video frame
pFrame=avcodec_alloc_frame();
// Allocate an AVFrame structure
pFrameRGB=avcodec_alloc_frame();
if(pFrameRGB==NULL)
return -1;
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx->width, pCodecCtx->height);
// Read frames and save first five frames to disk
static struct SwsContext *img_convert_ctx;
if (img_convert_ctx == NULL)
{
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width, pCodecCtx->height,
// PIX_FMT_YUV420P,
PIX_FMT_RGB24,
sws_flags, NULL, NULL, NULL);
if (img_convert_ctx == NULL)
{
fprintf(stderr, "Cannot initialize the conversion context\n");
exit(1);
}
}
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGB
// img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24, \
(AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, \
pCodecCtx->height);
sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data,\
pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
// Save the frame to disk
if(++i<=5)
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
i);
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
return 0;
}
makefile为
# FIX: tutorial01 added to the default target so `make` builds it too.
all: ffmpeg_test ffplay tutorial01

CXX=gcc
CXXFLAGS= -c -g -O0 -fPIC -I/usr/local/include -L/usr/local/lib
LIBS= -lavcodec -lavutil -lavfilter -lavformat -lavdevice -lswscale -lpthread -L/usr/local/lib

.c.o:
	$(CXX) $(CXXFLAGS) $<

ffmpeg_test:ffmpeg.cpp
	g++ -g -O0 -fPIC -I/usr/local/include -o $@ $^ $(LIBS)

ffplay:ffplay.cpp
	g++ -g -O0 -fPIC -I/usr/local/include -o $@ $^ $(LIBS)

# FIX: include-path typo (/usr/lcoal -> /usr/local)
tutorial01:tutorial01.c
	gcc -g -O0 -fPIC -I/usr/local/include -o $@ $^ $(LIBS)

clean:
	rm -f *.o
	rm -f *~
	rm -f ffmpeg_test
	rm -f ffplay
	rm -f *.ppm
	rm -f tutorial01
# FIX above: clean removed "tutotrial01" (typo), leaving the real binary behind.
编译 make tutorial01
运行为 ./tutorial01 input.avi
如果没有错误即可产生5张图片。
本人不才,想取出帧后存为bmp图像,又比较懒,只是写了bmp文件的头,将pFrame->data[0]内容写入,最后产生的bmp图像的内容是上下相反的。代码如下,仅仅改变了存图像的saveframe函数:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <SDL/SDL.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavcodec/avcodec.h>
// On-disk BMP file header (14 bytes).  #pragma pack(1) is mandatory here:
// bfType is only 2 bytes, so without packing the compiler would insert
// padding before bfSize and fwrite(&bfh, 14, ...) would no longer match
// the BMP file format.
// NOTE(review): field widths assume short==2 and int==4 bytes (true on
// the stated centos 5.9 target); fixed-width <stdint.h> types would be
// more portable — confirm before reuse elsewhere.
#pragma pack (1)
typedef struct BITMAPFILEHEADER
{
short bfType;       // magic "BM" (0x4D42)
int bfSize;         // total file size in bytes
short bfReserved1;  // must be 0
short bfReserved2;  // must be 0
int bfOffBits;      // byte offset from file start to the pixel data
} BITMAPFILEHEADER;
#pragma pack ()
// On-disk BMP info header (must be exactly 40 bytes).
// FIX: biWidth/biHeight/biXPelsPerMeter/biYPelsPerMeter were declared
// `long`, which is 8 bytes on 64-bit Linux (LP64); the struct then no
// longer matches the 40-byte on-disk layout, and fwrite(&bih, 40, ...)
// writes a corrupted header.  `int` is 4 bytes on every relevant target.
typedef struct BITMAPINFOHEADER
{
    int biSize;            // size of this header: 40
    int biWidth;           // image width in pixels
    int biHeight;          // image height; positive => rows stored bottom-up
    short biPlanes;        // always 1
    short biBitCount;      // bits per pixel (24 here)
    int biCompression;     // 0 = BI_RGB (uncompressed)
    int biSizeImage;       // pixel-data size in bytes (rows incl. padding)
    int biXPelsPerMeter;   // resolution; 0 = unspecified
    int biYPelsPerMeter;   // resolution; 0 = unspecified
    int biClrUsed;         // 0 = all colors for this bit depth
    int biClrImportant;    // 0 = all colors are important
} BITMAPINFOHEADER;
// Write one RGB24 frame to disk as frame<iFrame>.bmp.
// Fixes relative to the original version:
//  - bfSize/biSizeImage counted width*height bytes instead of
//    width*height*3 (24 bpp) and ignored BMP row padding;
//  - rows were written top-to-bottom, but a BMP with positive biHeight
//    stores rows bottom-up — this is why the saved image was flipped;
//  - BMP stores pixels as BGR while sws_scale produced RGB, so red and
//    blue were swapped;
//  - pFrame->linesize[0] is now honored instead of assuming a packed
//    width*3 stride.
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
    FILE *pFile;
    char szFilename[32];
    int x, y;
    BITMAPFILEHEADER bfh;
    BITMAPINFOHEADER bih;
    // Each BMP row must be padded to a multiple of 4 bytes.
    int pad = (4 - (width * 3) % 4) % 4;
    int rowSize = width * 3 + pad;
    unsigned char padBytes[3] = {0, 0, 0};

    // FIX: snprintf instead of sprintf guards against buffer overflow.
    snprintf(szFilename, sizeof(szFilename), "frame%d.bmp", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    bfh.bfType = 0x4D42;                      // "BM"
    bfh.bfSize = 14 + 40 + rowSize * height;  // FIX: was 14 + 40 + width*height
    bfh.bfOffBits = 14 + 40;
    bfh.bfReserved1 = 0;
    bfh.bfReserved2 = 0;
    fwrite(&bfh, 14, 1, pFile);               // file header ("BM", 0x4D42)

    bih.biSize = 40;
    bih.biWidth = width;
    bih.biHeight = height;                    // positive => bottom-up row order
    bih.biPlanes = 1;
    bih.biBitCount = 24;
    bih.biCompression = 0;
    bih.biSizeImage = rowSize * height;       // FIX: include bpp and padding
    bih.biXPelsPerMeter = 0;
    bih.biYPelsPerMeter = 0;
    bih.biClrUsed = 0;
    bih.biClrImportant = 0;
    fwrite(&bih, 40, 1, pFile);               // info header

    // Write rows bottom-to-top and swap R<->B so the stored pixels are
    // BGR, as the BMP format requires.
    for (y = height - 1; y >= 0; y--) {
        const uint8_t *row = pFrame->data[0] + y * pFrame->linesize[0];
        for (x = 0; x < width; x++) {
            unsigned char bgr[3];
            bgr[0] = row[x * 3 + 2]; // B
            bgr[1] = row[x * 3 + 1]; // G
            bgr[2] = row[x * 3 + 0]; // R
            fwrite(bgr, 1, 3, pFile);
        }
        if (pad)
            fwrite(padBytes, 1, pad, pFile);
    }

    fclose(pFile);
}
// Entry point for the BMP variant: same decode pipeline as the PPM
// tutorial (ffmpeg 0.7 API), but frames are saved through the BMP
// SaveFrame().  Returns 0 on success; error paths exit with status 1.
int main(int argc, char *argv[])
{
    AVFormatContext *pFormatCtx;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameRGB;
    AVPacket packet;
    int numBytes;
    uint8_t *buffer;
    int frameFinished;
    struct SwsContext *img_convert_ctx = NULL;

    if (argc < 2) {
        printf("please open a movie file\n");
        exit(1); // FIX: error paths exited with 0 (success status)
    }

    // Register all formats and codecs (must run once before other calls).
    av_register_all();

    if (av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL) != 0) {
        printf("error to open the file\n");
        exit(1);
    }
    if (av_find_stream_info(pFormatCtx) < 0) {
        printf("error to find the stream info of the file\n");
        exit(1);
    }

    // Dump information about the file onto standard error.
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream.
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1) {
        printf("unsupported File format\n");
        exit(1);
    }

    // Get the codec context for the video stream and open a decoder.
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec\n");
        exit(1);
    }
    if (avcodec_open(pCodecCtx, pCodec) < 0) {
        printf("error to open codec\n");
        exit(1);
    }

    // Allocate the decoded (native format) and converted (RGB) frames.
    pFrame = avcodec_alloc_frame();
    pFrameRGB = avcodec_alloc_frame();
    if (pFrame == NULL || pFrameRGB == NULL) { // FIX: pFrame was unchecked
        printf("unable to alloc memory for pFrameRGB\n");
        exit(1);
    }

    // Allocate the raw RGB buffer and wire it into pFrameRGB's planes.
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    if (buffer == NULL) // FIX: allocation result was never checked
        exit(1);
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    // One scaler context converts every frame to RGB24 at native size.
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                     pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height,
                                     PIX_FMT_RGB24, SWS_BICUBIC,
                                     NULL, NULL, NULL);
    if (img_convert_ctx == NULL) {
        printf("error to initialize the conversion context \n");
        exit(1);
    }

    // Read packets; decode video packets and save the first five frames.
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == videoStream) {
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                 packet.data, packet.size);
            if (frameFinished) {
                sws_scale(img_convert_ctx, (const uint8_t *const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                if (++i <= 5)
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
            }
        }
        // Packet data is owned by av_read_frame; release it each iteration.
        av_free_packet(&packet);
    }

    // Clean up in reverse order of acquisition.
    sws_freeContext(img_convert_ctx); // FIX: the scaler context was leaked
    av_free(buffer);
    av_free(pFrameRGB);
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    av_close_input_file(pFormatCtx);
    return 0;
}