Tutorial 01: Making Screencaps

FFmpeg's own ffplay.c is a very large example, but simplified examples are available online. The example from the link referenced at the end of this article is quite good, but it is not the most recent version; the latest version can be downloaded from

https://github.com/chelyaev/ffmpeg-tutorial


The FFmpeg and SDL versions I used when running this code:

ffmpeg 2.7.1

SDL 1.2


The code, after reformatting, is as follows:



// tutorial01.c
//
// This tutorial was written by Stephen Dranger (dranger@gmail.com).
//
// Code based on a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1

// A small sample program that shows how to use libavformat and libavcodec to
// read video from a file.
//
// Use the Makefile to build all examples.
//
// Run using
//
// tutorial01 myvideofile.mpg
//
// to write the first five frames from "myvideofile.mpg" to disk in PPM
// format.


extern "C"
{
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/dict.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "libswresample/swresample.h"

#include "SDL1.2/SDL.h"
#include "SDL1.2/SDL_thread.h"
}

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "SDL.lib")


#include <stdio.h>
#include <tchar.h>   // needed for _tmain on MSVC

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
    FILE *pFile;
    char szFilename[32];
    int  y;

    // Open file
    sprintf(szFilename, "E:\\temp\\ppm\\frame%d.ppm", iFrame);
    pFile=fopen(szFilename, "wb");
    if(pFile==NULL)
        return;

    // Write header
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    // Write pixel data
    for(y=0; y<height; y++)
        fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);

    // Close file
    fclose(pFile);
}

int _tmain() {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVFrame         *pFrameRGB = NULL;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer = NULL;

    AVDictionary    *optionsDict = NULL;
    struct SwsContext      *sws_ctx = NULL;

    char szFile[] = "song.flv";
    //if(argc < 2) {
    //    printf("Please provide a movie file\n");
    //    return -1;
    //}
    // Register all formats and codecs
    av_register_all();

    // Open video file
    if( avformat_open_input(&pFormatCtx, szFile, NULL, NULL) != 0 ) {
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if( avformat_find_stream_info(pFormatCtx, NULL) < 0 ) {
        return -1; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, szFile, 0);

    // Find the first video stream
    videoStream = -1;
    for( i = 0; i < pFormatCtx->nb_streams; i ++) {
        if( pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO ) {
            videoStream = i;
            break;
        }
    }
    if( videoStream == -1 ) {
        return -1; // Didn't find a video stream
    }
    
    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if( pCodec == NULL ) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec; "opening" the decoder essentially assigns pCodec to the codec member of pCodecCtx
    if( avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0 ) {
        return -1; // Could not open codec
    }

    // Allocate video frame
    pFrame = av_frame_alloc();
    if( pFrame == NULL ) {
        return -1;
    }

    // Allocate an AVFrame structure
    pFrameRGB = av_frame_alloc();
    if( pFrameRGB == NULL ) {
        return -1;
    }

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
        pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    sws_ctx =
        sws_getContext
        (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        PIX_FMT_RGB24,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
        );

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
        pCodecCtx->width, pCodecCtx->height);

    // Read frames and save first five frames to disk
    i = 0;
    while( av_read_frame(pFormatCtx, &packet) >= 0 ) {
        // Is this a packet from the video stream?
        if( packet.stream_index == videoStream ) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
                &packet);

            // Did we get a video frame?
            if( frameFinished ) {
                // Convert the image from its native format to RGB
                sws_scale
                    (
                    sws_ctx,
                    (uint8_t const * const *)pFrame->data,
                    pFrame->linesize,
                    0,
                    pCodecCtx->height,
                    pFrameRGB->data,
                    pFrameRGB->linesize
                    );

                // Save the frame to disk
                if( ++i <= 5 ) {
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, 
                    i);
                }
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    // Free the scaling context
    sws_freeContext(sws_ctx);

    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}

A few important functions, explained briefly:

avformat_open_input: opens the input and reads its header into an AVFormatContext. No codec is opened at this point; before the program exits, the AVFormatContext opened here must be closed with avformat_close_input().
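
For example, a minimal sketch of this open/close pairing with error reporting added (ret and errbuf are illustrative additions; pFormatCtx and the file name follow the code above):

int ret = avformat_open_input(&pFormatCtx, "song.flv", NULL, NULL);
if( ret != 0 ) {
    char errbuf[128];
    av_strerror(ret, errbuf, sizeof(errbuf));  // turn the error code into readable text
    fprintf(stderr, "Could not open input: %s\n", errbuf);
    return -1;
}
// ... demux and decode ...
avformat_close_input(&pFormatCtx);  // frees the context and resets pFormatCtx to NULL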

avformat_find_stream_info: avformat_open_input only reads the stream header and does not examine the streams themselves, so we use this function to probe them and fill in the stream information.
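
A small sketch of what becomes available after the probe (the stream count and duration are read straight from pFormatCtx; the printf lines are illustrative):

if( avformat_find_stream_info(pFormatCtx, NULL) < 0 ) {
    fprintf(stderr, "Could not find stream information\n");
    return -1;
}
// After this call the per-stream codec parameters are filled in
printf("streams:  %u\n", pFormatCtx->nb_streams);
printf("duration: %.2f seconds\n", (double)pFormatCtx->duration / AV_TIME_BASE);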


avcodec_find_decoder

avcodec_open2

Look up the decoder and open it. "Opening" the decoder here essentially assigns pCodec to the codec member of pCodecCtx.
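
A sketch of the lookup-then-open pair (the printf line is an illustrative addition); after avcodec_open2() succeeds, pCodecCtx->codec points at the decoder that was found:

pCodec = avcodec_find_decoder(pCodecCtx->codec_id);  // look up by the codec ID stored in the stream
if( pCodec == NULL ) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
}
if( avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0 ) {  // binds pCodec to pCodecCtx->codec
    return -1;
}
printf("decoder: %s\n", pCodecCtx->codec->name);  // e.g. "h264" or "flv"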


avpicture_get_size: since we want the frame data in RGB, we need to determine the buffer size of an RGB image with the video's dimensions.
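
For PIX_FMT_RGB24 the required size works out to width * height * 3 bytes; a short sketch of the size-then-allocate step used above:

numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
// For a 640x480 video this is 640 * 480 * 3 = 921600 bytes
buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));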


sws_getContext: obtains a conversion context; with this context the decoded frame data can be converted into RGB image data.
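
The context is created once for a fixed source/destination size and pixel format, and released with sws_freeContext() when no longer needed; a compact sketch of the call used above:

sws_ctx = sws_getContext(
    pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,  // source size and pixel format
    pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24,       // destination size and pixel format
    SWS_BILINEAR, NULL, NULL, NULL);
// ... sws_scale() calls ...
sws_freeContext(sws_ctx);  // release the context when done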


av_read_frame: reads the next packet from the file; we call it in a loop to walk through every frame.
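
The reading loop is just the skeleton below; every packet returned by av_read_frame() must be released with av_free_packet() (renamed av_packet_unref() in newer FFmpeg versions):

while( av_read_frame(pFormatCtx, &packet) >= 0 ) {
    if( packet.stream_index == videoStream ) {
        // hand the packet to the decoder here
    }
    av_free_packet(&packet);  // release the buffer allocated by av_read_frame
}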


avcodec_decode_video2: decodes a packet into a frame. Note that because the stream contains I, P and B frames, one packet does not always produce a complete picture, so frameFinished is not always non-zero.
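
Because of this reordering the decoder may hold frames back, so frameFinished must be checked for every packet. A hedged sketch (not in the program above) of draining any delayed frames with an empty packet once av_read_frame() reports end of file:

// After the av_read_frame() loop ends, flush any frames the decoder is still holding
AVPacket flushPacket;
av_init_packet(&flushPacket);
flushPacket.data = NULL;
flushPacket.size = 0;

frameFinished = 1;
while( frameFinished ) {
    if( avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &flushPacket) < 0 )
        break;
    if( frameFinished ) {
        // convert and save the delayed frame exactly as in the main loop
    }
}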


sws_scale: converts YUV to RGB.


SaveFrame: writes the RGB data to disk as a PPM file.



Building on this example, the data could also be saved in other formats such as JPG, BMP or raw YUV; a sketch for raw YUV is shown below.
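
For example, a hedged sketch of saving a decoded frame as raw planar YUV (this assumes pCodecCtx->pix_fmt is actually PIX_FMT_YUV420P, where the chroma planes are half the width and height of the luma plane):

void SaveFrameYUV420P(AVFrame *pFrame, int width, int height, FILE *pFile) {
    int y;
    // Y plane: width x height bytes
    for(y=0; y<height; y++)
        fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width, pFile);
    // U plane: (width/2) x (height/2) bytes
    for(y=0; y<height/2; y++)
        fwrite(pFrame->data[1]+y*pFrame->linesize[1], 1, width/2, pFile);
    // V plane: (width/2) x (height/2) bytes
    for(y=0; y<height/2; y++)
        fwrite(pFrame->data[2]+y*pFrame->linesize[2], 1, width/2, pFile);
}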



Reference: http://dranger.com/ffmpeg/tutorial01.html
