ffmpeg 代码实现从视频中每1秒截一张图

4 篇文章 0 订阅
2 篇文章 0 订阅

背景:我这个代码是在我的Android demo 里截取出来的,项目里是实现了从相册里选择一个视频,然后每秒生成一张截图;当然这个代码在非android 项目也是可以使用的。

详情可以参考我的项目:https://gitee.com/niangegelaile/Demo  里的ffmpeg 模块;

头文件:decode_video_to_img.h

#ifndef DEMO_DECODE_VIDEO_TO_IMG_H
#define DEMO_DECODE_VIDEO_TO_IMG_H


/*
 * Decode the video at inputFileName and save one BMP screenshot per second
 * of playback.  Each image is written as "<outputFileName>-<seconds>.bmp"
 * at one third of the original resolution.
 * Returns 0 on completion; a negative AVERROR code on some failure paths
 * (errors are also reported to stderr / the Android log).
 */
int createImg(char* inputFileName, char * outputFileName);
#endif //DEMO_DECODE_VIDEO_TO_IMG_H

C文件:decode_video_to_img.c

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include "./ALog.h"


/* Fixed-width aliases matching the Windows BMP header field types. */
#define WORD uint16_t
#define DWORD uint32_t
#define LONG int32_t

/* 2-byte packing so the structs match the on-disk BMP header layout
 * (BITMAPFILEHEADER must be 14 bytes, BITMAPINFOHEADER 40 bytes). */
#pragma pack(2)
/* BMP file header: magic number, total size and offset to pixel data. */
typedef struct tagBITMAPFILEHEADER {
    WORD  bfType;       /* magic, must be 0x4d42 ("BM") */
    DWORD bfSize;       /* total file size in bytes */
    WORD  bfReserved1;
    WORD  bfReserved2;
    DWORD bfOffBits;    /* byte offset from file start to the pixel array */
} BITMAPFILEHEADER, *PBITMAPFILEHEADER;


/* BMP DIB header (BITMAPINFOHEADER variant) describing the pixel data. */
typedef struct tagBITMAPINFOHEADER {
    DWORD biSize;           /* size of this header (40) */
    LONG  biWidth;          /* image width in pixels */
    LONG  biHeight;         /* height; negative means top-down row order */
    WORD  biPlanes;         /* must be 1 */
    WORD  biBitCount;       /* bits per pixel (24 here) */
    DWORD biCompression;    /* 0 = BI_RGB (uncompressed) */
    DWORD biSizeImage;      /* pixel data size; may be 0 for BI_RGB */
    LONG  biXPelsPerMeter;
    LONG  biYPelsPerMeter;
    DWORD biClrUsed;
    DWORD biClrImportant;
} BITMAPINFOHEADER, *PBITMAPINFOHEADER;

/*
 * Write a BGR24 frame to disk as a 24-bit uncompressed BMP.
 * A negative biHeight marks the file as top-down, matching the frame's
 * natural row order, so no vertical flip is needed.
 * Returns 1 on success, 0 if the file could not be opened.
 *
 * Fixes over the original version:
 *  - rows are read through pFrameRGB->linesize[0], so frames with a padded
 *    stride are written correctly instead of skewed;
 *  - each BMP row is padded to a multiple of 4 bytes as the BMP format
 *    requires (width here is w/3, so 3*width is rarely 4-aligned);
 *  - whole rows are written with one fwrite instead of one call per byte.
 */
int saveAsBitmap(AVFrame *pFrameRGB, int width, int height, char *fileName)
{
    FILE *pFile = NULL;
    BITMAPFILEHEADER bmpheader;
    BITMAPINFOHEADER bmpinfo;

    const int bpp = 24;
    const int rowBytes = width * 3;
    /* BMP rows must be padded up to a multiple of 4 bytes. */
    const int paddedRowBytes = (rowBytes + 3) & ~3;
    static const uint8_t pad[3] = {0, 0, 0};

    pFile = fopen(fileName, "wb");
    if (!pFile)
        return 0;

    bmpheader.bfType = 0x4d42; /* 'BM' */
    bmpheader.bfReserved1 = 0;
    bmpheader.bfReserved2 = 0;
    bmpheader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
    bmpheader.bfSize = bmpheader.bfOffBits + (DWORD)paddedRowBytes * (DWORD)height;

    bmpinfo.biSize = sizeof(BITMAPINFOHEADER);
    bmpinfo.biWidth = width;
    bmpinfo.biHeight = -height; /* negative: top-down row order */
    bmpinfo.biPlanes = 1;
    bmpinfo.biBitCount = bpp;
    bmpinfo.biCompression = 0;  /* BI_RGB */
    bmpinfo.biSizeImage = (DWORD)paddedRowBytes * (DWORD)height;
    bmpinfo.biXPelsPerMeter = 100;
    bmpinfo.biYPelsPerMeter = 100;
    bmpinfo.biClrUsed = 0;
    bmpinfo.biClrImportant = 0;

    fwrite(&bmpheader, sizeof(BITMAPFILEHEADER), 1, pFile);
    fwrite(&bmpinfo, sizeof(BITMAPINFOHEADER), 1, pFile);

    for (int h = 0; h < height; h++)
    {
        /* advance by the frame's stride, not by width*3 */
        const uint8_t *row = pFrameRGB->data[0] + (size_t)h * pFrameRGB->linesize[0];
        fwrite(row, 1, (size_t)rowBytes, pFile);
        if (paddedRowBytes != rowBytes)
            fwrite(pad, 1, (size_t)(paddedRowBytes - rowBytes), pFile);
    }
    fclose(pFile);

    return 1;
}

/*
 * Convert a decoded frame (typically YUV420) to BGR24 at one third of its
 * size and write it to `filename` as a BMP via saveAsBitmap().
 *
 * Fixes over the original version:
 *  - av_malloc / av_frame_alloc results are checked before use;
 *  - the pixel buffer is released with av_free(buffer) and the frame with
 *    av_frame_free(); the old av_freep(&pFrameRGB[0]) freed the buffer only
 *    through a type pun on AVFrame's first field, and av_free(pFrameRGB)
 *    bypassed the proper frame destructor.
 */
void saveBMP(struct SwsContext *img_convert_ctx, AVFrame *frame, char *filename)
{
    /* 1: convert YUV420 => BGR24 at (w/3, h/3) */
    int w = frame->width;
    int h = frame->height;

    int numBytes = avpicture_get_size(AV_PIX_FMT_BGR24, w/3, h/3);
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    ALOGE("numbtyes:%d",numBytes);
    if (!buffer)
        return;

    AVFrame *pFrameRGB = av_frame_alloc();
    if (!pFrameRGB) {
        av_free(buffer);
        return;
    }
    /* wire buffer into pFrameRGB->data / linesize (no copy) */
    avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, w/3, h/3);

    sws_scale(img_convert_ctx, frame->data, frame->linesize,
              0, h, pFrameRGB->data, pFrameRGB->linesize);

    saveAsBitmap(pFrameRGB, w/3, h/3, filename);

    /* release resources: buffer is owned by us, not by the frame */
    av_free(buffer);
    av_frame_free(&pFrameRGB);
}

/*
 * Decode one packet into `frame`; when a complete frame comes out, save it
 * as "<outfilename>-<seconds>.bmp" via saveBMP().
 * Returns 1 when an image was written, 0 when the packet produced no frame
 * yet, or a negative error code when decoding failed.
 *
 * Fix over the original: frame->pts was read BEFORE avcodec_decode_video2,
 * so `second` held the previous frame's timestamp (uninitialized on the
 * first call).  The timestamp is now computed after a successful decode.
 * The pts log also used %ld for an int64_t, which is wrong on 32-bit ABIs.
 */
static int decode_write_frame(AVStream *stream,const char *outfilename, AVCodecContext *avctx,
                       struct SwsContext *img_convert_ctx, AVFrame *frame,  AVPacket *pkt, int last)
{
    ALOGE("call decode_write_frame ");
    int len, got_frame;
    char buf[1024];
    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    ALOGE("got_frame =%d",got_frame);
    ALOGE("len =%d",len);
    if (len < 0) {
        fprintf(stderr, "Error while decoding frame\n");
        return len;
    }
    if (got_frame) {
        /* only now does frame->pts belong to the decoded frame */
        double second = frame->pts * av_q2d(stream->time_base);
        ALOGE("frame->pts=%lld", (long long)frame->pts);
        ALOGE("秒=%lf",second);
        printf("Saving %sframe %lf\n", last ? "last " : "", second);
        fflush(stdout);
        ALOGE("outfilename=%s",outfilename);
        /* the picture is allocated by the decoder, no need to free it */
        snprintf(buf, sizeof(buf), "%s-%lf.bmp", outfilename, second);
        ALOGE("buf=%s",buf);
        saveBMP(img_convert_ctx, frame, buf);
        return 1;
    }
    return 0;
}

int createImg(char* inputFileName, char * outputFileName)
{
    int ret;
    AVFormatContext *fmt_ctx = NULL;
    const AVCodec *codec;
    AVCodecContext *c= NULL;
    AVStream *st = NULL;
    int stream_index;
    AVFrame *frame;
    struct SwsContext *img_convert_ctx;
    AVPacket avpkt;
    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, inputFileName, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", inputFileName);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, inputFileName, 0);

    av_init_packet(&avpkt);

    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(AVMEDIA_TYPE_VIDEO), inputFileName);
        return ret;
    }

    stream_index = ret;
    st = fmt_ctx->streams[stream_index];

    /* find decoder for the stream */
    codec = avcodec_find_decoder(st->codecpar->codec_id);
    if (!codec) {
        fprintf(stderr, "Failed to find %s codec\n",
                av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return AVERROR(EINVAL);
    }

    c = avcodec_alloc_context3(NULL);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* Copy codec parameters from input stream to output codec context */
    if ((ret = avcodec_parameters_to_context(c, st->codecpar)) < 0) {
        fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
                av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return ret;
    }

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    img_convert_ctx = sws_getContext(c->width, c->height,
                                     c->pix_fmt,
                                     c->width/3, c->height/3,
                                     AV_PIX_FMT_BGR24,
                                     SWS_BICUBIC, NULL, NULL, NULL);

    if (img_convert_ctx == NULL)
    {
        fprintf(stderr, "Cannot initialize the conversion context\n");
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    int from_seconds=0;
    ret=av_seek_frame(fmt_ctx, -1,from_seconds*AV_TIME_BASE+fmt_ctx->start_time, AVSEEK_FLAG_BACKWARD);
    ALOGE("av_seek_frame ret=%d",ret);
    while (ret>=0) {
        while((av_read_frame(fmt_ctx, &avpkt) )>= 0){
            ALOGE("av_read_frame ret=%d",ret);
            if(avpkt.stream_index == stream_index){
                int decodeResult=decode_write_frame(st,outputFileName,c,img_convert_ctx, frame, &avpkt, 0);
                if ( decodeResult< 0){
                    av_packet_unref(&avpkt);
                    goto end;
                }else if(decodeResult>0){
                    av_packet_unref(&avpkt);
                    break;
                } else{
                    av_packet_unref(&avpkt);
                }
            }
        }
        avpkt.data = NULL;
        avpkt.size = 0;
        from_seconds++;
        ALOGE("from_seconds =%d",from_seconds);
        if(from_seconds*AV_TIME_BASE+fmt_ctx->start_time>fmt_ctx->start_time+fmt_ctx->duration){
            goto end;
        }
        ret=av_seek_frame(fmt_ctx, -1,from_seconds*AV_TIME_BASE+fmt_ctx->start_time, AVSEEK_FLAG_BACKWARD);
    }
    end:
    avpkt.data = NULL;
    avpkt.size = 0;
    avformat_close_input(&fmt_ctx);
    sws_freeContext(img_convert_ctx);
    avcodec_free_context(&c);
    av_frame_free(&frame);
    return 0;
}

这个是构建一个方法供java调用:

extern "C"{
#include "./decode_video_to_img.h"
/*
 * JNI bridge: convert the Java strings to UTF-8 C strings and run
 * createImg(src, dst).  GetStringUTFChars may return NULL on allocation
 * failure (the original dereferenced the result unchecked), so each call
 * is verified and any acquired string is released on every path.
 */
JNIEXPORT void JNICALL
Java_com_example_ffmpeg_JniUtil_createImg(JNIEnv *env, jclass clazz, jstring srcFile,
                                          jstring dstFile) {
    const char *src_file = env->GetStringUTFChars(srcFile, 0);
    if (src_file == nullptr) {
        return; // OOM: a pending OutOfMemoryError has been thrown
    }
    const char *dst_file = env->GetStringUTFChars(dstFile, 0);
    if (dst_file == nullptr) {
        env->ReleaseStringUTFChars(srcFile, src_file);
        return;
    }
    createImg(const_cast<char *>(src_file), const_cast<char *>(dst_file));
    env->ReleaseStringUTFChars(srcFile, src_file);
    env->ReleaseStringUTFChars(dstFile, dst_file);
}
}

 

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 2
    评论
要在 C# 中使用 ffmpeg 实现图片转视频的功能,可以通过调用 ffmpeg 的命令行参数实现。以下是一个简单的示例代码: ```csharp using System.Diagnostics; public class ImageToVideoConverter { private Process process; private string ffmpegPath = "ffmpeg.exe"; // ffmpeg 可执行文件路径 private string imageFolder = "images"; // 图片文件夹路径 private string videoName = "output.mp4"; // 输出视频文件名 public void Convert() { string arguments = "-y -framerate 30 -i " + imageFolder + "/%d.png -c:v libx264 -pix_fmt yuv420p " + videoName; ProcessStartInfo processInfo = new ProcessStartInfo(ffmpegPath, arguments); processInfo.CreateNoWindow = true; processInfo.UseShellExecute = false; process = Process.Start(processInfo); } public void Stop() { if (process != null && !process.HasExited) { process.Kill(); process = null; } } } ``` 在上述代码中,Convert() 方法启动了一个新的进程,调用 ffmpeg 的命令行参数将指定文件夹中的图片转换为视频,并以 libx264 编码格式进行压缩。参数 "-framerate 30" 表示以每秒 30 帧的速度播放图片,"-pix_fmt yuv420p" 则表示使用常见的 YUV420P 颜色空间格式。 请注意,上述代码仅适用于 Windows 系统下的 ffmpeg 可执行文件,如果使用其他系统或版本的 ffmpeg,需要相应地修改命令行参数。同时,需要确保指定的图片文件夹中所有图片的命名方式是按数字顺序递增的,如 1.png、2.png、3.png 等。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值