Understanding the ffmpeg C++ examples

Preface

Building ffmpeg requires installing quite a few third-party libraries, plus a fair amount of configuration.

Building

ffmpeg

../configure --prefix=$DIR/ffmpeg_opt --enable-shared --enable-static \
  --extra-ldflags=-L$DIR/x264_opt/lib --extra-cflags=-I$DIR/x264_opt/include \
  --extra-cflags=' -pipe -O3 -fPIC' \
  --extra-cxxflags=' -pipe -O3 -fPIC' \
  --enable-pthreads --enable-zlib --enable-pic \
  --enable-gpl --enable-version3 --enable-hardcoded-tables --enable-libfreetype \
  --enable-optimizations --disable-doc \
  --enable-libx264

x264

../configure --prefix="/home/wangpengrui/bin/x264_opt" --enable-shared  --enable-static --enable-pic --extra-cflags='-O3'

opencv

cmake -D CMAKE_BUILD_TYPE=Release \
    -D CMAKE_INSTALL_PREFIX=$prefix \
    -D USE_O3=ON \
    -D ENABLE_CXX11=ON \
    -D WITH_TBB=ON \
    -D WITH_IPP=ON \
    -D WITH_OPENMP=ON \
    -D WITH_WEBP=OFF \
    -D BUILD_TIFF=ON \
    -D ENABLE_FAST_MATH=ON \
    -D BUILD_EXAMPLES=OFF \
    -D BUILD_DOCS=OFF \
    -D BUILD_PERF_TESTS=OFF \
    -D BUILD_TESTS=OFF ..
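
The OpenCV build is not used by the ffmpeg examples in this post, but it does give a quick way to sanity-check the JPEGs produced by video2img further down. A tiny viewer-style probe (just a sketch of mine, assuming the install prefix above is on the include/lib path) could look like:

// check_img.cpp -- report the size of an image that OpenCV can decode
#include <cstdio>
#include <opencv2/opencv.hpp>

int main(int argc, char **argv) {
    if (argc < 2) {
        std::fprintf(stderr, "usage: %s <image>\n", argv[0]);
        return 1;
    }
    // imread() returns an empty Mat on failure, so this doubles as a decode check
    cv::Mat img = cv::imread(argv[1]);
    if (img.empty()) {
        std::fprintf(stderr, "could not decode %s\n", argv[1]);
        return 1;
    }
    std::printf("%s: %d x %d, %d channel(s)\n", argv[1], img.cols, img.rows, img.channels());
    return 0;
}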

When testing, the overall compile-and-run workflow looks roughly like this:

Dffmpeg="xxx/bin/ffmpeg"
inp_ffmpeg="$Dffmpeg/include/"
lib_ffmpeg="$Dffmpeg/lib/"
lib_mp3="xxx/libmp3lame/lib" # the x264 libraries ended up here as well
g++ -std=c++11 -I $inp_ffmpeg src/$NM.cpp -L $lib_ffmpeg -L $lib_mp3 -lavdevice -lavfilter -lavformat -lavcodec -lswresample -lswscale -lavutil -lx264 -o bin/$NM

echo "Will run ..."
export LD_LIBRARY_PATH="$lib_ffmpeg:$lib_mp3:$LD_LIBRARY_PATH"
# run it: bin/$NM tmp/a.mp4 "libx264"
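
Before running the real examples it is worth confirming that the g++ line above really links against the freshly built libraries rather than a system copy. A small probe compiled with the same flags (a sketch; the file name is arbitrary) prints the library versions and the configure line baked into libavcodec:

// probe.cpp -- print the versions of the linked FFmpeg libraries
#include <cstdio>
extern "C" {
#include <libavutil/avutil.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

int main() {
    // the version numbers are packed as major<<16 | minor<<8 | micro
    std::printf("libavutil   %u\n", avutil_version());
    std::printf("libavcodec  %u\n", avcodec_version());
    std::printf("libavformat %u\n", avformat_version());
    // shows the exact ./configure options the library was built with
    std::printf("configuration: %s\n", avcodec_configuration());
    return 0;
}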

encode_video

The AVFrame holds raw picture data (generated synthetically here). The frame is handed to the codec context, which was configured with the chosen encoder; the encoder turns it into AVPackets of compressed data, and those packets are what get written to the output file. At the end a NULL frame is sent to flush the encoder.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C"{
#include <libavcodec/avcodec.h>

#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}

// av_err2str() is defined as a macro using a compound literal, which C++ rejects; redefine it as a function
#ifdef av_err2str
#undef av_err2str
av_always_inline char* av_err2str(int errnum)
{
    // static char str[AV_ERROR_MAX_STRING_SIZE];
    // thread_local may be better than static in multi-thread circumstance
    thread_local char str[AV_ERROR_MAX_STRING_SIZE]; 
    memset(str, 0, sizeof(str));
    return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, errnum);
}
#endif

static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *outfile)
{
    int ret;

    /* send the frame to the encoder */
    if (frame)
        printf("Send frame %3" PRId64 "\n", frame->pts);

    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame for encoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during encoding\n");
            exit(1);
        }

        printf("Write packet %3" PRId64 " (size=%5d)\n", pkt->pts, pkt->size);
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);
    }
}

int main(int argc, char **argv)
{
    const char *filename, *codec_name;
    const AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y;
    FILE *f;
    AVFrame *frame;
    AVPacket *pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <output file> <codec name>\n", argv[0]);
        exit(0);
    }
    filename = argv[1];
    codec_name = argv[2];

    /* find the encoder by name (e.g. libx264 or mpeg1video) */
    codec = avcodec_find_encoder_by_name(codec_name);
    if (!codec) {
        fprintf(stderr, "Codec '%s' not found\n", codec_name);
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base = (AVRational){1, 25};
    c->framerate = (AVRational){25, 1};

    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if (codec->id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate the video frame data\n");
        exit(1);
    }

    /* encode 1 second of video */
    for (i = 0; i < 25; i++) {
        fflush(stdout);

        /* make sure the frame data is writable */
        ret = av_frame_make_writable(frame);
        if (ret < 0)
            exit(1);

        /* prepare a dummy image */
        /* Y */
        for (y = 0; y < c->height; y++) {
            for (x = 0; x < c->width; x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for (y = 0; y < c->height/2; y++) {
            for (x = 0; x < c->width/2; x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        frame->pts = i;

        /* encode the image */
        encode(c, frame, pkt, f);
    }

    /* flush the encoder */
    encode(c, NULL, pkt, f);

    /* add sequence end code to have a real MPEG file */
    if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
        fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_free_context(&c);
    av_frame_free(&frame);
    av_packet_free(&pkt);

    return 0;
}
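
One note on the hard-coded AV_PIX_FMT_YUV420P: both libx264 and mpeg1video accept it, but other encoders may not. A small helper along these lines (the function name is mine, not from the FFmpeg examples) can check the encoder's advertised pixel-format list before avcodec_open2():

extern "C" {
#include <libavcodec/avcodec.h>
}

/* Returns 1 if the encoder lists the pixel format (or publishes no list at all). */
static int encoder_supports_pix_fmt(const AVCodec *codec, enum AVPixelFormat fmt)
{
    const enum AVPixelFormat *p = codec->pix_fmts;
    if (!p)
        return 1; /* no list published; we cannot rule the format out */
    for (; *p != AV_PIX_FMT_NONE; p++)
        if (*p == fmt)
            return 1;
    return 0;
}

/* e.g. before opening the codec:
 *   if (!encoder_supports_pix_fmt(codec, AV_PIX_FMT_YUV420P))
 *       fprintf(stderr, "encoder does not advertise yuv420p\n");
 */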

video2img

Decode a video and save its frames as JPEG images.

extern "C"{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
#include <libavutil/audio_fifo.h>
}

// This works, provided the input video contains no audio stream
int saveJpg(AVFrame *pFrame, char *out_name) {

    int width = pFrame->width;
    int height = pFrame->height;
    AVCodecContext *pCodeCtx = NULL;


    AVFormatContext *pFormatCtx = avformat_alloc_context();
    // set the output file format (MJPEG)
    pFormatCtx->oformat = av_guess_format("mjpeg", NULL, NULL);

    // create and open the output AVIOContext
    if (avio_open(&pFormatCtx->pb, out_name, AVIO_FLAG_READ_WRITE) < 0) {
        printf("Couldn't open output file.");
        return -1;
    }

    // create a new output stream
    AVStream *pAVStream = avformat_new_stream(pFormatCtx, 0);
    if (pAVStream == NULL) {
        return -1;
    }

    AVCodecParameters *parameters = pAVStream->codecpar;
    parameters->codec_id = pFormatCtx->oformat->video_codec;
    parameters->codec_type = AVMEDIA_TYPE_VIDEO;
    parameters->format = AV_PIX_FMT_YUVJ420P;
    parameters->width = pFrame->width;
    parameters->height = pFrame->height;

    const AVCodec *pCodec = avcodec_find_encoder(pAVStream->codecpar->codec_id);

    if (!pCodec) {
        printf("Could not find encoder\n");
        return -1;
    }

    pCodeCtx = avcodec_alloc_context3(pCodec);
    if (!pCodeCtx) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if ((avcodec_parameters_to_context(pCodeCtx, pAVStream->codecpar)) < 0) {
        fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
                av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return -1;
    }

    pCodeCtx->time_base = (AVRational) {1, 25};

    if (avcodec_open2(pCodeCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.");
        return -1;
    }

    int ret = avformat_write_header(pFormatCtx, NULL);
    if (ret < 0) {
        printf("write_header fail\n");
        return -1;
    }

    int y_size = width * height;

    // Encode
    // give the AVPacket a generously sized buffer
    AVPacket pkt;
    av_new_packet(&pkt, y_size * 3);

    // send the frame to the encoder
    ret = avcodec_send_frame(pCodeCtx, pFrame);
    if (ret < 0) {
        printf("Could not avcodec_send_frame.");
        return -1;
    }

    // fetch the encoded packet
    ret = avcodec_receive_packet(pCodeCtx, &pkt);
    if (ret < 0) {
        printf("Could not avcodec_receive_packet");
        return -1;
    }

    ret = av_write_frame(pFormatCtx, &pkt);

    if (ret < 0) {
        printf("Could not av_write_frame");
        return -1;
    }

    av_packet_unref(&pkt);

    //Write Trailer
    av_write_trailer(pFormatCtx);


    avcodec_free_context(&pCodeCtx); // also closes the codec
    avio_close(pFormatCtx->pb);
    avformat_free_context(pFormatCtx);

    return 0;
}

int main(int argc, char *argv[]) {
    int ret;
    const char *in_filename, *out_filename;
    AVFormatContext *fmt_ctx = NULL;

    const AVCodec *codec;
    AVCodecContext *codeCtx = NULL;

    AVStream *stream = NULL;
    int stream_index;

    AVPacket avpkt;

    int frame_count;
    AVFrame *frame;


    if (argc <= 2) {
        printf("Usage: %s <input file> <output file>\n", argv[0]);
        exit(0);
    }
    in_filename = argv[1];
    out_filename = argv[2];

    // 1. open the input file and read the container header
    if (avformat_open_input(&fmt_ctx, in_filename, NULL, NULL) < 0) {
        printf("Could not open source file %s\n", in_filename);
        exit(1);
    }

    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        printf("Could not find stream information\n");
        exit(1);
    }

    av_dump_format(fmt_ctx, 0, in_filename, 0);

    av_init_packet(&avpkt);
    avpkt.data = NULL;
    avpkt.size = 0;

    // 2. pick the best video stream
    stream_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (stream_index < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(AVMEDIA_TYPE_VIDEO), in_filename);
        return stream_index;
    }

    stream = fmt_ctx->streams[stream_index];

    // 3. find a decoder for the stream's codec
    codec = avcodec_find_decoder(stream->codecpar->codec_id);
    if (codec == NULL) {
        return -1;
    }

    // 4. allocate a decoder context for that codec
    codeCtx = avcodec_alloc_context3(codec);
    if (!codeCtx) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }


    // 5. copy the stream parameters into the decoder context
    if ((ret = avcodec_parameters_to_context(codeCtx, stream->codecpar)) < 0) {
        fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
                av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return ret;
    }

    // 6. open the decoder
    if (avcodec_open2(codeCtx, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }


    // allocate the frame that will hold the decoded pictures
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    frame_count = 0;
    char buf[1024];
    // 7. read packets from the container
    while (av_read_frame(fmt_ctx, &avpkt) >= 0) {
        if (avpkt.stream_index == stream_index) {
            // 8. feed the packet to the decoder
            int re = avcodec_send_packet(codeCtx, &avpkt);
            if (re < 0) {
                continue;
            }

            // 9. must loop here: one packet can yield several frames from avcodec_receive_frame()
            while (avcodec_receive_frame(codeCtx, frame) == 0) {
                // build the output image path
                snprintf(buf, sizeof(buf), "%s/Demo-%d.jpg", out_filename, frame_count);
                saveJpg(frame, buf); // save the frame as a JPEG
                frame_count++;       // count decoded frames, not packets
            }
        }
        av_packet_unref(&avpkt);
    }

    // flush the decoder so that buffered frames are not lost
    avcodec_send_packet(codeCtx, NULL);
    while (avcodec_receive_frame(codeCtx, frame) == 0) {
        snprintf(buf, sizeof(buf), "%s/Demo-%d.jpg", out_filename, frame_count);
        saveJpg(frame, buf);
        frame_count++;
    }

    av_frame_free(&frame);
    avcodec_free_context(&codeCtx);
    avformat_close_input(&fmt_ctx);

    return 0;
}
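
One caveat about saveJpg(): it assumes the decoded frame is already in a 4:2:0 YUV layout that the MJPEG encoder (AV_PIX_FMT_YUVJ420P) accepts, which holds for typical H.264 input but not for every source. If the decoder outputs a different pixel format, the frame should be converted with libswscale first. A rough sketch (the helper name is made up here, and libswscale/swscale.h has to be added to the includes):

extern "C" {
#include <libswscale/swscale.h>   // additional header on top of the ones above
}

/* Convert an arbitrary decoded frame to YUVJ420P so it can be handed to saveJpg(). */
static AVFrame *to_yuvj420p(const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    if (!dst)
        return NULL;
    dst->format = AV_PIX_FMT_YUVJ420P;
    dst->width  = src->width;
    dst->height = src->height;
    if (av_frame_get_buffer(dst, 0) < 0) {
        av_frame_free(&dst);
        return NULL;
    }

    struct SwsContext *sws = sws_getContext(src->width, src->height,
                                            (enum AVPixelFormat) src->format,
                                            dst->width, dst->height,
                                            AV_PIX_FMT_YUVJ420P,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws) {
        av_frame_free(&dst);
        return NULL;
    }
    sws_scale(sws, src->data, src->linesize, 0, src->height,
              dst->data, dst->linesize);
    sws_freeContext(sws);
    return dst;
}

/* usage inside the receive loop:
 *   AVFrame *jpg_frame = to_yuvj420p(frame);
 *   if (jpg_frame) { saveJpg(jpg_frame, buf); av_frame_free(&jpg_frame); }
 */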
