c++调用ffmpeg api录屏 并进行rtmp推流

代码及工程见https://download.csdn.net/download/daqinzl/88156528

开发工具:Visual Studio 2019

记得启动rtmp流媒体服务 nginx的rtmp服务见https://download.csdn.net/download/daqinzl/20478812

播放,采用ffmpeg工具集里的ffplay.exe, 执行命令 ffplay rtmp://192.168.0.105:1935/live/desktop

主要代码如下:


#include "pch.h"
#include <iostream>
using namespace std;

#include <stdio.h>

#define __STDC_CONSTANT_MACROS

extern "C"
{
#include "include/libavcodec/avcodec.h"
#include "include/libavformat/avformat.h"
#include "include/libswscale/swscale.h"
#include "include/libavdevice/avdevice.h"
#include "include/libavutil/imgutils.h"
#include "include/libavutil/opt.h"
#include "include/libavutil/imgutils.h"
#include "include/libavutil/mathematics.h"
#include "include/libavutil/time.h"
};


#pragma comment (lib,"avcodec.lib")
#pragma comment (lib,"avdevice.lib")
#pragma comment (lib,"avfilter.lib")
#pragma comment (lib,"avformat.lib")
#pragma comment (lib,"avutil.lib")
#pragma comment (lib,"swresample.lib")
#pragma comment (lib,"swscale.lib")


int main(int argc, char* argv[])
{

            AVFormatContext* m_fmt_ctx = NULL;
        AVInputFormat* m_input_fmt = NULL;
        int video_stream = -1;
        avdevice_register_all();
        avcodec_register_all();
        const char* deviceName =  "desktop";
        const char* inputformat = "gdigrab";
        int FPS = 23;  //15
        m_fmt_ctx = avformat_alloc_context();
        m_input_fmt = av_find_input_format(inputformat);
        AVDictionary* deoptions = NULL;
        av_dict_set_int(&deoptions, "framerate", FPS, AV_DICT_MATCH_CASE);
        av_dict_set_int(&deoptions, "rtbufsize", 3041280 * 100 * 5, 0);
        int ret = avformat_open_input(&m_fmt_ctx, deviceName, m_input_fmt, &deoptions);
        if (ret != 0) {
            
                return ret;
        }
        av_dict_free(&deoptions);
        ret = avformat_find_stream_info(m_fmt_ctx, NULL);
        if (ret < 0) {
            
                return ret;
        }
        av_dump_format(m_fmt_ctx, 0, deviceName, 0);
        video_stream = av_find_best_stream(m_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
        if (video_stream < 0) {
            
                return -1;
        }

        AVCodecContext * _codec_ctx = m_fmt_ctx->streams[video_stream]->codec;
        AVCodec* _codec = avcodec_find_decoder(_codec_ctx->codec_id);
        if (_codec == NULL) {
            
                return -1;
        }
        ret = avcodec_open2(_codec_ctx, _codec, NULL);
        if (ret != 0) {
            
                return -1;
        }
        int width = m_fmt_ctx->streams[video_stream]->codec->width;
        int height = m_fmt_ctx->streams[video_stream]->codec->height;
        int fps = m_fmt_ctx->streams[video_stream]->codec->framerate.num > 0 ? m_fmt_ctx->streams[video_stream]->codec->framerate.num : 25;
        AVPixelFormat videoType = m_fmt_ctx->streams[video_stream]->codec->pix_fmt;
        std::cout << "avstream timebase : " << m_fmt_ctx->streams[video_stream]->time_base.num << " / " << m_fmt_ctx->streams[video_stream]->time_base.den << endl;
            AVDictionary* enoptions = 0;
        av_dict_set(&enoptions, "preset", "superfast", 0);
        av_dict_set(&enoptions, "tune", "zerolatency", 0);
        AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
        if (!codec)
        {
            
                std::cout << "avcodec_find_encoder failed!" << endl;
            return NULL;
        }
        AVCodecContext* vc = avcodec_alloc_context3(codec);
        if (!vc)
        {
            
                std::cout << "avcodec_alloc_context3 failed!" << endl;
            return NULL;
        }
        std::cout << "avcodec_alloc_context3 success!" << endl;
        vc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        vc->codec_id = AV_CODEC_ID_H264;
        vc->codec_type = AVMEDIA_TYPE_VIDEO;
        vc->pix_fmt = AV_PIX_FMT_YUV420P;
        vc->width = width;
        vc->height = height;
        vc->time_base.num = 1;
        vc->time_base.den = FPS;
        vc->framerate = { FPS,1 };
        vc->bit_rate = 10241000;
        vc->gop_size = 120;
        vc->qmin = 10;
        vc->qmax = 51;
        vc->max_b_frames = 0;
        vc->profile = FF_PROFILE_H264_MAIN;
        ret = avcodec_open2(vc, codec, &enoptions);
        if (ret != 0)
        {
            return ret;
        }
        std::cout << "avcodec_open2 success!" << endl;
        av_dict_free(&enoptions);
        SwsContext *vsc = nullptr;
        vsc = sws_getCachedContext(vsc,
            width, height, (AVPixelFormat)videoType, //源宽、高、像素格式
            width, height, AV_PIX_FMT_YUV420P,//目标宽、高、像素格式
            SWS_BICUBIC, // 尺寸变化使用算法
            0, 0, 0
        );
        if (!vsc)
        {
            
                cout << "sws_getCachedContext failed!";
            return false;
        }
        AVFrame* yuv = av_frame_alloc();
        yuv->format = AV_PIX_FMT_YUV420P;
        yuv->width = width;
        yuv->height = height;
        yuv->pts = 0;
        ret = av_frame_get_buffer(yuv, 32);
        if (ret != 0)
        {
            
                return ret;
        }
        const char* rtmpurl = "rtmp://192.168.0.105:1935/live/desktop";
       
        AVFormatContext * ic = NULL;
        ret = avformat_alloc_output_context2(&ic, 0, "flv", rtmpurl);
        if (ret < 0)
        {
            
                return ret;
        }
        AVStream* st = avformat_new_stream(ic, NULL);
        if (!st)
        {
            
                return -1;
        }
        st->codecpar->codec_tag = 0;
        avcodec_parameters_from_context(st->codecpar, vc);
        av_dump_format(ic, 0, rtmpurl, 1);
        ret = avio_open(&ic->pb, rtmpurl, AVIO_FLAG_WRITE);
        if (ret != 0)
        {
            
                return ret;
        }
        ret = avformat_write_header(ic, NULL);
        if (ret != 0)
        {
            
                return ret;
        }
        AVPacket* packet = av_packet_alloc();
        AVPacket* Encodepacket = av_packet_alloc();
        int frameIndex = 0;
        int EncodeIndex = 0;
        AVFrame* rgb = av_frame_alloc();
        AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
        long long startpts = m_fmt_ctx->start_time;
        long long lastpts = 0;
        long long duration = av_rescale_q(1, { 1,FPS }, { 1,AV_TIME_BASE });
        int got_picture = 0;
        while (frameIndex < 2000000)
        {
            
                ret = av_read_frame(m_fmt_ctx, packet);
            if (ret < 0) {
                
                    break;
            }
            if (packet->stream_index == video_stream)
            {
                
                    ret = avcodec_decode_video2(_codec_ctx, rgb, &got_picture, packet);
                if (ret < 0) {
                    
                        printf("Decode Error.\n");
                    return ret;
                }
                if (got_picture) {
                    
                        int h = sws_scale(vsc, rgb->data, rgb->linesize, 0, height, //源数据
                            yuv->data, yuv->linesize);
                    int guesspts = frameIndex * duration;
                    yuv->pts = guesspts;
                    frameIndex++;
                    ret = avcodec_encode_video2(vc, Encodepacket, yuv, &got_picture);
                    if (ret < 0) {
                        
                            printf("Failed to encode!\n");
                        break;
                    }
                    if (got_picture == 1) {
                        
                            Encodepacket->pts = av_rescale_q(EncodeIndex, vc->time_base, st->time_base);
                        Encodepacket->dts = Encodepacket->pts;
                        std::cout << "frameindex : " << EncodeIndex << " pts : " << Encodepacket->pts << " dts: " << Encodepacket->dts << " encodeSize:" << Encodepacket->size << " curtime - lasttime " << Encodepacket->pts - lastpts << endl;
                            lastpts = Encodepacket->pts;
                        ret = av_interleaved_write_frame(ic, Encodepacket);
                        EncodeIndex++;
                        av_packet_unref(Encodepacket);
                    }
                }
            }
            av_packet_unref(packet);
        }
        ret = avcodec_send_frame(vc, NULL);
        while (ret >= 0) {
            
                ret = avcodec_receive_packet(vc, Encodepacket);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                
                    break;
            }
            if (ret < 0) {
                
                    break;
            }
            ret = av_interleaved_write_frame(ic, Encodepacket);
            EncodeIndex++;
        }
        av_write_trailer(ic);
        av_packet_free(&packet);
        av_packet_free(&Encodepacket);
        av_frame_free(&rgb);
        av_frame_free(&yuv);
        av_bitstream_filter_close(h264bsfc);
        h264bsfc = NULL;
        if (vsc)
        {
            
                sws_freeContext(vsc);
            vsc = NULL;
        }
        if (_codec_ctx)
            avcodec_close(_codec_ctx);
        _codec_ctx = NULL;
        _codec = NULL;
        if (vc)
            avcodec_free_context(&vc);
        if (m_fmt_ctx)
            avformat_close_input(&m_fmt_ctx);
        if (ic && !(ic->flags & AVFMT_NOFILE))
            avio_closep(&ic->pb);
        if (ic) {
            
                avformat_free_context(ic);
            ic = NULL;
        }
        m_input_fmt = NULL;
        return 0;
    }
    

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
以下是一个简单的示例代码,展示如何使用FFmpeg 6.0在C++中录屏并将其推流到RTMP服务器:

```cpp
#include <iostream>
#include <cstdlib>
#include <chrono>
#include <thread>

extern "C" {
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libavutil/time.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <libavutil/imgutils.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/extensions/XShm.h>
}

#define STREAM_URL "rtmp://example.com/live/stream"

int main() {
    // Initialize X11 display
    Display *disp = XOpenDisplay(NULL);
    if (!disp) {
        std::cerr << "Error: Could not open X11 display." << std::endl;
        return EXIT_FAILURE;
    }
    int screen = DefaultScreen(disp);
    Window root = RootWindow(disp, screen);

    // Get screen dimensions
    int width = XDisplayWidth(disp, screen);
    int height = XDisplayHeight(disp, screen);

    // Create XImage and XShmImage structures
    XImage *ximg = XGetImage(disp, root, 0, 0, width, height, AllPlanes, ZPixmap);
    XShmSegmentInfo shminfo;
    XShmCreateImage(disp, root, ZPixmap, 0, ximg->width, ximg->height, ximg->depth, &shminfo, 0);
    shminfo.shmaddr = (char *)shmat(shminfo.shmid, 0, 0);
    shminfo.readOnly = False;
    XShmAttach(disp, &shminfo);
    XSync(disp, False);

    // Allocate AVFrame for video data
    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        std::cerr << "Error: Could not allocate AVFrame." << std::endl;
        return EXIT_FAILURE;
    }
    frame->width = width;
    frame->height = height;
    frame->format = AV_PIX_FMT_RGB24;
    if (av_frame_get_buffer(frame, 32) < 0) {
        std::cerr << "Error: Could not allocate video frame data." << std::endl;
        return EXIT_FAILURE;
    }

    // Initialize FFmpeg
    av_register_all();
    avcodec_register_all();
    avformat_network_init();

    // Open output context
    AVFormatContext *outctx = nullptr;
    if (avformat_alloc_output_context2(&outctx, nullptr, "flv", STREAM_URL) < 0) {
        std::cerr << "Error: Could not allocate output context." << std::endl;
        return EXIT_FAILURE;
    }
    if (avio_open2(&outctx->pb, STREAM_URL, AVIO_FLAG_WRITE, nullptr, nullptr) < 0) {
        std::cerr << "Error: Could not open output URL." << std::endl;
        return EXIT_FAILURE;
    }

    // Add video stream
    AVStream *vstream = avformat_new_stream(outctx, nullptr);
    if (!vstream) {
        std::cerr << "Error: Could not allocate video stream." << std::endl;
        return EXIT_FAILURE;
    }
    vstream->id = 0;
    vstream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    vstream->codecpar->codec_id = AV_CODEC_ID_H264;
    vstream->codecpar->width = width;
    vstream->codecpar->height = height;
    vstream->codecpar->format = AV_PIX_FMT_YUV420P;
    vstream->codecpar->bit_rate = 400000;
    vstream->codecpar->profile = FF_PROFILE_H264_BASELINE;

    // Find encoder
    AVCodec *vcodec = avcodec_find_encoder(vstream->codecpar->codec_id);
    if (!vcodec) {
        std::cerr << "Error: Could not find video encoder." << std::endl;
        return EXIT_FAILURE;
    }

    // Open video encoder
    AVCodecContext *vctx = avcodec_alloc_context3(vcodec);
    if (!vctx) {
        std::cerr << "Error: Could not allocate video encoder context." << std::endl;
        return EXIT_FAILURE;
    }
    if (avcodec_parameters_to_context(vctx, vstream->codecpar) < 0) {
        std::cerr << "Error: Could not initialize video encoder context." << std::endl;
        return EXIT_FAILURE;
    }
    vctx->bit_rate = 400000;
    vctx->time_base = {1, 25};
    vctx->gop_size = 10;
    if (vstream->codecpar->codec_id == AV_CODEC_ID_H264) {
        av_opt_set(vctx->priv_data, "preset", "ultrafast", 0);
        av_opt_set(vctx->priv_data, "tune", "zerolatency", 0);
    }
    if (avcodec_open2(vctx, vcodec, nullptr) < 0) {
        std::cerr << "Error: Could not open video encoder." << std::endl;
        return EXIT_FAILURE;
    }

    // Allocate AVPacket for video data
    AVPacket *vpacket = av_packet_alloc();
    if (!vpacket) {
        std::cerr << "Error: Could not allocate video packet." << std::endl;
        return EXIT_FAILURE;
    }

    // Allocate AVFrame for video data after conversion to YUV420P
    AVFrame *vframe = av_frame_alloc();
    if (!vframe) {
        std::cerr << "Error: Could not allocate video frame." << std::endl;
        return EXIT_FAILURE;
    }
    vframe->width = width;
    vframe->height = height;
    vframe->format = vctx->pix_fmt;
    if (av_frame_get_buffer(vframe, 32) < 0) {
        std::cerr << "Error: Could not allocate video frame data." << std::endl;
        return EXIT_FAILURE;
    }

    // Initialize swscale context for converting RGB to YUV420P
    SwsContext *swsctx = sws_getContext(width, height, AV_PIX_FMT_RGB24,
                                        width, height, vctx->pix_fmt,
                                        SWS_BICUBIC, nullptr, nullptr, nullptr);
    if (!swsctx) {
        std::cerr << "Error: Could not initialize swscale context." << std::endl;
        return EXIT_FAILURE;
    }

    // Write header to output context
    avformat_write_header(outctx, nullptr);

    // Read and encode video frames
    std::cout << "Start recording." << std::endl;
    while (true) {
        // Get screenshot from X11
        XShmGetImage(disp, root, ximg, 0, 0, AllPlanes);

        // Convert RGB to YUV420P
        sws_scale(swsctx, (const uint8_t * const *)frame->data, frame->linesize,
                  0, height, vframe->data, vframe->linesize);

        // Encode video frame
        vframe->pts = av_rescale_q(av_gettime(), {1, AV_TIME_BASE}, vctx->time_base);
        int ret = avcodec_send_frame(vctx, vframe);
        if (ret < 0) {
            std::cerr << "Error: Could not send video frame." << std::endl;
            return EXIT_FAILURE;
        }
        while (ret >= 0) {
            ret = avcodec_receive_packet(vctx, vpacket);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                break;
            else if (ret < 0) {
                std::cerr << "Error: Could not receive video packet." << std::endl;
                return EXIT_FAILURE;
            }
            av_packet_rescale_ts(vpacket, vctx->time_base, vstream->time_base);
            vpacket->stream_index = vstream->index;

            // Write video packet to output context
            av_interleaved_write_frame(outctx, vpacket);
            av_packet_unref(vpacket);
        }

        // Sleep for 40ms to limit framerate to 25fps
        std::this_thread::sleep_for(std::chrono::milliseconds(40));
    }

    // Cleanup
    av_write_trailer(outctx);
    avcodec_free_context(&vctx);
    av_frame_free(&vframe);
    av_packet_free(&vpacket);
    av_frame_free(&frame);
    avformat_close_input(&outctx);
    XShmDetach(disp, &shminfo);
    XDestroyImage(ximg);
    XCloseDisplay(disp);
    return EXIT_SUCCESS;
}
```

这个示例代码假设你已经安装了FFmpeg 6.0和X11库,可以通过以下命令来编译它:

```
g++ -o screen_capture screen_capture.cpp -lX11 `pkg-config --cflags --libs libavutil libavcodec libavformat libswscale libswresample`
```

请注意,这个示例代码只是一个简单的演示,并没有处理错误或异常情况。在实际应用中,你需要根据你的需要添加错误处理和异常处理代码。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值