使用 C++ 和 FFmpeg 从摄像头采集音视频并推流到 RTMP 服务器

该代码示例展示了如何使用C++和FFmpeg库从摄像头捕获音视频数据,然后将这些数据实时推流到RTMP服务器。主要涉及libavcodec、libavformat和libswscale等库,实现了视频设备的打开、解码、图像转换以及流的推送。
摘要由CSDN通过智能技术生成

GPT 使用C++实现从摄像头获取音视频然后推流到RTMP服务器

// C++ standard library
#include <chrono>
#include <iostream>
#include <string>
#include <thread>

// OpenCV
#include <opencv2/opencv.hpp>

// FFmpeg is a C library: its headers carry no __cplusplus guards, so they
// must be wrapped in extern "C" to link from C++.
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>   // avdevice_register_all(), v4l2 input
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}

using namespace std;
// NOTE(review): importing all of cv:: makes the unqualified name
// `VideoCapture` ambiguous between cv::VideoCapture and the custom class
// declared below — callers must qualify (::VideoCapture / cv::VideoCapture).
using namespace cv;

class VideoCapture {
public:
    VideoCapture(int device, int width, int height, int fps) {
        av_register_all();
        avformat_network_init();
        avdevice_register_all();

        AVInputFormat *inputFormat = av_find_input_format("video4linux2");
        AVDictionary *options = NULL;
        av_dict_set(&options, "video_size", (to_string(width) + "x" + to_string(height)).c_str(), 0);
        av_dict_set(&options, "framerate", to_string(fps).c_str(), 0);
        av_dict_set(&options, "input_format", "mjpeg", 0);
        av_dict_set(&options, "pixel_format", "yuyv422", 0);
        av_dict_set(&options, "v4l2_device", ("/dev/video" + to_string(device)).c_str(), 0);

        AVFormatContext *formatContext = NULL;
        if (avformat_open_input(&formatContext, ("/dev/video" + to_string(device)).c_str(), inputFormat, &options) != 0) {
            cerr << "Failed to open video device" << endl;
            exit(1);
        }

        if (avformat_find_stream_info(formatContext, NULL) < 0) {
            cerr << "Failed to find stream information" << endl;
            exit(1);
        }

        int videoStreamIndex = -1;
        for (int i = 0; i < formatContext->nb_streams; i++) {
            if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
                videoStreamIndex = i;
                break;
            }
        }

        if (videoStreamIndex == -1) {
            cerr << "Failed to find video stream" << endl;
            exit(1);
        }

        AVCodecParameters *codecParameters = formatContext->streams[videoStreamIndex]->codecpar;
        AVCodec *codec = avcodec_find_decoder(codecParameters->codec_id);
        if (codec == NULL) {
            cerr << "Failed to find codec" << endl;
            exit(1);
        }

        codecContext = avcodec_alloc_context3(codec);
        if (codecContext == NULL) {
            cerr << "Failed to allocate codec context" << endl;
            exit(1);
        }

        if (avcodec_parameters_to_context(codecContext, codecParameters) < 0) {
            cerr << "Failed to copy codec parameters to codec context" << endl;
            exit(1);
        }

        if (avcodec_open2(codecContext, codec, NULL) < 0) {
            cerr << "Failed to open codec" << endl;
            exit(1);
        }

        frame = av_frame_alloc();
        if (frame == NULL) {
            cerr << "Failed to allocate frame" << endl;
            exit(1);
        }

        swsContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt, codecContext->width, codecContext->height, AV_PIX_FMT_BGR24, SWS_BILINEAR, NULL, NULL, NULL);
        if (swsContext == NULL) {
            cerr << "Failed to create sws context" << endl;
            exit(1);
        }
    }

    ~VideoCapture() {
        av_frame_free(&frame);
        avcodec_close(codecContext);
    }

    void start() {
        while (true) {
            if (av_read_frame(formatContext, &packet) < 0) {
                cerr << "Failed to read frame" << endl;
                exit(1);
            }

            if (packet.stream_index == videoStreamIndex) {
                if (avcodec_send_packet(codecContext, &packet) < 0) {
                    cerr << "Failed to send packet to codec" << endl;
                    exit(1);
                }

                while (avcodec_receive_frame(codecContext, frame) == 0) {
                    Mat image(codecContext->height, codecContext->width, CV_8UC3);
                    sws_scale(swsContext, frame->data, frame->linesize, 0, codecContext->height, image.data, image.step);

                    imshow("Video", image);
                    if (waitKey(1) == 'q') {
                        exit(0);
                    }
                }
            }

            av_packet_unref(&packet);
        }
    }

private:
    AVFormatContext *formatContext;
    AVCodecContext *codecContext;
    AVFrame *frame;
    AVPacket packet;
    int videoStreamIndex;
    SwsContext *swsContext;
};

class RTMPPusher {
public:
    RTMPPusher(string url) {
        av_register_all();
        avformat_network_init();

        AVOutputFormat *outputFormat = av_guess_format("flv", NULL, NULL);
        if (outputFormat == NULL) {
            cerr << "Failed to guess output format" << endl;
            exit(1);
        }

        AVFormatContext *formatContext = NULL;
        if (avformat_alloc_output_context2(&formatContext, outputFormat, NULL, url.c_str()) < 0) {
            cerr << "Failed to allocate output context" << endl;
            exit(1);
        }

        AVStream *videoStream = avformat_new_stream(formatContext, NULL);
        if (videoStream == NULL) {
            cerr << "Failed to create video stream" << endl;
            exit(1);
        }

        AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
        if (codec == NULL) {
            cerr << "Failed to find codec" << endl;
            exit(1);
        }

        AVCodecContext *codecContext = avcodec_alloc_context3(codec);
        if (codecContext == NULL) {
            cerr << "Failed to allocate codec context" << endl;
            exit(1);
        }

        codecContext->codec_id = codec->id;
        codecContext->codec_type = AVMEDIA_TYPE_VIDEO;
        codecContext->width = 640;
        codecContext->height = 480;
        codecContext->time_base = {1, 25};
        codecContext->gop_size = 10;
        codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

        if (formatContext->oformat->flags & AVFMT_GLOBALHEADER) {
            codecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        }

        if (avcodec_open2(codecContext, codec, NULL) < 0) {
            cerr << "Failed to open codec" << endl;
            exit(1);
        }

        if (avcodec_parameters_from_context(videoStream->codecpar, codecContext) < 0) {
            cerr << "Failed to copy codec parameters to stream" << endl;
            exit(1);
        }

        av_dump_format(formatContext, 0, url.c_str(), 1);

        if (!(outputFormat->flags & AVFMT_NOFILE)) {
            if (avio_open(&formatContext->pb, url.c_str(), AVIO_FLAG_WRITE) < 0) {
                cerr << "Failed to open output file" << endl;
                exit(1);
            }
        }

        if (avformat_write_header(formatContext, NULL) < 0) {
            cerr << "Failed to write header" << endl;
            exit(1);
        }

        this->formatContext = formatContext;
        this->codecContext = codecContext;
        this->videoStream = videoStream;
    }

    ~RTMPPusher() {
        av_write_trailer(formatContext);
        avcodec_close(codecContext);
        avformat_free_context(formatContext);
    }

    void push(Mat image) {
        AVFrame *frame = av_frame_alloc();
        if (frame == NULL) {
            cerr << "Failed to allocate frame" << endl;
            exit(1);
        }

        frame->format = codecContext->pix_fmt;
        frame->width = codecContext->width;
        frame->height = codecContext->height;

        if (av_frame_get_buffer(frame, 0) < 0) {
            cerr << "Failed to allocate frame buffer" << endl;
            exit(1);
        }

        Mat yuvImage;
        cvtColor(image, yuvImage, COLOR_BGR2YUV_I420);

        uint8_t *data[4];
        data[0] = yuvImage.data;
        data[1] = data[0] + codecContext->width * codecContext->height;
        data[2] = data[1] + codecContext->width * codecContext->height / 4;
        int linesize[4];
        linesize[0] = codecContext->width;
        linesize[1] = codecContext->width / 2;
        linesize[2] = codecContext->width / 2;

        sws_scale(sws_getContext(codecContext->width, codecContext->height, AV_PIX_FMT_YUV420P, codecContext->width, codecContext->height, codecContext->pix_fmt, SWS_BILINEAR, NULL, NULL, NULL), data, linesize, 0, codecContext->height, frame->data, frame->linesize);

        frame->pts = frameCount++;

        AVPacket packet;
        av_init_packet(&packet);
        packet.data = NULL;
        packet.size = 0;

        if (avcodec_send_frame(codecContext, frame) < 0) {
            cerr << "Failed to send frame to codec" << endl;
            exit(1);
        }

        while (avcodec_receive_packet(codecContext, &packet) == 0) {
            packet.stream_index = videoStream->index;
            av_interleaved_write_frame(formatContext, &packet);
            av_packet_unref(&packet);
        }

        av_frame_free(&frame);
    }

private:
    AVFormatContext *formatContext;
    AVCodecContext *codecContext;
    AVStream *videoStream;
    int frameCount = 0;
};

int main() {
    VideoCapture videoCapture(0, 640, 480, 25);
    RTMPPusher rtmpPusher("rtmp://localhost/live/stream");

    thread videoCaptureThread([&]() {
        videoCapture.start();
    });

    while (true) {
        Mat image;
        videoCapture >> image;

        rtmpPusher.push(image);

        imshow("Video", image);
        if (waitKey(1) == 'q') {
            break;
        }
    }

    videoCaptureThread.join();

    return 0;
}

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值