main.cpp
#include <QApplication>
#include <QDebug>
#include <QThread>
#include "Controller.h"
using namespace std;
// Entry point: starts the Qt event loop and kicks the worker via the
// Controller's operate() signal (queued to the worker thread).
int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    qDebug() << "main thread:" << QThread::currentThreadId();
    // BUG FIX: the Controller used to be heap-allocated and leaked, so its
    // destructor (which quits and joins the worker thread) never ran.
    // A stack instance is destroyed after exec() returns.
    Controller controller;
    emit controller.operate("copy");
    // BUG FIX: propagate the event-loop exit code (was discarded).
    return a.exec();
}
Controller.h
#include <QThread>
// Owns the worker thread and wires a record worker (audio or video, chosen
// at compile time in the .cpp) to it via queued signal/slot connections.
class Controller : public QObject
{
    Q_OBJECT
public:
    // explicit: a single-argument constructor should not act as an implicit
    // QObject* -> Controller conversion.
    explicit Controller(QObject* parent = nullptr);
    ~Controller();
public slots:
    // Receives the worker's result string (runs on the main thread).
    void handleResults(const QString &des);
signals:
    // Emitted to start the worker; delivered on the worker thread.
    void operate(const QString &cmd);
private:
    QThread thread; // worker thread; quit + joined in the destructor
};
Controller.cpp
#include "Controller.h"
#include "AudioRecordWorker.h"
#include "VideoRecordWorker.h"
#include <QDebug>
// Creates the worker (audio branch when the #if is 1, video otherwise),
// moves it to the owned thread and connects the queued signal/slot pairs.
// NOTE(review): the worker's constructor runs on the CALLER's thread (the
// runtime log confirms AudioRecordWorker() prints the main thread id); only
// slots invoked through the connections below execute on `thread`.
Controller::Controller(QObject* parent)
: QObject(parent)
{
#if 1
// Audio push-stream branch.
AudioRecordWorker *audioRecord = new AudioRecordWorker();
audioRecord->moveToThread(&thread);
// Worker is deleted when the thread finishes; it must stay parent-less.
connect(&thread, &QThread::finished, audioRecord, &QObject::deleteLater);
// Queued: doSomething() executes on the worker thread.
connect(this, &Controller::operate, audioRecord, &AudioRecordWorker::doSomething);
connect(audioRecord, &AudioRecordWorker::resultNotify, this, &Controller::handleResults);
#else
// Video push-stream branch (same wiring, different worker).
VideoRecordWorker *videoRecord = new VideoRecordWorker();
videoRecord->moveToThread(&thread);
connect(videoRecord, &VideoRecordWorker::resultNotify, this, &Controller::handleResults);
connect(&thread, &QThread::finished, videoRecord, &QObject::deleteLater);
connect(this, &Controller::operate, videoRecord, &VideoRecordWorker::doSomething);
#endif
thread.start();
}
// Stops the worker thread's event loop and blocks until it exits; the
// QThread::finished connection then schedules the worker's deleteLater().
// NOTE(review): if the worker is stuck inside its infinite startRecord()
// loop, quit() will not interrupt it and wait() blocks — confirm shutdown.
Controller::~Controller()
{
thread.quit();
thread.wait();
}
// Slot for the worker's resultNotify; runs on the main thread because the
// cross-thread connection is queued.
void Controller::handleResults(const QString &des)
{
    const auto tid = QThread::currentThreadId();
    qDebug() << "handleResults()" << des << "thread:" << tid;
}
VideoRecordWorker.h
#pragma once
#include <QObject>
#include <string>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
class XMediaEncode;
// Worker that captures camera frames with OpenCV and pushes them as an
// H.264/rtmp stream via XMediaEncode.  Intended to be moveToThread()'d;
// doSomething() is the queued entry point.
class VideoRecordWorker : public QObject
{
    Q_OBJECT
public:
    explicit VideoRecordWorker(QObject *parent = nullptr);
    ~VideoRecordWorker();
    void mediaEncodeInit();
    // Blocking capture/encode/push loop.
    void startRecord();
    void recordParameter(int sampleRate,int channels, int sampleByte , int nbSample);
public slots:
    void doSomething(const QString& cmd);
signals:
    void resultNotify(const QString& des);
private:
    // rtsp camera source (currently only logged; startRecord() opens device 0)
    std::string inUrl = "rtsp://test:test@192.168.1.4";
    // nginx-rtmp live server push URL
    std::string outUrl = "rtmp://0.0.0.0/live";
    // BUG FIX: was uninitialised; AudioRecordWorker already initialises the
    // same member to nullptr — made consistent.
    XMediaEncode *mediaEncode = nullptr;
    cv::VideoCapture cam;
    cv::Mat frame;
    int sampleRate = 44100;
    int channels = 2;
    int sampleByte = 2;
    int nbSample = 1024;
    int inWidth = 0;   // filled from the capture device in startRecord()
    int inHeight = 0;
    int fps = 0;
};
VideoRecordWorker.cpp
#include <stdexcept> // 1. 更换包含头文件
#include <exception>
#include <iostream>
#include <string>
#include <QDebug>
#include <QThread>
#include "VideoRecordWorker.h"
#include "XMediaEncode.h"
using namespace cv;
using namespace std;
// Intentionally empty: unlike AudioRecordWorker, all initialisation is done
// lazily in startRecord() on the worker thread.
VideoRecordWorker::VideoRecordWorker(QObject *parent)
: QObject(parent)
{
}
// Releases the capture device (if startRecord() opened it) and logs the
// destruction thread for debugging.
VideoRecordWorker::~VideoRecordWorker()
{
    if (cam.isOpened()) {
        cam.release();
    }
    qDebug() << "~VideoRecordWorker()" << "thread:" << QThread::currentThreadId();
}
// Stub: encoder setup for the video path lives inside startRecord().
// NOTE(review): declared to mirror AudioRecordWorker::mediaEncodeInit() but
// never called anywhere in the visible code.
void VideoRecordWorker::mediaEncodeInit()
{
}
// Queued entry point: runs on the worker thread and starts the blocking
// capture/encode loop.  The notify below is reached only if startRecord()
// returns (i.e. it failed before entering its loop).
void VideoRecordWorker::doSomething(const QString &cmd)
{
    const auto tid = QThread::currentThreadId();
    qDebug() << "doSomething()" << cmd << "thread:" << tid;
    startRecord();
    emit resultNotify("doSomething ok!");
}
void VideoRecordWorker::startRecord()
{
//编码器和像素格式转换
mediaEncode = XMediaEncode::Get(0);
//cam.open(inUrl);
//1.使用opencv打开rtsp相机
cam.open(0);
if(!cam.isOpened())
{
throw logic_error("cam open failed");
}
cout<<inUrl<<"cam open success"<<endl;
inWidth = cam.get(CAP_PROP_FRAME_WIDTH);
inHeight = cam.get(CAP_PROP_FRAME_HEIGHT);
fps = cam.get(CAP_PROP_FPS);
//注册所有的编解码器
//注册所有的封装器
//注册所有网络协议
mediaEncode->register_ffmpeg();
//2.初始化像素格式转换的上下文初始化
//3.输出的数据结构
mediaEncode->inWidth = inWidth;
mediaEncode->inHeight = inHeight;
mediaEncode->outWidth = inWidth;
mediaEncode->outHeight = inHeight;
mediaEncode->InitScale();
if(!mediaEncode->InitVideoCodec())
{
std::cout<<"InitVideoCodec failed!"<<std::flush;
if(cam.isOpened())
cam.release();
return;
}
//初始化封装器的上下文
mediaEncode->Init(outUrl.c_str());
//添加视频或者音频
mediaEncode->AddStream(mediaEncode->vc);
mediaEncode->SendHead();
for(;;)
{
//读取rtsp视频帧,解码视频帧
if(!cam.grab())
{
continue;
}
//yuv转为rgb
//输入的数据结构
if(!cam.retrieve(frame))
{
continue;
}
//rgb to yuv
mediaEncode->inPixSize = frame.elemSize();
AVFrame *yuv = mediaEncode->RGBToYUV((char*)frame.data);
if(!yuv) continue;
AVPacket *pkt = mediaEncode->EncodeVideo(yuv);
if(!pkt) continue;
mediaEncode->SendFrame(pkt);
}
}
void VideoRecordWorker::recordParameter(int sampleRate,int channels, int sampleByte , int nbSample)
{
sampleRate = sampleRate;
channels = channels;
sampleByte = sampleByte;
nbSample = nbSample;
}
AudioRecordWorker.h
#include <QObject>
#include <QAudioSource>
#include <QAudioFormat>
#include <QMediaDevices>
#include <QAudioInput>
#include <string>
class XMediaEncode;
// Worker that captures microphone PCM with Qt Multimedia and pushes it as an
// AAC/rtmp stream via XMediaEncode.  Intended to be moveToThread()'d; note
// the constructor (device + encoder setup) runs on the caller's thread —
// only the queued slot doSomething() runs on the worker thread.
class AudioRecordWorker : public QObject
{
Q_OBJECT
public:
explicit AudioRecordWorker(QObject *parent = nullptr);
~AudioRecordWorker();
// Configure the encoder, start audio capture and open the rtmp muxer.
void mediaEncodeInit();
// Blocking capture/resample/encode/push loop (infinite).
void startRecord();
// Intended to update the capture parameters below.
void recordParameter(int sampleRate,int channels, int sampleByte , int nbSample);
public slots:
// Entry point invoked from Controller::operate via a queued connection.
void doSomething(const QString& cmd);
signals:
void resultNotify(const QString& des);
private:
QAudioFormat format;  // requested capture format (Int16, see constructor)
QAudioDevice info;    // default audio input device
QAudioSource *audio = nullptr;  // Qt capture source (parented to this)
QIODevice *io = nullptr;        // stream returned by audio->start()
XMediaEncode *mediaEncode = nullptr;  // shared encoder instance (not owned)
std::string outUrl = "rtmp://0.0.0.0/live";  // nginx-rtmp push URL
int sampleRate = 44100;
int channels = 2;
int sampleByte = 2;   // bytes per sample (S16)
int nbSample = 1024;  // samples per AAC frame, per channel
};
AudioRecordWorker.cpp
#include <QDebug>
#include <QThread>
#include <iostream>
#include "AudioRecordWorker.h"
#include "VideoRecordWorker.h"
#include "XMediaEncode.h"
using namespace std;
// Configures Qt audio capture (44.1 kHz stereo S16), creates the capture
// source and initialises the rtmp encoder.  Runs on the CALLER's thread.
AudioRecordWorker::AudioRecordWorker(QObject *parent)
: QObject(parent)
{
    // 1. start Qt audio recording with the requested format.
    format.setSampleRate(sampleRate);
    format.setChannelCount(channels);
    format.setSampleFormat(QAudioFormat::Int16);
    info = QMediaDevices::defaultAudioInput();
    if (!info.isFormatSupported(format))
    {
        qWarning() << "Default format not supported, trying to use the nearest.";
        // BUG FIX: the old code only warned and then opened the device with
        // the unsupported format anyway; actually fall back as promised.
        // NOTE(review): if the fallback differs from `sampleRate`/`channels`,
        // the encoder configured in mediaEncodeInit() no longer matches the
        // captured data — confirm against the target devices.
        format = info.preferredFormat();
    }
    audio = new QAudioSource(format,this);
    qDebug() << "AudioRecordWorker()" << "thread:" << QThread::currentThreadId();
    mediaEncodeInit();
}
// Configures the shared encoder for S16 interleaved -> FLTP AAC, starts the
// audio capture and opens the rtmp muxer (header written at the end).
void AudioRecordWorker::mediaEncodeInit()
{
    mediaEncode = XMediaEncode::Get();
    mediaEncode->register_ffmpeg();
    mediaEncode->channels = channels;
    mediaEncode->nbSample = 1024;
    mediaEncode->sampleRate = sampleRate;
    mediaEncode->inSampleFmt = XSampleFMT::X_S16;
    mediaEncode->outSampleFmt = XSampleFMT::X_FLATP;
    // Start capturing audio.
    io = audio->start();
    if(!mediaEncode->InitResample())
    {
        std::cout<<"InitResample failed"<<flush;
        return;
    }
    if(!mediaEncode->InitAudioCode())
    {
        return;
    }
    // a. create the output muxer context.
    if(!mediaEncode->Init(outUrl.c_str()))
    {
        std::cout<<"avformat_alloc_output_context2"<<flush;
        // BUG FIX: continuing here meant AddStream() dereferenced a NULL
        // format context.
        return;
    }
    // b. add the audio stream.
    if(!mediaEncode->AddStream(mediaEncode->ac))
    {
        std::cout<<"AddStream failed"<<flush;
        return;
    }
    // Open the rtmp network IO and write the container header.
    if(!mediaEncode->SendHead())
    {
        std::cout<<"SendHead failed"<<std::endl;
    }
}
// NOTE(review): `audio` is parented to this object and destroyed with it,
// but capture is never stopped explicitly; consider audio->stop() here.
AudioRecordWorker::~AudioRecordWorker()
{
qDebug() << "~AudioRecordWorker()" << "thread:" << QThread::currentThreadId();
}
// Queued entry point: runs on the worker thread and starts the blocking
// capture loop.  The notify below is only reached if startRecord() returns.
void AudioRecordWorker::doSomething(const QString &cmd)
{
    const auto tid = QThread::currentThreadId();
    qDebug() << "doSomething()" << cmd << "thread:" << tid;
    startRecord();
    emit resultNotify("doSomething ok!");
}
void AudioRecordWorker::startRecord()
{
int frameSize = mediaEncode->nbSample*channels*sampleByte;
char *buf = new char[frameSize];
qDebug()<<"size = "<<frameSize;
int size = 0;
for(;;)
{
qint64 rev_len = io->bytesAvailable();
std::cout<<rev_len<<std::endl;
//一次读取一帧音频
if(rev_len < frameSize)
{
QThread::msleep(1);
continue;
}
int size = 0;
while(size != frameSize)
{
int len = io->read(buf+size,frameSize - size);
size += len;
}
if(size != frameSize) continue;
//已经读一帧源数据
cout << size << " "<<flush;
//重采样源数据
AVFrame *pcm = mediaEncode->Resample(buf);
if(!pcm)
{
std::cout<<"pcm == NULL"<<flush;
}
//pts 运算
//nb_sample/sample_rate = 一帧音频的秒数
//timebase pts = sec*timebase.den
AVPacket *pkt = mediaEncode->EncodeAudio(pcm);
if(!pkt) continue;
//cout<<pkt->size<<" "<<flush;
//推流
mediaEncode->SendFrame(pkt);
}
delete buf;
}
void AudioRecordWorker::recordParameter(int sampleRate,int channels, int sampleByte , int nbSample)
{
sampleRate = sampleRate;
channels = channels;
sampleByte = sampleByte;
nbSample = nbSample;
}
XMediaEncode.h
#pragma once
#include <string>
class AVFrame;
class AVPacket;
class AVCodecContext;
class AVFormatContext;
class AVStream;
// Sample-format selector whose values mirror ffmpeg's AVSampleFormat:
// X_S16 == AV_SAMPLE_FMT_S16 (1), X_FLATP == AV_SAMPLE_FMT_FLTP (8).
// The (AVSampleFormat) casts in the implementation rely on this mapping.
enum XSampleFMT
{
X_S16 = 1,
X_FLATP = 8
};
// Audio/video encoding facade: wraps swscale/swresample/avcodec/avformat to
// push one video or one audio stream to an rtmp URL.  Obtain instances via
// the Get() factory; implemented by CXMediaEncode in the .cpp.
class XMediaEncode
{
public:
// Input parameters
int inWidth = 1280;
int inHeight = 720;
int inPixSize = 3; // bytes per input pixel (BGR24)
int channels = 2;
int sampleRate = 44100;
XSampleFMT inSampleFmt = X_S16;
// Output parameters
int outWidth = 1280;
int outHeight = 720;
int bitrate = 4000000; // compressed video bitrate, bits per second
int fps = 25;
int nbSample = 1024; // samples per audio frame, per channel
XSampleFMT outSampleFmt = X_FLATP;
// Factory: returns the (static) encoder instance for `index`.
static XMediaEncode *Get(unsigned char index = 0);
// Register all codecs, muxers and network protocols with ffmpeg.
virtual void register_ffmpeg() = 0;
// Create the flv/rtmp output muxer context for `url`.
virtual bool Init(const char *url) = 0;
// Initialise the pixel-format/scale conversion context.
virtual bool InitScale() = 0;
// Initialise the audio resampling context.
virtual bool InitResample() = 0;
// Initialise the audio encoder.
virtual bool InitAudioCode() = 0;
virtual AVFrame* Resample(char *data) = 0;
virtual AVFrame* RGBToYUV(char *rgb) = 0;
// Initialise the video encoder.
virtual bool InitVideoCodec() = 0;
// Encode one video frame.
virtual AVPacket *EncodeVideo(AVFrame *frame) = 0;
// Encode one audio frame.
virtual AVPacket *EncodeAudio(AVFrame *frame) = 0;
// Add a video or audio stream for the given codec context.
virtual bool AddStream(AVCodecContext *c) = 0;
// Open the rtmp network IO and write the container header.
virtual bool SendHead() = 0;
// Push one encoded packet over rtmp.
virtual bool SendFrame(AVPacket *pkt) = 0;
virtual ~XMediaEncode();
// Video encoder context
AVCodecContext *vc = 0;
// Audio encoder context
AVCodecContext *ac = 0; // audio encoder context
AVFormatContext *ic = 0; // output (muxer) format context
protected:
XMediaEncode();
// rtmp/flv output URL (recorded by Init)
std::string outUrl = "";
AVStream *vs = 0; // video stream added by AddStream
AVStream *as = 0; // audio stream added by AddStream
};
XMediaEncode.cpp
#include "XMediaEncode.h"
#include <iostream>
#include <QDebug>
using namespace std;
extern "C"
{
#include <libswscale/swscale.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
}
class CXMediaEncode:public XMediaEncode
{
public:
void close()
{
if(vsc)
{
sws_freeContext(vsc);
vsc = NULL;
}
if(asc)
{
swr_free(&asc);
}
if(yuv)
{
av_frame_free(&yuv);
}
if(vc)
{
avcodec_free_context(&vc);
}
if(pcm)
{
av_frame_free(&pcm);
}
if(ic)
{
avformat_close_input(&ic);
vs = NULL;
}
vc = NULL;
outUrl = "";
vpts = 0;
av_packet_unref(&vpacket);
apts = 0;
av_packet_unref(&apacket);
}
void register_ffmpeg()
{
//注册所有的编解码器
avcodec_register_all();
//注册所有的封装器
av_register_all();
//注册所有网络协议
avformat_network_init();
}
bool Init(const char *url)
{
///5 封装器和视频流配置
//a.创建输出封装器上下文
int ret = avformat_alloc_output_context2(&ic,0,"flv",url);
this->outUrl = url;
if(ret != 0)
{
char buf[1024] = {0};
av_strerror(ret,buf,sizeof(buf) - 1);
cout<<buf;
return false;
}
return true;
}
bool InitAudioCode()
{
if(!CreateAudioCodec(AV_CODEC_ID_AAC))
{
return false;
}
ac->bit_rate = 40000;
ac->sample_rate = sampleRate;
ac->sample_fmt = AV_SAMPLE_FMT_FLTP;
ac->channels = channels;
ac->channel_layout = av_get_default_channel_layout(channels);
return OpenCodec(ac);
}
bool InitVideoCodec()
{
//4初始化编码上下文
//a 找到编码器
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if(!CreateVideoCodec(AV_CODEC_ID_H264))
{
cout<<"Can't find h264 encoder!"<<endl;
return false;
}
vc->bit_rate = 50*1024*8; //压缩后每秒视频的bit位大小 50kb
vc->width = outWidth;
vc->height = outHeight;
vc->time_base = {1,fps}; //时间基数
vc->framerate = {fps,1};
//画面组的大小,多少帧一个关键帧
vc->gop_size = 50;
vc->max_b_frames = 0;
vc->pix_fmt = AV_PIX_FMT_YUV420P;
//d 打开编码器
return OpenCodec(vc);
}
AVPacket *EncodeAudio(AVFrame *frame)
{
//pts 运算
//nb_sample/sample_rate = 一帧音频的秒数
//timebase pts = sec*timebase.den
pcm->pts = apts;
apts += av_rescale_q(pcm->nb_samples,{1,sampleRate},ac->time_base);
int ret = avcodec_send_frame(ac,pcm);
if(ret != 0) return NULL;
av_packet_unref(&apacket);
ret = avcodec_receive_packet(ac,&apacket);
if(ret != 0) return NULL;
cout<<apacket.size<<" "<<flush;
return &apacket;
}
AVPacket *EncodeVideo(AVFrame* frame)
{
av_packet_unref(&vpacket);
//h264编码
frame->pts = vpts;
vpts++;
int ret = avcodec_send_frame(vc,frame);
if(ret!=0)
return NULL;
//每次都会调用av_frame_unref(frame)
ret = avcodec_receive_packet(vc,&vpacket);
if(ret != 0 || vpacket.size <= 0)
return NULL;
return &vpacket;
}
bool InitScale()
{
//2.初始化格式转换的上下文
vsc = sws_getCachedContext(vsc,
inWidth,inHeight,AV_PIX_FMT_BGR24,//原宽度高度
outWidth,outHeight,AV_PIX_FMT_YUV420P,//输出宽,高,像素格式
SWS_BICUBIC,//尺寸变化算法
0,0,0
);
if(!vsc)
{
cout<<"sws_getCachedContext failed!";
return false;
}
//3.输出的数据结构
yuv = av_frame_alloc();
yuv->format = AV_PIX_FMT_YUV420P;
yuv->width = inWidth;
yuv->height = inHeight;
yuv->pts = 0;
//分配yuv空间
int ret = av_frame_get_buffer(yuv,32);
if(ret != 0)
{
char buf[1024] = {0};
av_strerror(ret, buf,sizeof(buf) - 1);
throw logic_error(buf);
}
return true;
}
AVFrame* RGBToYUV(char *rgb)
{
//rgb to yuv
//输入的数据格式
uint8_t *indata[AV_NUM_DATA_POINTERS] = {0};
//bgrbgrbgr
//plane inData[0]bbbb gggg rrrr
indata[0] = (uint8_t*)rgb;
int insize[AV_NUM_DATA_POINTERS] = {0};
//一行(宽)数据的字节数
insize[0] = inWidth * inPixSize;
int h = sws_scale(vsc,indata,insize,0,inHeight, //输入数据
yuv->data,yuv->linesize);
if(h<=0)
{
return NULL;
}
return yuv;
}
bool InitResample()
{
//音频重采样 上下文初始化
asc = swr_alloc_set_opts(asc,
av_get_default_channel_layout(channels), (AVSampleFormat)outSampleFmt, sampleRate, //输出格式
av_get_default_channel_layout(channels),(AVSampleFormat)inSampleFmt, sampleRate, //输入格式
0,0);
if(!asc)
{
cout<<"swr_alloc_set_opts failed!";
return false;
}
int ret = swr_init(asc);
if(ret != 0)
{
char err[1024] = {0};
av_strerror(ret,err,sizeof(err) - 1);
cout<<err<<endl;
return false;
}
std::cout<<"音频重采样上下文初始化成功"<<endl;
//音频从采样空间的分配
pcm = av_frame_alloc();
pcm->format = outSampleFmt;
pcm->channels = channels;
pcm->channel_layout = av_get_default_channel_layout(channels);
pcm->nb_samples = nbSample; //一帧音频一个通道的采样数
ret = av_frame_get_buffer(pcm,0); //给pcm分配存储空间
if(ret != 0)
{
char err[1024] = {0};
av_strerror(ret,err,sizeof(err) - 1);
cout<<err<<endl;
return false;
}
return true;
}
AVFrame* Resample(char *data)
{
const uint8_t *indata[AV_NUM_DATA_POINTERS] = {0};
indata[0] = (uint8_t*)data;
int len = swr_convert(
asc,pcm->data,pcm->nb_samples, //输出参数,输出存储地址和样本数量
indata,pcm->nb_samples);
qDebug()<<"swr_convert = "<<len<<" ";
if(len <= 0)
{
return NULL;
}
return pcm;
}
bool AddStream(AVCodecContext *c)
{
if(!c) return false;
//b.添加视频流
AVStream *st = avformat_new_stream(ic,NULL);
if(!st)
{
cout<<"avformat_new_stream failed"<<endl;
return false;
}
st->codecpar->codec_tag = 0;
//从编码器复制参数
avcodec_parameters_from_context(st->codecpar, c);
av_dump_format(ic,0,outUrl.c_str(),1);
if(c->codec_type == AVMEDIA_TYPE_VIDEO)
{
vc = c;
vs = st;
}
else if(c->codec_type == AVMEDIA_TYPE_AUDIO)
{
ac = c;
as = st;
}
return true;
}
bool SendHead()
{
//打开rtmp的网络输出IO
int ret = avio_open(&ic->pb,outUrl.c_str(),AVIO_FLAG_WRITE);
if(ret != 0)
{
char buf[1024] = {0};
av_strerror(ret,buf,sizeof(buf) - 1);
cout<<buf<<endl;
return false;
}
//写入封装头
ret = avformat_write_header(ic,NULL);
if(ret != 0)
{
char buf[1024] = {0};
av_strerror(ret,buf,sizeof(buf) - 1);
cout<<buf<<endl;
return false;
}
return true;
}
bool SendFrame(AVPacket *pkt)
{
if(!pkt || pkt->size <= 0 || !pkt->data)
{
std::cout<<"pkt is NULL"<<flush;
return false;
}
AVRational stime;
AVRational dtime;
//判断音视频
if(vc && vs && pkt->stream_index == vs->index)
{
stime = vc->time_base;
dtime = vs->time_base;
}
else if(ac && as && pkt->stream_index ==as->index)
{
stime = ac->time_base;
dtime = as->time_base;
}
else
{
return false;
}
//推流
pkt->pts = av_rescale_q(pkt->pts,stime,dtime);
pkt->dts = av_rescale_q(pkt->dts,stime,dtime);
pkt->duration = av_rescale_q(pkt->duration,stime,dtime);
int ret = av_interleaved_write_frame(ic,pkt);
if(ret == 0)
{
cout<<"#"<<flush;
}
return true;
}
private:
bool OpenCodec(AVCodecContext *c)
{
//打开音频编码器
int ret = avcodec_open2(c,0,0);
if(ret != 0)
{
char err[1024] = {0};
av_strerror(ret,err,sizeof(err) - 1);
cout<<err<<endl;
avcodec_free_context(&c);
cout<<"avcodec_open2 failed!"<<endl;
return false;
}
cout<<"avcodec_open2 success!"<<endl;
return true;
}
bool CreateAudioCodec(AVCodecID cid)
{
//一次读取一帧音频的字节数
///4 初始化音频编码器
AVCodec *codec = avcodec_find_encoder(cid);
if(!codec)
{
cout<<"avcodec_find_encoder failed!"<<endl;
return false;
}
ac = avcodec_alloc_context3(codec);
if(!ac)
{
cout<<"avcodec_alloc_context3 cid failed!"<<endl;
return false;
}
cout<<"avcodec_alloc_context3 success!"<<endl;
ac->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
ac->thread_count = 8;
return true;
}
bool CreateVideoCodec(AVCodecID cid)
{
//一次读取一帧音频的字节数
///4 初始化音频编码器
AVCodec *codec = avcodec_find_encoder(cid);
if(!codec)
{
cout<<"avcodec_find_encoder failed!"<<endl;
return false;
}
vc = avcodec_alloc_context3(codec);
if(!vc)
{
cout<<"avcodec_alloc_context3 cid failed!"<<endl;
return false;
}
cout<<"avcodec_alloc_context3 success!"<<endl;
vc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
vc->thread_count = 8;
return true;
}
SwsContext *vsc = NULL; //像素格式上下文
SwrContext *asc = NULL; //像素格式上下文
AVFrame *yuv = NULL;
AVFrame *pcm = NULL; //输出的pcm
AVPacket vpacket = {0}; //视频帧
AVPacket apacket = {0}; //音频帧
AVCodec *codec = NULL; //音频重采样的上下文
int vpts = 0;
int apts = 0;
};
// Factory: returns the static encoder instance for `index` (0..255).
XMediaEncode * XMediaEncode::Get(unsigned char index)
{
    static bool isFirst = true;
    if(isFirst)
    {
        // Register all encoders (harmless duplicate of register_ffmpeg()).
        avcodec_register_all();
        isFirst = false;
    }
    // BUG FIX: `index` ranges 0..255, so the array needs 256 slots; the old
    // cxm[255] made Get(255) index one past the end.
    static CXMediaEncode cxm[256];
    return &cxm[index];
}
// Nothing to initialise: all members carry in-class initialisers.
XMediaEncode::XMediaEncode()
{
}
// NOTE(review): resources are released by CXMediaEncode::close(), which is
// never called automatically; the instances returned by Get() are static
// and live for the whole process.
XMediaEncode::~XMediaEncode()
{
}
CMakeLists.txt
# Qt6's CMake packages require at least CMake 3.16 (3.1 was too old).
cmake_minimum_required(VERSION 3.16)
project(opencv_example_project)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(CMAKE_AUTOMOC ON)
set(CMAKE_AUTORCC ON)
set(CMAKE_AUTOUIC ON)
set(CMAKE_BUILD_TYPE Debug)
# Qt6 needs C++17 or later.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
find_package(OpenCV REQUIRED)
message(STATUS "OpenCV library status:")
message(STATUS " config: ${OpenCV_DIR}")
message(STATUS " version: ${OpenCV_VERSION}")
message(STATUS " libraries: ${OpenCV_LIBS}")
message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}")
find_library(AVCODEC_LIBRARY avcodec)
find_library(AVFORMAT_LIBRARY avformat)
find_library(AVUTIL_LIBRARY avutil)
find_library(AVDEVICE_LIBRARY avdevice)
# REQUIRED so a missing Qt module fails at configure time, not link time.
find_package(Qt6 REQUIRED COMPONENTS Core Gui Multimedia Widgets)
add_executable(qt_audio_rtmp VideoRecordWorker.cpp XMediaEncode.cpp Controller.cpp main.cpp AudioRecordWorker.cpp)
target_link_libraries(qt_audio_rtmp PRIVATE
    ${OpenCV_LIBS}
    Qt::Core
    Qt::Gui
    Qt::Multimedia
    Qt::Widgets
    pthread
    swresample
    m
    swscale
    avformat
    avcodec
    avutil
    avfilter
    avdevice
    postproc
    z
    lzma
    rt)
音频推流
xz@xiaqiu:~/study/csdn/rtmp/audio_to_rtmp/qt_audio_rtmp/build$ make
[ 12%] Automatic MOC and UIC for target qt_audio_rtmp
[ 12%] Built target qt_audio_rtmp_autogen
Scanning dependencies of target qt_audio_rtmp
[ 25%] Building CXX object CMakeFiles/qt_audio_rtmp.dir/Controller.cpp.o
[ 37%] Linking CXX executable qt_audio_rtmp
[100%] Built target qt_audio_rtmp
xz@xiaqiu:~/study/csdn/rtmp/audio_to_rtmp/qt_audio_rtmp/build$
xz@xiaqiu:~/study/csdn/rtmp/audio_to_rtmp/qt_audio_rtmp/build$ ls
CMakeCache.txt cmake_install.cmake Makefile qt_audio_rtmp_autogen
CMakeFiles config.tests qt_audio_rtmp
xz@xiaqiu:~/study/csdn/rtmp/audio_to_rtmp/qt_audio_rtmp/build$ ./qt_audio_rtmp
main thread: 0x7fbcb0d40d80
AudioRecordWorker() thread: 0x7fbcb0d40d80
音频重采样上下文初始化成功
avcodec_alloc_context3 success!
avcodec_open2 success!
Output #0, flv, to 'rtmp://0.0.0.0/live':
Stream #0:0: Audio: aac (LC), 44100 Hz, stereo, fltp, 40 kb/s
doSomething() "copy" thread: 0x7fbc9dffb700
size = 4096
ffplay rtmp://0.0.0.0/live 测试 nginx-rtmp 音频流
视频
xz@xiaqiu:~/study/csdn/rtmp/audio_to_rtmp/qt_audio_rtmp/build$ ./qt_audio_rtmp
main thread: 0x7f08c167bd80
doSomething() "copy" thread: 0x7f08bd509700
[ WARN:0] global ../modules/videoio/src/cap_gstreamer.cpp (935) open OpenCV | GStreamer warning: Cannot query video position: status=0, value=-1, duration=-1
rtsp://test:test@192.168.1.4cam open success
avcodec_alloc_context3 success!
[libx264 @ 0x7f08ac21dd80] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 0x7f08ac21dd80] profile High, level 3.0
[libx264 @ 0x7f08ac21dd80] 264 - core 155 r2917 0a84d98 - H.264/MPEG-4 AVC codec - Copyleft 2003-2018 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=8 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=0 weightp=2 keyint=50 keyint_min=5 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=abr mbtree=1 bitrate=409 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
avcodec_open2 success!
Output #0, flv, to 'rtmp://0.0.0.0/live':
Stream #0:0: Video: h264, yuv420p, 640x480, q=2-31, 409 kb/s
ffplay rtmp://0.0.0.0/live 测试 nginx-rtmp 视频流