Streaming Media Analysis: An FFmpeg Implementation of the WebRTC Protocol

1. Publish (push) handling:

AVOutputFormat ff_webrtc_muxer = {
    .name              = "webrtc",
    .long_name         = "webrtc muxer",
    .priv_data_size    = sizeof(WEBRTCContext),
    .audio_codec       = AV_CODEC_ID_OPUS,
    .video_codec       = AV_CODEC_ID_H264,
    .init              = webrtc_init,
    .write_header      = webrtc_write_header,
    .write_packet      = webrtc_write_packet,
    .write_trailer     = webrtc_write_close,
    .deinit            = webrtc_deinit,
    .flags             = AVFMT_NOFILE | AVFMT_GLOBALHEADER,
    .priv_class        = &webrtc_muxer_class,
};
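
Assuming the muxer above has been compiled into libavformat, the push side can be driven through the standard muxing API. A minimal sketch (the function name open_webrtc_output and the URL are illustrative, not from the original code):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Sketch only: create an output context that selects ff_webrtc_muxer by
 * name, add one H.264 and one Opus stream (the codec IDs the muxer
 * declares), and write the header, which triggers webrtc_init/webrtc_open
 * and webrtc_write_header. Error cleanup is abbreviated. */
static int open_webrtc_output(AVFormatContext **poc,
                              const AVCodecParameters *vpar,   /* H.264 */
                              const AVCodecParameters *apar)   /* Opus  */
{
    AVFormatContext *oc = NULL;
    int ret = avformat_alloc_output_context2(&oc, NULL, "webrtc",
                  "webrtc://127.0.0.1:1985/live/stream");  /* illustrative URL */
    if (ret < 0)
        return ret;

    AVStream *vs = avformat_new_stream(oc, NULL);
    AVStream *as = avformat_new_stream(oc, NULL);
    if (!vs || !as)
        return AVERROR(ENOMEM);
    avcodec_parameters_copy(vs->codecpar, vpar);
    avcodec_parameters_copy(as->codecpar, apar);

    ret = avformat_write_header(oc, NULL);
    if (ret < 0)
        return ret;

    *poc = oc;   /* then feed packets with av_interleaved_write_frame() */
    return 0;
}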

The webrtc_init function calls webrtc_open.
static int webrtc_init(struct AVFormatContext *s)
{
	WEBRTCContext *h = s->priv_data;

	av_log(s, AV_LOG_INFO, "webrtc_init, filename %s\n", s->filename);
	h->avctx = s;
	h->video_stream_index = -1;   /* resolved lazily in webrtc_write_packet */
	h->audio_stream_index = -1;
	h->time_base_den = 30;        /* default until the real time base is known */

	int ret = webrtc_open(s, s->filename);
	if (ret) {
		av_log(s, AV_LOG_ERROR, "webrtc_init: webrtc_open failed, %s\n",
		       av_err2str(ret));
		return ret;
	}
	return 0;
}
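
WEBRTCContext itself is never shown in the post. Reconstructed from the fields the snippets use, it looks roughly like this (a sketch; the real layout may have more members and differ in detail, and PacketQueue is an assumed type, see the queue sketch near the end):

typedef struct WEBRTCContext {
    const AVClass      *av_class;
    AVFormatContext    *avctx;
    YangMetaConnection *handle;                /* metartc connection interface */
    enum AVCodecID      video_codec;
    enum AVCodecID      audio_codec;
    int                 video_stream_index;    /* muxer side */
    int                 audio_stream_index;
    int                 video_stream_index_in; /* demuxer side */
    int                 audio_stream_index_in;
    int                 video_stream_index_out;
    int                 audio_stream_index_out;
    int                 time_base_den;
    YangFrame           video_frame;           /* reused for publishVideo */
    YangFrame           audio_frame;           /* reused for publishAudio */
    AVPacket            video_pkt;             /* reused by receive callbacks */
    AVPacket            audio_pkt;
    PacketQueue         queue;                 /* filled by the receive callbacks */
    uint8_t            *extradata;             /* SPS/PPS parsed from the stream */
    int                 extradata_size;
    uint8_t             video_header[4];       /* 4 bytes copied over each video
                                                * packet head, presumably the
                                                * Annex-B start code 00 00 00 01 */
    int                 error_code;
} WEBRTCContext;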

The webrtc_open function calls yang_create_metaConnection to create the WebRTC service interface, then connectSfuServer to connect to the SRS server.

static int webrtc_open(AVFormatContext *h, const char *uri)
{
	WEBRTCContext *s = h->priv_data;

	av_log(h, AV_LOG_INFO, "webrtc_open %s\n", uri);
	s->video_stream_index = -1;
	s->audio_stream_index = -1;
	s->video_codec = AV_CODEC_ID_H264;
	s->audio_codec = AV_CODEC_ID_OPUS;

	if (!av_strstart(uri, "webrtc://", NULL))
		return AVERROR(EINVAL);

	/* create the metartc connection interface */
	s->handle = (YangMetaConnection*) calloc(1, sizeof(YangMetaConnection));
	if (!s->handle)
		return AVERROR(ENOMEM);
	yang_create_metaConnection(s->handle);

	/* register the callbacks through which metartc hands data back to us */
	YangStreamConfig stream;
	memset(&stream, 0, sizeof(YangStreamConfig));
	stream.rtcCallback.context = s;
	stream.rtcCallback.setMediaConfig = g_ff_rtc_setPlayMediaConfig;
	stream.rtcCallback.sendRequest = g_ff_rtc_sendRequest;
	stream.recvCallback.context = s;
	stream.recvCallback.receiveAudio = g_ff_rtc_receiveAudio;
	stream.recvCallback.receiveVideo = g_ff_rtc_receiveVideo;

	if (s->handle->init)
		s->handle->init(s->handle->session, &stream, s);
	if (s->handle->initParam)
		s->handle->initParam(s->handle->session, uri, Yang_Stream_Publish);

	//Yang_Server_Srs/Yang_Server_Zlm/Yang_Server_P2p
	int32_t mediaServer = Yang_Server_Srs;
	if (s->handle->connectSfuServer(s->handle->session, mediaServer) != 0) {
		av_log(h, AV_LOG_ERROR, "connect failed! uri= %s\n", uri);
		return AVERROR(EIO);
	}

	av_log(h, AV_LOG_INFO, "webrtc_open exit\n");
	return 0;
}
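
The matching teardown path (webrtc_deinit / webrtc_write_close) is not shown in the post. A plausible sketch, assuming metartc pairs yang_create_metaConnection with a yang_destroy_metaConnection (that name is an assumption, not confirmed by the post):

static void webrtc_deinit_sketch(AVFormatContext *h)
{
	WEBRTCContext *s = h->priv_data;
	if (s->handle) {
		yang_destroy_metaConnection(s->handle); /* assumed counterpart of
		                                         * yang_create_metaConnection */
		free(s->handle);                        /* calloc()ed in webrtc_open */
		s->handle = NULL;
	}
}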

Sending data: webrtc_write_packet calls publishAudio and publishVideo to send the audio and video.


static int webrtc_write_packet(AVFormatContext *h, AVPacket *pkt)
{
	if (pkt == NULL)
		return 0;

	WEBRTCContext *s = h->priv_data;
	YangMetaConnection *metaconn = s->handle;
	int ret = 0;

	/* on the first packet, resolve the stream indexes and hand the H.264
	 * extradata (SPS/PPS) to metartc */
	if (s->video_stream_index == -1 || s->audio_stream_index == -1) {
		for (int i = 0; i < h->nb_streams; i++) {
			AVStream *st = h->streams[i];

			if (st->codecpar->codec_id == AV_CODEC_ID_H264) {
				s->video_stream_index = st->index;
				s->time_base_den = st->time_base.den;

				if (st->codecpar->extradata) {
					metaconn->setExtradata(metaconn->session, Yang_VED_264,
					                       (uint8_t*)st->codecpar->extradata,
					                       st->codecpar->extradata_size);
				}
			}
			if (st->codecpar->codec_id == AV_CODEC_ID_OPUS)
				s->audio_stream_index = st->index;
		}
	}

	if (pkt->stream_index == s->video_stream_index) {
		s->video_frame.nb = pkt->size;
		s->video_frame.payload = pkt->data;
		/* convert pts to microseconds; assumes a time base of 1/time_base_den */
		s->video_frame.pts = pkt->pts * 1000000 / s->time_base_den;

		ret = metaconn->publishVideo(metaconn->session, &s->video_frame);
	} else if (pkt->stream_index == s->audio_stream_index) {
		s->audio_frame.nb = pkt->size;
		s->audio_frame.payload = pkt->data;
		s->audio_frame.pts = pkt->pts;
		ret = metaconn->publishAudio(metaconn->session, &s->audio_frame);
	}

	return ret;
}
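
The microsecond conversion above only holds when the video time base numerator is 1 (for example 1/30). A more general variant (a sketch, not in the original) would rescale explicitly with libavutil:

/* rescale from the stream's own time base to microseconds, whatever the
 * numerator; st is the AVStream the packet belongs to,
 * i.e. h->streams[pkt->stream_index] */
s->video_frame.pts = av_rescale_q(pkt->pts, st->time_base,
                                  (AVRational){1, 1000000});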

2. Pull (play) handling:

AVInputFormat ff_webrtc_demuxer = {
    .name           = "webrtc",
    .long_name      = "webrtc demuxer",
    .priv_data_size = sizeof(WEBRTCContext),
    .read_probe     = webrtc_probe,
    .read_header    = webrtc_read_header,
    .read_packet    = webrtc_read_packet,
    .read_close     = webrtc_read_close,
    .extensions      = "webrtc",
    .priv_class     = &webrtc_class,
    .flags          = AVFMT_NOFILE,
};

#ifdef BUILD_AS_PLUGIN
void register_webrtc_demuxer()
{
    av_log(NULL, AV_LOG_INFO, "register_webrtc_demuxer\n");
    av_register_input_format(&ff_webrtc_demuxer);
}
#endif
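
With the demuxer registered, the pull side can be driven through the standard demuxing API. A minimal sketch (the function name pull_webrtc_input and the URL are illustrative):

#include <libavformat/avformat.h>

/* Sketch only: select ff_webrtc_demuxer by name and read packets from it. */
static int pull_webrtc_input(void)
{
    AVInputFormat *fmt = av_find_input_format("webrtc");
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, "webrtc://127.0.0.1:1985/live/stream",
                                  fmt, NULL);      /* runs webrtc_read_header */
    if (ret < 0)
        return ret;

    AVPacket pkt;
    while (av_read_frame(ic, &pkt) >= 0) {         /* runs webrtc_read_packet */
        /* ... consume pkt ... */
        av_packet_unref(&pkt);
    }
    avformat_close_input(&ic);
    return 0;
}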

Connecting to the server

webrtc_read_header calls webrtc_open to connect to the SRS server.

static int webrtc_read_header(AVFormatContext *s)
{
    WEBRTCContext *h = s->priv_data;
    int ret;

    av_log(s, AV_LOG_INFO, "webrtc_read_header, filename %s\n", s->filename);

    s->flags |= AVFMT_FLAG_GENPTS;
    s->ctx_flags |= AVFMTCTX_NOHEADER;
    s->fps_probe_size = 0;
    s->max_analyze_duration = FFMAX(s->max_analyze_duration, 5*AV_TIME_BASE);
    s->probesize = FFMAX(s->probesize, 512*1024);
    h->avctx = s;
    h->video_stream_index_in = 0;
    h->audio_stream_index_in = 1;
    h->video_stream_index_out = -1;
    h->audio_stream_index_out = -1;
    ret = webrtc_open(s, s->filename);
    if (ret) {
        av_log(s, AV_LOG_ERROR, "webrtc_read_header: webrtc_open failed, %s\n", av_err2str(ret));
        return ret;
    }

    // time out and exit if no data is received within 5 seconds
    ret = packet_queue_wait_start(&h->queue, h, INT64_C(1000) * 5000);
    if (ret) {
        av_log(s, AV_LOG_ERROR, "webrtc_read_header wait failed, %s\n", av_err2str(ret));
        webrtc_close(s);
        return ret;
    }

    av_log(s, AV_LOG_INFO, "webrtc_read_header exit\n");
    return 0;
}

Receiving audio and video data: when the libmetartc library receives media, it hands it back through the g_ff_rtc_receiveAudio and g_ff_rtc_receiveVideo callbacks, which wrap the frames in AVPackets and enqueue them with packet_queue_put.

static void g_ff_rtc_receiveAudio(void *user, YangFrame *audioFrame)
{
	if (user == NULL)
		return;

	WEBRTCContext *s = (WEBRTCContext*) user;
	AVPacket *pkt = &s->audio_pkt;

	if (av_new_packet(pkt, audioFrame->nb) < 0)
		return;
	memcpy(pkt->data, audioFrame->payload, audioFrame->nb);
	pkt->stream_index = s->audio_stream_index_in;
	pkt->dts = audioFrame->pts;
	pkt->pts = audioFrame->pts;
	packet_queue_put(&s->queue, pkt, s);
}
static void g_ff_rtc_receiveVideo(void *user, YangFrame *videoFrame)
{
	if (user == NULL)
		return;

	WEBRTCContext *s = (WEBRTCContext*) user;
	AVFormatContext *h = s->avctx;

	if (videoFrame->frametype == YANG_Frametype_Spspps) {
		/* first SPS/PPS frame: parse it into extradata */
		if (s->extradata_size > 0)
			return;
		uint8_t headers[128];
		memset(headers, 0, 128);
		int32_t headerLen = 0;
		if (s->handle->parseHeader)
			s->handle->parseHeader(s->video_codec, videoFrame->payload,
			                       headers, &headerLen);
		if (headerLen > 0) {
			s->extradata = av_malloc(headerLen);
			if (!s->extradata) {
				s->error_code = AVERROR(ENOMEM);
				return;
			}
			memcpy(s->extradata, headers, headerLen);
			s->extradata_size = headerLen;

			//just a fake video packet for create stream
			if (h->ctx_flags & AVFMTCTX_NOHEADER) {
				AVPacket *pkt = &s->video_pkt;
				av_new_packet(pkt, 0);
				pkt->stream_index = s->video_stream_index_in;
				packet_queue_put(&s->queue, pkt, s);
			}
		}
		return;
	}

	/* drop media frames until the extradata has been seen */
	if (s->extradata_size == 0)
		return;

	AVPacket *pkt = &s->video_pkt;
	if (av_new_packet(pkt, videoFrame->nb) < 0)
		return;
	memcpy(pkt->data, videoFrame->payload, videoFrame->nb);
	/* overwrite the leading 4 bytes with the header kept in s->video_header */
	memcpy(pkt->data, s->video_header, 4);
	pkt->stream_index = s->video_stream_index_in;
	pkt->dts = videoFrame->pts;
	pkt->pts = videoFrame->pts;
	packet_queue_put(&s->queue, pkt, s);
}

The webrtc_read_packet function fetches data from the queue via packet_queue_get.

static int webrtc_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i;
    WEBRTCContext *h = s->priv_data;
    AVStream *st;

    do {
        ret = packet_queue_get(&h->queue, h, pkt);

        if (ret < 0)
            break;

        /* now find stream */
        for (i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
            if (pkt->stream_index == h->video_stream_index_in
                && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
                break;
            } else if (pkt->stream_index == h->audio_stream_index_in
                       && st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                break;
            }
        }
        if (i == s->nb_streams) {
            /* no matching stream yet: index 0 is video, 1 is audio (see
             * video/audio_stream_index_in set in webrtc_read_header) */
            static const enum AVMediaType stream_types[] = {AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO};
            st = create_stream(s, stream_types[pkt->stream_index]);
            if (!st) {
                av_packet_unref(pkt);
                ret = AVERROR(ENOMEM);
                break;
            }
        }

        if (pkt->size <= 0) {
            // drop fake packet
            av_packet_unref(pkt);
            continue;
        }

        if (pkt->stream_index == h->video_stream_index_in) {
            pkt->stream_index = h->video_stream_index_out;

        } else if (pkt->stream_index == h->audio_stream_index_in) {
            pkt->stream_index = h->audio_stream_index_out;

        } else {
            ret = 0;   /* unknown stream index: mark the packet to be dropped */
        }

        if (!ret) {
            av_log(s, AV_LOG_INFO, "drop pkt with index %d and continue\n",
                   pkt->stream_index);
            av_packet_unref(pkt);
        }
    } while (!ret);

    ret = ret > 0 ? 0 : ret;
    if (ret)
        av_log(s, AV_LOG_WARNING, "webrtc_read_packet, %s\n", av_err2str(ret));
    return ret;
}
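
The packet_queue_* helpers themselves are never shown in the post. A minimal sketch of what such a queue could look like, modeled on ffplay's PacketQueue (the real implementation in this patch may differ, and the _sketch names are hypothetical):

#include <pthread.h>
#include <libavformat/avformat.h>

typedef struct PacketQueue {
    AVPacketList   *first, *last;
    int             nb_packets;
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
} PacketQueue;

/* producer side, called from the metartc receive callbacks */
static int packet_queue_put_sketch(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry = av_mallocz(sizeof(*entry));
    if (!entry)
        return AVERROR(ENOMEM);
    av_packet_move_ref(&entry->pkt, pkt);       /* the queue takes ownership */

    pthread_mutex_lock(&q->mutex);
    if (q->last)
        q->last->next = entry;
    else
        q->first = entry;
    q->last = entry;
    q->nb_packets++;
    pthread_cond_signal(&q->cond);              /* wake a blocked reader */
    pthread_mutex_unlock(&q->mutex);
    return 0;
}

/* consumer side, called from webrtc_read_packet */
static int packet_queue_get_sketch(PacketQueue *q, AVPacket *pkt)
{
    pthread_mutex_lock(&q->mutex);
    while (!q->first)
        pthread_cond_wait(&q->cond, &q->mutex); /* block until a packet arrives */
    AVPacketList *entry = q->first;
    q->first = entry->next;
    if (!q->first)
        q->last = NULL;
    q->nb_packets--;
    pthread_mutex_unlock(&q->mutex);

    *pkt = entry->pkt;
    av_free(entry);
    return 1;                                   /* positive: got a packet */
}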

 

In the next chapter we will analyze how the metartc library implements the WebRTC protocol.
