全文以srs3.0为例进行分析
分析srs源码必须辅助gdb调试
root@e370746341b0:/srs/trunk# gdb ./objs/srs
(gdb) set args -c conf/srs.conf
(gdb) b main
Breakpoint 1 at 0xab858e: file ./src/main/srs_main_server.cpp, line 255.
从上面可以看到/src/main/srs_main_server.cpp中的main函数是全局入口函数
进入do_main函数,在该函数中主要做了三件事
-
_srs_config,全局配置文件
-
_srs_log,全局log文件
-
重点创建SrsServer对象并运行
//解析命令行参数
if ((err = _srs_config->parse_options(argc, argv)) != srs_success) {
return srs_error_wrap(err, "config parse options");
}
//设置工作目录和当前目录
string cwd = _srs_config->get_work_dir();
if (!cwd.empty() && cwd != "./" && (r0 = chdir(cwd.c_str())) == -1) {
return srs_error_new(-1, "chdir to %s, r0=%d", cwd.c_str(), r0);
}
if ((err = _srs_config->initialize_cwd()) != srs_success) {
return srs_error_wrap(err, "config cwd");
}
// 初始化log
if ((err = _srs_log->initialize()) != srs_success) {
return srs_error_wrap(err, "log initialize");
}
//创建SrsServer对象
_srs_server = new SrsServer();
if ((err = run(_srs_server)) != srs_success) {
return srs_error_wrap(err, "run");
}
在构造SrsServer对象会初始化http_api_mux和http_server
http_api_mux = new SrsHttpServeMux(); // HTTP请求多路复用器,不是http拉流的
http_server = new SrsHttpServer(this); // http服务
run(SrsServer* svr) 会初始化服务器和获取守护进程配置in_daemon(默认为false),如果in_daemon为false,直接执行 run_master(SrsServer* svr);否则会创建守护进程,在守护进程中运行
bool in_daemon = _srs_config->get_daemon();
if (in_daemon && _srs_in_docker && _srs_config->disable_daemon_for_docker()) {
srs_warn("disable daemon for docker");
in_daemon = false;
}
// If not daemon, directly run master.
if (!in_daemon) {
if ((err = run_master(svr)) != srs_success) {
return srs_error_wrap(err, "run master");
}
return srs_success;
}
srs_trace("start daemon mode...");
int pid = fork();
if(pid < 0) {
return srs_error_new(-1, "fork father process");
}
// grandpa
if(pid > 0) {
int status = 0;
waitpid(pid, &status, 0);
srs_trace("grandpa process exit.");
exit(0);
}
// father
pid = fork();
if(pid < 0) {
return srs_error_new(-1, "fork child process");
}
if(pid > 0) {
srs_trace("father process exit");
exit(0);
}
// son
srs_trace("son(daemon) process running.");
if ((err = run_master(svr)) != srs_success) {
return srs_error_wrap(err, "daemon run master");
}
run_master(SrsServer* svr) 函数中,服务器做一些初始化工作并调用listen监听客户端的连接,然后调用do_cycle函数(死循环),做一些监控,更新时间及缓存等。
svr->initialize_st() //初始化st协程库
svr->initialize_signal() //初始化信号
svr->acquire_pid_file() //将进程pid写入文件
svr->listen() //监听客户端请求
svr->register_signal() //注册信号
svr->http_handle() //注册http的处理模块
svr->ingest() //开启流采集
svr->cycle() //消息循环处理
重点关注SrsServer::listen(),使用gdb debug一下
(gdb) bt
#0 SrsServer::listen (this=0x555555a65ad0) at src/app/srs_app_server.cpp:872
#1 0x0000555555617442 in run_master (svr=0x555555a65ad0) at src/main/srs_main_server.cpp:470
#2 0x0000555555617058 in run (svr=0x555555a65ad0) at src/main/srs_main_server.cpp:410
#3 0x0000555555615b6e in do_main (argc=3, argv=0x7fffffffe738) at src/main/srs_main_server.cpp:185
#4 0x0000555555615c94 in main (argc=3, argv=0x7fffffffe738) at src/main/srs_main_server.cpp:193
下一个阶段
srs_error_t SrsServer::listen()
{
srs_error_t err = srs_success;
if ((err = listen_rtmp()) != srs_success) {
return srs_error_wrap(err, "rtmp listen");
}
if ((err = listen_http_api()) != srs_success) {
return srs_error_wrap(err, "http api listen");
}
if ((err = listen_http_stream()) != srs_success) {
return srs_error_wrap(err, "http stream listen");
}
if ((err = listen_stream_caster()) != srs_success) {
return srs_error_wrap(err, "stream caster listen");
}
if ((err = conn_manager->start()) != srs_success) {
return srs_error_wrap(err, "connection manager");
}
return err;
}
这里我们以分析rtmp为主,因此重点关注SrsServer::listen_rtmp()
// Listen on every configured RTMP endpoint.
// Reads the "listen" directive from config, drops any stale RTMP
// listeners (important on reload), then creates one SrsBufferListener
// per ip:port. Returns the first listen error, wrapped.
srs_error_t SrsServer::listen_rtmp()
{
    srs_error_t err = srs_success;

    // Stream service port(s): at least one must be configured.
    std::vector<std::string> ip_ports = _srs_config->get_listens();
    srs_assert((int)ip_ports.size() > 0);

    // Close existing RTMP listeners and remove them from the listeners
    // manager, so a re-listen does not conflict with old sockets.
    close_listeners(SrsListenerRtmpStream);

    for (int i = 0; i < (int)ip_ports.size(); i++) {
        SrsListener* listener = new SrsBufferListener(this, SrsListenerRtmpStream);
        listeners.push_back(listener);

        int port; string ip;
        srs_parse_endpoint(ip_ports[i], ip, port);

        // Polymorphic call: resolves to SrsBufferListener::listen.
        // BUGFIX: the wrapped error was previously discarded (no return),
        // so a failed RTMP listen was silently ignored.
        if ((err = listener->listen(ip, port)) != srs_success) {
            return srs_error_wrap(err, "rtmp listen %s:%d", ip.c_str(), port);
        }
    }

    return err;
}
// Bind this buffered listener to an endpoint by delegating to a fresh
// SrsTcpListener. Logs the endpoint and listening fd on success.
srs_error_t SrsBufferListener::listen(string i, int p)
{
    srs_error_t err = srs_success;

    // Remember the endpoint for logging and later inspection.
    ip = i;
    port = p;

    // Replace any previous TCP listener with a new one for this endpoint.
    srs_freep(listener);
    listener = new SrsTcpListener(this, ip, port);

    // Virtual dispatch: this invokes SrsTcpListener::listen().
    if ((err = listener->listen()) != srs_success) {
        return srs_error_wrap(err, "buffered tcp listen");
    }

    string kind = srs_listener_type2string(type);
    srs_trace("%s listen at tcp://%s:%d, fd=%d", kind.c_str(), ip.c_str(), port, listener->fd());

    return err;
}
// Create the listening socket and spawn the accept coroutine.
srs_error_t SrsTcpListener::listen()
{
    srs_error_t err = srs_success;

    // Create the listening fd and register it with the ST coroutine library.
    if ((err = srs_tcp_listen(ip, port, &lfd)) != srs_success) {
        return srs_error_wrap(err, "listen at %s:%d", ip.c_str(), port);
    }

    // (Re)start the accept coroutine; its body is SrsTcpListener::cycle().
    srs_freep(trd);
    trd = new SrsSTCoroutine("tcp", this);

    if ((err = trd->start()) != srs_success) {
        return srs_error_wrap(err, "start coroutine");
    }

    return err;
}
分析一下SrsSTCoroutine::start
// Start the coroutine. Rejects double-start and use-after-dispose,
// recording the error in trd_err so a later pull() can report it.
srs_error_t SrsSTCoroutine::start()
{
    srs_error_t err = srs_success;

    if (started || disposed) {
        err = disposed
            ? srs_error_new(ERROR_THREAD_DISPOSED, "disposed")
            : srs_error_new(ERROR_THREAD_STARTED, "started");
        // Keep the first error only; do not clobber an earlier one.
        if (trd_err == srs_success) {
            trd_err = srs_error_copy(err);
        }
        return err;
    }

    // Spawn the ST coroutine; pfn is the static entry that runs this->cycle().
    trd = (srs_thread_t)_pfn_st_thread_create(pfn, this, 1, 0);
    if (trd == NULL) {
        err = srs_error_new(ERROR_ST_CREATE_CYCLE_THREAD, "create failed");
        srs_freep(trd_err);
        trd_err = srs_error_copy(err);
        return err;
    }

    started = true;
    return err;
}
void* SrsSTCoroutine::pfn(void* arg)
{
SrsSTCoroutine* p = (SrsSTCoroutine*)arg;
srs_error_t err = p->cycle();
// Set the err for function pull to fetch it.
// @see https://github.com/ossrs/srs/pull/1304#issuecomment-480484151
if (err != srs_success) {
srs_freep(p->trd_err);
// It's ok to directly use it, because it's returned by st_thread_join.
p->trd_err = err;
}
return (void*)err;
}
// Coroutine body: attach a log-context id, then run the handler's cycle.
srs_error_t SrsSTCoroutine::cycle()
{
    // Reuse the pre-assigned context id, or generate a fresh one.
    if (_srs_context && context) {
        _srs_context->set_id(context);
    } else if (_srs_context) {
        context = _srs_context->generate_id();
    }

    // Virtual dispatch into the owner, e.g. SrsTcpListener::cycle().
    srs_error_t err = handler->cycle();
    if (err != srs_success) {
        return srs_error_wrap(err, "coroutine cycle");
    }

    // The body finished on its own; stop() need not interrupt it.
    cycle_done = true;
    return err;
}
SrsSTCoroutine::start创建一个协程,并在协程中调用handler->cycle(),而handler是基类ISrsCoroutineHandler,而该类中的cycle又是一个虚函数,因此就是调用的是子类中的cycle()。
trd = new SrsSTCoroutine("tcp", this);
if ((err = trd->start()) != srs_success) { //调用this对象中的cycle,即SrsTcpListener::cycle()
return srs_error_wrap(err, "start coroutine");
}
// Accept loop running inside the "tcp" coroutine: block in srs_accept,
// mark each client fd close-on-exec, and hand it to the handler.
srs_error_t SrsTcpListener::cycle()
{
    srs_error_t err = srs_success;

    for (;;) {
        // pull() fails once the coroutine has been interrupted/stopped.
        if ((err = trd->pull()) != srs_success) {
            return srs_error_wrap(err, "tcp listener");
        }

        // Block in the ST library until a client connects.
        srs_netfd_t cfd = srs_accept(lfd, NULL, NULL, SRS_UTIME_NO_TIMEOUT);
        if (cfd == NULL) {
            return srs_error_new(ERROR_SOCKET_ACCEPT, "accept at fd=%d", srs_netfd_fileno(lfd));
        }

        // Avoid leaking the client fd into forked children.
        if ((err = srs_fd_closeexec(srs_netfd_fileno(cfd))) != srs_success) {
            return srs_error_wrap(err, "set closeexec");
        }

        // Hand off to the owner, e.g. SrsBufferListener::on_tcp_client.
        if ((err = handler->on_tcp_client(cfd)) != srs_success) {
            return srs_error_wrap(err, "handle fd=%d", srs_netfd_fileno(cfd));
        }
    }

    return err;
}
srs_accept函数内部accept阻塞,等待客户端的连接。当有RTMP客户端请求连接时触发该断点,比如进行RTMP推流,则会进入handler->on_tcp_client,这里handler是ISrsTcpHandler基类,而on_tcp_client是纯虚函数,因此指向子类SrsBufferListener::on_tcp_client
// Forward an accepted client to the server. A failed accept is logged
// and swallowed so one bad client never stops the accept loop.
srs_error_t SrsBufferListener::on_tcp_client(srs_netfd_t stfd)
{
    srs_error_t err = server->accept_client(type, stfd);

    if (err != srs_success) {
        srs_warn("accept client failed, err is %s", srs_error_desc(err).c_str());
        srs_freep(err);
    }

    return srs_success;
}
// Turn an accepted fd into a typed connection, register it, and start
// its service coroutine.
srs_error_t SrsServer::accept_client(SrsListenerType type, srs_netfd_t stfd)
{
    srs_error_t err = srs_success;

    SrsConnection* conn = NULL;
    if ((err = fd2conn(type, stfd, &conn)) != srs_success) {
        // Optionally tolerate peers whose ip cannot be resolved
        // (e.g. keepalived health probes), per empty_ip_ok config.
        if (srs_error_code(err) == ERROR_SOCKET_GET_PEER_IP && _srs_config->empty_ip_ok()) {
            srs_close_stfd(stfd);
            srs_error_reset(err);
            return srs_success;
        }
        return srs_error_wrap(err, "fd2conn");
    }
    srs_assert(conn);

    // Enqueue first; the connection's own cycle removes it when done.
    conns.push_back(conn);

    // Start the per-connection coroutine.
    // @remark never use the conn afterwards, for it maybe destroyed.
    if ((err = conn->start()) != srs_success) {
        return srs_error_wrap(err, "start conn coroutine");
    }

    return err;
}
fd2conn函数将fd和conn进行绑定
// Build a typed connection object (*pconn) for an accepted fd.
// Validates the peer ip, enforces the connection limit, marks the fd
// close-on-exec, then constructs the concrete connection class matching
// the listener type. For an unknown type the fd is closed and *pconn is
// left untouched (success returned).
srs_error_t SrsServer::fd2conn(SrsListenerType type, srs_netfd_t stfd, SrsConnection** pconn)
{
    srs_error_t err = srs_success;

    int fd = srs_netfd_fileno(stfd);
    string ip = srs_get_peer_ip(fd);

    // Some keep-alive tools, for example keepalived, send TCP probes for
    // which we cannot resolve a peer ip; just ignore such sockets.
    if (ip.empty()) {
        return srs_error_new(ERROR_SOCKET_GET_PEER_IP, "ignore empty ip, fd=%d", fd);
    }

    // Check connection limitation: first via the external handler hook,
    // then against the configured hard cap.
    int max_connections = _srs_config->get_max_connections();
    if (handler && (err = handler->on_accept_client(max_connections, (int)conns.size())) != srs_success) {
        return srs_error_wrap(err, "drop client fd=%d, max=%d, cur=%d for err: %s",
            fd, max_connections, (int)conns.size(), srs_error_desc(err).c_str());
    }
    if ((int)conns.size() >= max_connections) {
        return srs_error_new(ERROR_EXCEED_CONNECTIONS,
            "drop fd=%d, max=%d, cur=%d for exceed connection limits",
            fd, max_connections, (int)conns.size());
    }

    // Avoid fd leak when fork.
    // @see https://github.com/ossrs/srs/issues/518
    if (true) {
        int val;
        if ((val = fcntl(fd, F_GETFD, 0)) < 0) {
            // BUGFIX: message previously said "fnctl"; the syscall is fcntl.
            return srs_error_new(ERROR_SYSTEM_PID_GET_FILE_INFO, "fcntl F_GETFD error! fd=%d", fd);
        }
        val |= FD_CLOEXEC;
        if (fcntl(fd, F_SETFD, val) < 0) {
            return srs_error_new(ERROR_SYSTEM_PID_SET_FILE_INFO, "fcntl F_SETFD error! fd=%d", fd);
        }
    }

    // Construct the concrete connection for the listener type.
    if (type == SrsListenerRtmpStream) {
        *pconn = new SrsRtmpConn(this, stfd, ip);
    } else if (type == SrsListenerHttpApi) {
        *pconn = new SrsHttpApi(this, stfd, http_api_mux, ip);
    } else if (type == SrsListenerHttpStream) {
        *pconn = new SrsResponseOnlyHttpConn(this, stfd, http_server, ip);
    } else {
        srs_warn("close for no service handler. fd=%d, ip=%s", fd, ip.c_str());
        srs_close_stfd(stfd);
        return err;
    }

    return err;
}
根据传入的类型type,pconn指向不同的子类,如果是rtmp,则指向SrsRtmpConn,那么conn->start(),开启一个协程,专门用来处理数据流(推流或拉流),调用SrsConnection::cycle()
// Connection coroutine body: run the subclass do_cycle(), then ask the
// manager to reap this connection. Errors are logged at a level chosen
// by their cause and consumed here — the coroutine always reports success.
srs_error_t SrsConnection::cycle()
{
    srs_error_t err = do_cycle();

    // Notify manager to remove it.
    manager->remove(this);

    if (err == srs_success) {
        srs_trace("client finished.");
        return err;
    }

    // client close peer.
    // TODO: FIXME: Only reset the error when client closed it.
    if (srs_is_client_gracefully_close(err)) {
        srs_warn("client disconnect peer. ret=%d", srs_error_code(err));
    } else if (srs_is_server_gracefully_close(err)) {
        srs_warn("server disconnect. ret=%d", srs_error_code(err));
    } else {
        srs_error("serve error %s", srs_error_desc(err).c_str());
    }

    srs_freep(err);
    return srs_success;
}
而do_cycle()是纯虚函数,conn又指向SrsRtmpConn,所以调用子类SrsRtmpConn::do_cycle()
// RTMP connection body: handshake, connect-app, then the long-running
// service_cycle(). Always runs on_disconnect() afterwards, folding any
// disconnect error into the main error chain.
srs_error_t SrsRtmpConn::do_cycle()
{
srs_error_t err = srs_success;
srs_trace("RTMP client ip=%s, fd=%d", ip.c_str(), srs_netfd_fileno(stfd));
// Default timeouts for the handshake/connect phase.
rtmp->set_recv_timeout(SRS_CONSTS_RTMP_TIMEOUT);
rtmp->set_send_timeout(SRS_CONSTS_RTMP_TIMEOUT);
if ((err = rtmp->handshake()) != srs_success) {
return srs_error_wrap(err, "rtmp handshake");
}
// If a proxy fronted this client, log the real client ip it reported.
uint32_t rip = rtmp->proxy_real_ip();
if (rip > 0) {
srs_trace("RTMP proxy real client ip=%d.%d.%d.%d",
uint8_t(rip>>24), uint8_t(rip>>16), uint8_t(rip>>8), uint8_t(rip));
}
// connect_app fills the request (tcUrl, vhost, app, args...) from the
// client's RTMP connect command.
SrsRequest* req = info->req;
if ((err = rtmp->connect_app(req)) != srs_success) {
return srs_error_wrap(err, "rtmp connect tcUrl");
}
// set client ip to request.
req->ip = ip;
srs_trace("connect app, tcUrl=%s, pageUrl=%s, swfUrl=%s, schema=%s, vhost=%s, port=%d, app=%s, args=%s",
req->tcUrl.c_str(), req->pageUrl.c_str(), req->swfUrl.c_str(),
req->schema.c_str(), req->vhost.c_str(), req->port,
req->app.c_str(), (req->args? "(obj)":"null"));
// show client identity: when the peer is an edge SRS, its connect args
// carry version/ip/pid/id fields we can log.
if(req->args) {
std::string srs_version;
std::string srs_server_ip;
int srs_pid = 0;
int srs_id = 0;
SrsAmf0Any* prop = NULL;
if ((prop = req->args->ensure_property_string("srs_version")) != NULL) {
srs_version = prop->to_str();
}
if ((prop = req->args->ensure_property_string("srs_server_ip")) != NULL) {
srs_server_ip = prop->to_str();
}
if ((prop = req->args->ensure_property_number("srs_pid")) != NULL) {
srs_pid = (int)prop->to_number();
}
if ((prop = req->args->ensure_property_number("srs_id")) != NULL) {
srs_id = (int)prop->to_number();
}
// A positive pid marks the peer as another SRS (edge) instance.
if (srs_pid > 0) {
srs_trace("edge-srs ip=%s, version=%s, pid=%d, id=%d",
srs_server_ip.c_str(), srs_version.c_str(), srs_pid, srs_id);
}
}
// Main service loop; its error is kept and possibly extended below.
if ((err = service_cycle()) != srs_success) {
err = srs_error_wrap(err, "service cycle");
}
// Always fire on_disconnect; append its failure (if any) to err rather
// than replacing it, then free the secondary error.
srs_error_t r0 = srs_success;
if ((r0 = on_disconnect()) != srs_success) {
err = srs_error_wrap(err, "on disconnect %s", srs_error_desc(r0).c_str());
srs_freep(r0);
}
// If client is redirect to other servers, we already logged the event.
if (srs_error_code(err) == ERROR_CONTROL_REDIRECT) {
srs_error_reset(err);
}
return err;
}
// Post-connect service loop: negotiate window-ack/bandwidth/chunk-size,
// answer the connect, then repeatedly run stream_service_cycle().
// System-control errors (republish, RTMP close) are absorbed and the
// loop retries with relaxed timeouts; anything else terminates.
srs_error_t SrsRtmpConn::service_cycle()
{
    srs_error_t err = srs_success;

    SrsRequest* req = info->req;

    // Per-vhost window acknowledgement sizes (0 means "leave default").
    int out_ack_size = _srs_config->get_out_ack_size(req->vhost);
    if (out_ack_size && (err = rtmp->set_window_ack_size(out_ack_size)) != srs_success) {
        return srs_error_wrap(err, "rtmp: set out window ack size");
    }
    int in_ack_size = _srs_config->get_in_ack_size(req->vhost);
    if (in_ack_size && (err = rtmp->set_in_window_ack_size(in_ack_size)) != srs_success) {
        return srs_error_wrap(err, "rtmp: set in window ack size");
    }

    // Advertise 2.5Mbps peer bandwidth, dynamic limit type (2).
    if ((err = rtmp->set_peer_bandwidth((int)(2.5 * 1000 * 1000), 2)) != srs_success) {
        return srs_error_wrap(err, "rtmp: set peer bandwidth");
    }

    // get the ip which client connected.
    std::string local_ip = srs_get_local_ip(srs_netfd_fileno(stfd));

    // do bandwidth test if connect to the vhost which is for bandwidth check.
    if (_srs_config->get_bw_check_enabled(req->vhost)) {
        if ((err = bandwidth->bandwidth_check(rtmp, skt, req, local_ip)) != srs_success) {
            return srs_error_wrap(err, "rtmp: bandwidth check");
        }
        return err;
    }

    // set chunk size to larger.
    // set the chunk size before any larger response greater than 128,
    // to make OBS happy, @see https://github.com/ossrs/srs/issues/454
    int chunk_size = _srs_config->get_chunk_size(req->vhost);
    if ((err = rtmp->set_chunk_size(chunk_size)) != srs_success) {
        return srs_error_wrap(err, "rtmp: set chunk size %d", chunk_size);
    }

    // response the client connect ok.
    if ((err = rtmp->response_connect_app(req, local_ip.c_str())) != srs_success) {
        return srs_error_wrap(err, "rtmp: response connect app");
    }
    // BUGFIX: message said "on bw down"; the call is on_bw_done.
    if ((err = rtmp->on_bw_done()) != srs_success) {
        return srs_error_wrap(err, "rtmp: on bw done");
    }

    while (true) {
        if ((err = trd->pull()) != srs_success) {
            return srs_error_wrap(err, "rtmp: thread quit");
        }

        err = stream_service_cycle();

        // stream service must terminated with error, never success.
        // when terminated with success, it's user required to stop.
        // TODO: FIXME: Support RTMP client timeout, https://github.com/ossrs/srs/issues/1134
        if (err == srs_success) {
            continue;
        }

        // when not system control error, fatal error, return.
        if (!srs_is_system_control_error(err)) {
            return srs_error_wrap(err, "rtmp: stream service");
        }

        // for republish, continue service
        if (srs_error_code(err) == ERROR_CONTROL_REPUBLISH) {
            // set timeout to a larger value, wait for encoder to republish.
            // BUGFIX: the send/recv constants were crossed (send timeout was
            // set from the RECV constant and vice versa); pair each setter
            // with its own constant.
            rtmp->set_send_timeout(SRS_REPUBLISH_SEND_TIMEOUT);
            rtmp->set_recv_timeout(SRS_REPUBLISH_RECV_TIMEOUT);
            srs_info("rtmp: retry for republish");
            srs_freep(err);
            continue;
        }

        // for "some" system control error,
        // logical accept and retry stream service.
        if (srs_error_code(err) == ERROR_CONTROL_RTMP_CLOSE) {
            // TODO: FIXME: use ping message to anti-death of socket.
            // @see: https://github.com/ossrs/srs/issues/39
            // set timeout to a larger value, for user paused.
            rtmp->set_recv_timeout(SRS_PAUSED_RECV_TIMEOUT);
            rtmp->set_send_timeout(SRS_PAUSED_SEND_TIMEOUT);
            srs_trace("rtmp: retry for close");
            srs_freep(err);
            continue;
        }

        // for other system control message, fatal error.
        return srs_error_wrap(err, "rtmp: reject");
    }

    return err;
}
// One stream session: identify the client (play/publish), resolve and
// validate the vhost, run security/edge checks, fetch-or-create the
// SrsSource, then dispatch to playing() or publishing() by client type.
srs_error_t SrsRtmpConn::stream_service_cycle()
{
srs_error_t err = srs_success;
SrsRequest* req = info->req;
// Identify the client intent (play/FMLE/flash publish...), stream name
// and duration from the RTMP command sequence.
if ((err = rtmp->identify_client(info->res->stream_id, info->type, req->stream, req->duration)) != srs_success) {
return srs_error_wrap(err, "rtmp: identify client");
}
// Re-parse tcUrl into schema/host/vhost/app/stream/port/param fields.
srs_discovery_tc_url(req->tcUrl, req->schema, req->host, req->vhost, req->app, req->stream, req->port, req->param);
req->strip();
srs_trace("client identified, type=%s, vhost=%s, app=%s, stream=%s, param=%s, duration=%dms",
srs_client_type_string(info->type).c_str(), req->vhost.c_str(), req->app.c_str(), req->stream.c_str(), req->param.c_str(), srsu2msi(req->duration));
// discovery vhost, resolve the vhost from config
SrsConfDirective* parsed_vhost = _srs_config->get_vhost(req->vhost);
if (parsed_vhost) {
req->vhost = parsed_vhost->arg0();
}
// A valid request must carry schema, vhost, port and app.
if (req->schema.empty() || req->vhost.empty() || req->port == 0 || req->app.empty()) {
return srs_error_new(ERROR_RTMP_REQ_TCURL, "discovery tcUrl failed, tcUrl=%s, schema=%s, vhost=%s, port=%d, app=%s",
req->tcUrl.c_str(), req->schema.c_str(), req->vhost.c_str(), req->port, req->app.c_str());
}
// check vhost, allow default vhost.
if ((err = check_vhost(true)) != srs_success) {
return srs_error_wrap(err, "check vhost");
}
srs_trace("connected stream, tcUrl=%s, pageUrl=%s, swfUrl=%s, schema=%s, vhost=%s, port=%d, app=%s, stream=%s, param=%s, args=%s",
req->tcUrl.c_str(), req->pageUrl.c_str(), req->swfUrl.c_str(), req->schema.c_str(), req->vhost.c_str(), req->port,
req->app.c_str(), req->stream.c_str(), req->param.c_str(), (req->args? "(obj)":"null"));
// do token traverse before serve it.
// @see https://github.com/ossrs/srs/pull/239
if (true) {
info->edge = _srs_config->get_vhost_is_edge(req->vhost);
bool edge_traverse = _srs_config->get_vhost_edge_token_traverse(req->vhost);
if (info->edge && edge_traverse) {
if ((err = check_edge_token_traverse_auth()) != srs_success) {
return srs_error_wrap(err, "rtmp: check token traverse");
}
}
}
// security check
if ((err = security->check(info->type, ip, req)) != srs_success) {
return srs_error_wrap(err, "rtmp: security check");
}
// Never allow the empty stream name, for HLS may write to a file with empty name.
// @see https://github.com/ossrs/srs/issues/834
if (req->stream.empty()) {
return srs_error_new(ERROR_RTMP_STREAM_NAME_EMPTY, "rtmp: empty stream");
}
// client is identified, set the timeout to service timeout.
rtmp->set_recv_timeout(SRS_CONSTS_RTMP_TIMEOUT);
rtmp->set_send_timeout(SRS_CONSTS_RTMP_TIMEOUT);
// find a source to serve: the per-stream hub shared by publisher and players.
SrsSource* source = NULL;
if ((err = _srs_sources->fetch_or_create(req, server, &source)) != srs_success) {
return srs_error_wrap(err, "rtmp: fetch source");
}
srs_assert(source != NULL);
// update the statistic when source disconveried.
SrsStatistic* stat = SrsStatistic::instance();
if ((err = stat->on_client(srs_int2str(_srs_context->get_id()), req, this, info->type)) != srs_success) {
return srs_error_wrap(err, "rtmp: stat client");
}
// Apply the per-vhost GOP-cache setting to this source.
bool enabled_cache = _srs_config->get_gop_cache(req->vhost);
srs_trace("source url=%s, ip=%s, cache=%d, is_edge=%d, source_id=%d/%d",
req->get_stream_url().c_str(), ip.c_str(), enabled_cache, info->edge, source->source_id(), source->pre_source_id());
source->set_cache(enabled_cache);
// Dispatch by client type: players go to playing(), every publisher
// variant (FMLE/Haivision/Flash) goes to publishing().
switch (info->type) {
case SrsRtmpConnPlay: {
// response connection start play
if ((err = rtmp->start_play(info->res->stream_id)) != srs_success) {
return srs_error_wrap(err, "rtmp: start play");
}
if ((err = http_hooks_on_play()) != srs_success) {
return srs_error_wrap(err, "rtmp: callback on play");
}
err = playing(source);
http_hooks_on_stop();
return err;
}
case SrsRtmpConnFMLEPublish: {
if ((err = rtmp->start_fmle_publish(info->res->stream_id)) != srs_success) {
return srs_error_wrap(err, "rtmp: start FMLE publish");
}
return publishing(source);
}
case SrsRtmpConnHaivisionPublish: {
if ((err = rtmp->start_haivision_publish(info->res->stream_id)) != srs_success) {
return srs_error_wrap(err, "rtmp: start HAIVISION publish");
}
return publishing(source);
}
case SrsRtmpConnFlashPublish: {
if ((err = rtmp->start_flash_publish(info->res->stream_id)) != srs_success) {
return srs_error_wrap(err, "rtmp: start FLASH publish");
}
return publishing(source);
}
default: {
return srs_error_new(ERROR_SYSTEM_CLIENT_INVALID, "rtmp: unknown client type=%d", info->type);
}
}
// Unreachable: every switch case returns.
return err;
}
SrsRtmpConn::stream_service_cycle()真正地处理推拉流,如果推流则进入,SrsRtmpConn::publishing,而拉流则进入SrsRtmpConn::playing
后面接着分析推拉流,有不对之处欢迎指正