live555 RTSP streaming server: improving the runtime efficiency of the multi-threaded version

In a previous post we described how to convert live555 from single-threaded to multi-threaded; here we add some notes on the efficiency of the multi-threaded version.

Declare the MultiThread_CORE_T structure in GenericMediaServer.h as follows:


#define MAX_BATCH_CLIENT_NUM		5
typedef struct __LIVE_THREAD_TASK_T
{
	int					id;
	TaskScheduler		*pSubScheduler;		// this worker's private scheduler
	UsageEnvironment	*pSubEnv;			// this worker's private environment
	char				liveURLSuffix[512];	// URL suffix of the stream this channel serves
	int					releaseChannel;		// release flag: the channel is being torn down
	int					handleDescribe;

	OSTHREAD_OBJ_T		*osThread;			// worker thread object

	int					clientNum;			// number of clients currently on this channel
	void				*pClientConnectionPtr[MAX_BATCH_CLIENT_NUM];	// clients sharing this channel
	void				*procPtr;
	void				*extPtr;
}LIVE_THREAD_TASK_T;

#define	MAX_DEFAULT_MULTI_THREAD_NUM	256			// maximum number of supported channels

typedef struct __MultiThread_CORE_T
{
	int		threadNum;						// number of entries in threadTask
	LIVE_THREAD_TASK_T	*threadTask;		// one task slot per channel
}MultiThread_CORE_T;

In the GenericMediaServer constructor, only the table of 256 LIVE_THREAD_TASK_T slots inside MultiThread_CORE_T is allocated; the actual worker threads are not created here. A minimal sketch of that allocation follows.
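This sketch assumes a calloc-style zero-initialization; the member name multiThreadCore matches its use in GetEnvBySuffix below:

// In the GenericMediaServer constructor: allocate and zero the task table.
// pSubScheduler, pSubEnv and osThread all stay NULL here; they are filled
// in lazily when a DESCRIBE request first claims a slot.
multiThreadCore.threadNum = MAX_DEFAULT_MULTI_THREAD_NUM;
multiThreadCore.threadTask = (LIVE_THREAD_TASK_T *)calloc(
		MAX_DEFAULT_MULTI_THREAD_NUM, sizeof(LIVE_THREAD_TASK_T));
for (int i = 0; i < multiThreadCore.threadNum; i++)
	multiThreadCore.threadTask[i].id = i;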

When handling a client's DESCRIBE request, we first check whether the requested resource is already in the channel list; only if it is not do we create the corresponding worker thread, as follows:

// If we are on the main thread, look up (or assign) a channel for this stream
if (pEnv->GetEnvirId() == MAIN_THREAD_ID)
{
	UsageEnvironment  *pChEnv = fOurServer.GetEnvBySuffix(pEnv, urlTotalSuffix, this, pThreadTask, True);
	if (NULL == pChEnv)
	{
		handleCmdRet = -1;

		this->pClientConnectionEnv = NULL;
		handleCmd_notFound();

		break;
	}
	else
	{
		_TRACE(TRACE_LOG_DEBUG, (char*)"[%s]Set socket[%d] Assign to [%d:%s]\n", pEnv->GetEnvirName(), this->fOurSocket, pChEnv->GetEnvirId(), pChEnv->GetEnvirName());

		// Move the socket from the main thread to the worker thread
		pEnv->taskScheduler().disableBackgroundHandling(fOurSocket);

		return MAIN_THREAD_ID;
	}

}

The main thread's work centers on GenericMediaServer::GetEnvBySuffix, which assigns the client to a worker task:

UsageEnvironment *GenericMediaServer::GetEnvBySuffix(UsageEnvironment *pMainThreadEnv, const char *urlSuffix, void *pClientConnection, 
										LIVE_THREAD_TASK_T **pThreadTask, Boolean bLockServerMediaSession)
{
	GenericMediaServer::ClientConnection	*pClient = (GenericMediaServer::ClientConnection *)pClientConnection;

	int iFreeIdx = -1;
	UsageEnvironment *pEnv = NULL;

	if ( (int)strlen(urlSuffix) < 1)
	{
		return NULL;
	}

	char streamName[512] = {0};

	int		iProcRet = 0;
	Boolean bRequestTooMany = False;
	if (bLockServerMediaSession)		LockServerMediaSession(pMainThreadEnv->GetEnvirName(), (char*)"GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);

	do
	{
		for (int i=0; i<multiThreadCore.threadNum; i++)
		{
			if ( (iFreeIdx<0) && (((int)strlen(multiThreadCore.threadTask[i].liveURLSuffix) < 1 )) && (multiThreadCore.threadTask[i].releaseChannel==0x00) ) 
			{
				iFreeIdx = i;
			}
			if ( 0 == strcmp(urlSuffix, multiThreadCore.threadTask[i].liveURLSuffix))
			{
				if (multiThreadCore.threadTask[i].releaseChannel>0x00)
				{
					iProcRet = -1;
					_TRACE(TRACE_LOG_DEBUG, (char *)"[%s] This channel is being released; please retry later: %s\n", multiThreadCore.threadTask[i].pSubEnv->GetEnvirName(), urlSuffix);
					break;
				}

				if (NULL == multiThreadCore.threadTask[i].pSubEnv)
				{
					iProcRet = -2;
					break;
				}

				if (multiThreadCore.threadTask[i].pSubEnv->GetStreamStatus() == 0x00)
				{
					iProcRet = -3;
					break;
				}


				multiThreadCore.threadTask[i].pSubEnv->LockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
				if (multiThreadCore.threadTask[i].pSubEnv->GetLockFlag() != 0x00)
				{
					iProcRet = -4;
					multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
					break;
				}
				
				bool assignEnv = false;
				for (int k=0; k<MAX_BATCH_CLIENT_NUM; k++)
				{
					if (NULL == multiThreadCore.threadTask[i].pClientConnectionPtr[k])
					{
						assignEnv = true;
						multiThreadCore.threadTask[i].pClientConnectionPtr[k] = pClient;

						_TRACE(TRACE_LOG_INFO, (char*)"GenericMediaServer::GetEnvBySuffix  [%s] set [%d] to Index[%d]\n", urlSuffix, pClient->fOurSocket, k);

						strcpy(streamName, urlSuffix);

						break;
					}
				}

				if (assignEnv)
				{
					pEnv = multiThreadCore.threadTask[i].pSubEnv;
					//multiThreadCore.threadTask[i].subSocket = pClient->fOurSocket;
					pClient->pClientConnectionEnv = multiThreadCore.threadTask[i].pSubEnv;

					//multiThreadCore.threadTask[i].handleDescribe = 0x01;
					//*handleDescribe = &multiThreadCore.threadTask[i].handleDescribe;
					if (NULL != pThreadTask)	*pThreadTask = &multiThreadCore.threadTask[i];

					multiThreadCore.threadTask[i].clientNum ++;

					pEnv->IncrementReferenceCount();		// increment the reference count

					iProcRet = 0;

					_TRACE(TRACE_LOG_INFO, (char*)"Shared channel GenericMediaServer::GetEnvBySuffix: channel already exists. New Connection[%d]   [%s][%s] ClientNum[%d]\n",
									pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(), urlSuffix, 
									multiThreadCore.threadTask[i].clientNum);
				}
				else
				{
					// No free slot found: this channel's client list is full

					iProcRet = -10;
					_TRACE(TRACE_LOG_ERROR, (char*)"GenericMediaServer::GetEnvBySuffix channel client list is full [%s]\n", urlSuffix);
				}

				multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);

				break;
			}
		}
		if (pEnv)			break;
		if (iFreeIdx<0)		break;

		if (iProcRet < 0)	break;

		if (NULL == multiThreadCore.threadTask[iFreeIdx].osThread)
		{
			CreateOSThread( &multiThreadCore.threadTask[iFreeIdx].osThread, __WorkerThread_Proc, (void *)&multiThreadCore.threadTask[iFreeIdx] );
		}

		
		// Register this client as the first user of the new channel.
		// NOTE: this assumes pSubEnv has already been set up by the worker
		// thread created above (i.e. CreateOSThread synchronizes with that
		// initialization); otherwise pClientConnectionEnv would be NULL here.
		multiThreadCore.threadTask[iFreeIdx].pClientConnectionPtr[0] = pClient;
		pClient->pClientConnectionEnv = multiThreadCore.threadTask[iFreeIdx].pSubEnv;

#ifdef _DEBUG
		for (int i=0; i<multiThreadCore.threadNum; i++)
		{
			if ( (int)strlen(multiThreadCore.threadTask[i].liveURLSuffix) > 0)
			{
				_TRACE(TRACE_LOG_DEBUG, (char *)"Channel list[%d:%s]: %s\n", i, multiThreadCore.threadTask[i].pSubEnv->GetEnvirName(),  multiThreadCore.threadTask[i].liveURLSuffix);

				if ( (0 == strcmp(urlSuffix, multiThreadCore.threadTask[i].liveURLSuffix)) )
				{
					// No-op self-assignment, presumably kept as a breakpoint anchor
					multiThreadCore.threadTask[i].releaseChannel = multiThreadCore.threadTask[i].releaseChannel;
				}
			}
		}
#endif

		pEnv = pClient->pClientConnectionEnv;

		strcpy(multiThreadCore.threadTask[iFreeIdx].liveURLSuffix, urlSuffix);

		strcpy(streamName, multiThreadCore.threadTask[iFreeIdx].liveURLSuffix);

		pEnv->IncrementReferenceCount();		// increment the reference count

		if (NULL != pThreadTask)	*pThreadTask = &multiThreadCore.threadTask[iFreeIdx];

		multiThreadCore.threadTask[iFreeIdx].clientNum ++;

		_TRACE(TRACE_LOG_INFO, (char*)"New channel GenericMediaServer::GetEnvBySuffix: New Connection[%d] [%s][%s] ClientNum[%d]\n",
						pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(), 
						multiThreadCore.threadTask[iFreeIdx].liveURLSuffix, 
						multiThreadCore.threadTask[iFreeIdx].clientNum);
	}while (0);

	if (bLockServerMediaSession)		UnlockServerMediaSession(pMainThreadEnv->GetEnvirName(), "GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);

	//UnlockClientConnection();

	if (NULL != pEnv)
	{
		if ( (int)strlen(streamName) < 1)
		{
			_TRACE(TRACE_LOG_DEBUG, (char *)"#### ERROR: streamName is empty after channel assignment\n");
		}
	}

	return pEnv;
}

At this point, the main thread's work for this client is completely finished; everything else is up to the worker thread. Once created, a worker thread continuously checks for pending tasks, and whenever a new client is assigned to it, processing starts over from handleCmd_DESCRIBE. A minimal sketch of the worker entry point follows.
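This sketch uses the stock BasicTaskScheduler/BasicUsageEnvironment; the build described here clearly extends UsageEnvironment (GetEnvirId, LockEnvir, GetStreamStatus, ...), so the concrete types will differ:

void *__WorkerThread_Proc(void *pParam)
{
	LIVE_THREAD_TASK_T	*pTask = (LIVE_THREAD_TASK_T *)pParam;

	// Each worker owns a private scheduler/environment pair, so its event
	// loop runs without any locking.
	pTask->pSubScheduler = BasicTaskScheduler::createNew();
	pTask->pSubEnv = BasicUsageEnvironment::createNew(*pTask->pSubScheduler);

	// Block in this thread's own event loop; clients handed over by the
	// main thread are picked up here and re-enter handleCmd_DESCRIBE.
	pTask->pSubEnv->taskScheduler().doEventLoop();

	return NULL;
}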

Summary

The handoff point between the live555 worker threads and the main thread is RTSPServer::RTSPClientConnection::handleCmd_DESCRIBE: the main thread processes a connection only up to the DESCRIBE command, and everything after that is handled by the worker thread. The two halves of that handoff are sketched below.
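The worker-side re-registration here is an assumption modeled on live555's stock setBackgroundHandling/ClientConnection::incomingRequestHandler pair, so the exact call in this port may differ:

// Main thread (shown earlier): detach the client socket from the main
// scheduler so that only the worker thread will see its events.
pEnv->taskScheduler().disableBackgroundHandling(fOurSocket);

// Worker thread (assumed): re-attach the same socket to its own scheduler,
// so every subsequent RTSP command (SETUP, PLAY, ...) runs on the worker.
pTask->pSubEnv->taskScheduler().setBackgroundHandling(fOurSocket,
		SOCKET_READABLE | SOCKET_EXCEPTION,
		GenericMediaServer::ClientConnection::incomingRequestHandler,
		pClientConnection);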
