3个不同的消息以不同的速率发送到同一个端口:
消息      大小(字节)   发送周期    发送频率
High      232          10 ms      100 Hz
Medium    148          20 ms      50 Hz
Low       20           60 ms      16.6 Hz
我每 ~ 6 ms 只能处理一条消息 .
单线程,使用阻塞式读取(blocking read).
一种奇怪的情况正在发生,我没有解释 .
当我将接收缓冲区设置为 4,799 字节时,我所有的低速消息都被丢弃了 .
我看到可能有一两个被处理,然后什么都没有 .
当我将接收缓冲区设置为 4,800 (或更高!)时,似乎所有低速消息都开始处理 . 我看到大约每秒16/17 .
这一点一直被观察到 . 发送数据包的应用程序始终在接收应用程序之前启动 . 创建套接字后,在开始处理之前,接收应用程序总是有很长的延迟 . 因此,处理开始时缓冲区始终为满,并且每次测试发生时它都不是相同的起始缓冲区 . 这是因为套接字是在发送方已经发送消息之后创建的,因此接收方可能会在发送周期的中间开始监听 .
Why does increasing the receive buffer size by a single byte cause a huge change in low-speed message processing?
我构建了一个表来更好地可视化预期的处理:
由于其中一些消息得到处理,因此可能会将更多消息放入队列而不是丢弃 .
尽管如此,我希望 4,799 字节缓冲区的行为与 4,800 字节相同 .
然而,这不是我所观察到的 .
我认为这个问题与低速消息与其他两条消息同时发送的事实有关 . 它总是在高/中速信息之后接收 . (已通过wireshark确认) .
例如,假设缓冲区开始时为空,很明显低速消息需要比其他消息排队更长 .
* 我以每 6 ms 处理 1 条消息的速度消费,而发送方大约每 30 ms 就发送 5 条消息 .
这仍然不能解释缓冲区大小 .
我们正在运行VxWorks,并使用他们的sockLib,它是Berkeley套接字的一个实现 . 这是我们的套接字创建的代码片段:
SOCKET_BUFFER_SIZE 是我正在改变的 .
struct sockaddr_in tSocketAddress; // Socket address
int nSocketAddressSize = sizeof(struct sockaddr_in); // Size of socket address structure
int nSocketOption = 0;
// Already created
if (*ptParameters->m_pnIDReference != 0)
return FALSE;
// Create UDP socket
if ((*ptParameters->m_pnIDReference = socket(AF_INET, SOCK_DGRAM, 0)) == ERROR)
{
// Error
CreateSocketMessage(ptParameters, "CreateSocket: Socket create failed with error.");
// Not successful
return FALSE;
}
// Valid local address
// NOTE(review): this compares the char* POINTER against the macro, not the
// string contents — it only works if both sides refer to the same literal.
// Consider strcmp() like the SOCKET_ADDRESS_ANY_STRING check below. TODO confirm.
if (ptParameters->m_szLocalIPAddress != SOCKET_ADDRESS_NONE_STRING && ptParameters->m_usLocalPort != 0)
{
// Set up the local parameters/port
bzero((char*)&tSocketAddress, nSocketAddressSize);
tSocketAddress.sin_len = (u_char)nSocketAddressSize; // BSD/VxWorks sockaddr carries its own length field
tSocketAddress.sin_family = AF_INET;
tSocketAddress.sin_port = htons(ptParameters->m_usLocalPort); // port in network byte order
// Check for any address
if (strcmp(ptParameters->m_szLocalIPAddress, SOCKET_ADDRESS_ANY_STRING) == 0)
tSocketAddress.sin_addr.s_addr = htonl(INADDR_ANY);
else
{
// Convert IP address for binding
// NOTE(review): inet_addr() signals failure with INADDR_NONE; comparing
// against ERROR assumes the two macros share the value -1, and this test
// also rejects the valid broadcast address "255.255.255.255". TODO confirm.
if ((tSocketAddress.sin_addr.s_addr = inet_addr(ptParameters->m_szLocalIPAddress)) == ERROR)
{
// Error
CreateSocketMessage(ptParameters, "Unknown IP address.");
// Cleanup socket
close(*ptParameters->m_pnIDReference);
*ptParameters->m_pnIDReference = ERROR;
// Not successful
return FALSE;
}
}
// Bind the socket to the local address
if (bind(*ptParameters->m_pnIDReference, (struct sockaddr *)&tSocketAddress, nSocketAddressSize) == ERROR)
{
// Error
CreateSocketMessage(ptParameters, "Socket bind failed.");
// Cleanup socket
close(*ptParameters->m_pnIDReference);
*ptParameters->m_pnIDReference = ERROR;
// Not successful
return FALSE;
}
}
// Receive socket
if (ptParameters->m_eType == SOCKTYPE_RECEIVE || ptParameters->m_eType == SOCKTYPE_RECEIVE_AND_TRANSMIT)
{
// Set the receive buffer size
// NOTE(review): SO_RCVBUF sets the total kernel queue space for pending
// datagrams; an arriving datagram is accepted only if it fits in the space
// remaining, so a 1-byte change in SOCKET_BUFFER_SIZE can shift which
// message in a burst gets dropped — presumably the observed 4799/4800
// boundary effect. Verify against the VxWorks network stack docs.
nSocketOption = SOCKET_BUFFER_SIZE;
if (setsockopt(*ptParameters->m_pnIDReference, SOL_SOCKET, SO_RCVBUF, (char *)&nSocketOption, sizeof(nSocketOption)) == ERROR)
{
// Error
CreateSocketMessage(ptParameters, "Socket buffer size set failed.");
// Cleanup socket
close(*ptParameters->m_pnIDReference);
*ptParameters->m_pnIDReference = ERROR;
// Not successful
return FALSE;
}
}
并且套接字接收在无限循环中被调用:
*缓冲区大小肯定足够大
/*
 * SocketReceive - blocking read of one datagram from the socket stored at
 * g_pnSocketIDs[nSocketIndex] into the caller's buffer.
 *
 * Parameters:
 *   nSocketIndex  - index into the global socket ID table (0..SOCKET_COUNT-1)
 *   pBuffer       - destination buffer for the received datagram
 *   nBufferLength - capacity of pBuffer in bytes
 *
 * Returns the number of bytes received, 0 for a non-positive buffer length,
 * -1 for an invalid index/socket, or ERROR if recv() itself fails.
 */
int SocketReceive(int nSocketIndex, char *pBuffer, int nBufferLength)
{
    int nBytesReceived = 0;
    char szError[256];

    /* Reject an out-of-range index or a socket slot that was never created. */
    if (nSocketIndex < 0 || nSocketIndex >= SOCKET_COUNT || g_pnSocketIDs[nSocketIndex] == 0)
    {
        /* snprintf bounds the write; plain sprintf could overflow szError. */
        snprintf(szError, sizeof(szError),
                 "SocketReceive: Invalid socket (%d) or ID (%d)",
                 nSocketIndex, g_pnSocketIDs[nSocketIndex]);
        perror(szError);
        return -1;
    }

    /* A zero or negative length would be passed to recv() as a huge unsigned
     * size; treat it as "nothing to read" instead. */
    if (nBufferLength <= 0)
    {
        perror("SocketReceive: zero buffer length");
        return 0;
    }

    /* Receive data (blocks until a datagram arrives). */
    nBytesReceived = recv(g_pnSocketIDs[nSocketIndex], pBuffer, nBufferLength, 0);

    /* Error in receiving */
    if (nBytesReceived == ERROR)
    {
        /* BUG FIX: the original format string had no conversion specifier, so
         * the errno argument was silently dropped (undefined behavior). */
        snprintf(szError, sizeof(szError),
                 "SocketReceive: Data Receive Failure (errno=%d)", errno);
        /* perror appends the system's description of errno to our prefix. */
        perror(szError);
        return ERROR;
    }

    /* Bytes received */
    return nBytesReceived;
}
Any clues on why increasing the buffer size to 4,800 results in successful and consistent reading of low speed messages?