Introduction
In Suricata, the packet_pool is dedicated to holding the packets captured by the receive thread.
When a receive thread initializes, TmThreadsSlotPktAcqLoop calls PacketPoolInit to set up its packet_pool and allocate memory for it. By default, space for 1024 Packet structures is allocated, stored as a linked list. This packet_pool is referred to as "my_pool" in the code, i.e. the receive thread's thread-local variable thread_pkt_pool.
When a worker thread initializes, TmThreadsSlotVar calls PacketPoolInitEmpty to set up its packet_pool, which is likewise referred to as "my_pool", i.e. the worker thread's thread-local variable thread_pkt_pool. Note that no memory is allocated for the worker thread's packet_pool; instead, packets are recycled by, among other operations, pointing the worker's packet_pool->pending_pool at the receive thread's packet_pool. This is covered in detail below.
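For orientation, the fields exercised by the code below can be sketched as follows. This is a trimmed rendition of the PktPool structure from tmqh-packetpool.h: the debug-only fields and cache-line alignment attributes are omitted, and the exact layout varies across Suricata versions.

typedef struct PktPoolLockedStack_ {
    SCMutex mutex;                      /* guards head below */
    SCCondT cond;                       /* used to wake the pool owner in PacketPoolWait */
    SC_ATOMIC_DECLARE(int, sync_now);   /* owner is starved: return packets now */
    Packet *head;                       /* stack of packets returned by other threads */
} PktPoolLockedStack;

typedef struct PktPool_ {
    /* Free-packet stack local to the owning thread; no locking needed. */
    Packet *head;

    /* Batch of packets destined for pending_pool, accumulated so they can
     * be returned in a single locked operation. */
    struct PktPool_ *pending_pool;
    Packet *pending_head;
    Packet *pending_tail;
    uint32_t pending_count;

    /* The only part other threads touch when returning packets. */
    PktPoolLockedStack return_stack;
} PktPool;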
Source Code Analysis
Every capture mode (pcap, netmap, pfring, napatech, etc.) has a packet-reception loop that calls PacketPoolWait on each iteration to check whether the receive thread's packet_pool has a free Packet available for incoming data. If one is free, PacketPoolWait returns immediately and reception proceeds; otherwise the thread blocks in SCCondWait until a worker thread returns Packet resources and signals the cond variable to wake the receive thread.
Taking pcap mode as an example, PacketPoolWait is called from ReceivePcapLoop, roughly as sketched below.
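The sketch below is condensed from ReceivePcapLoop in source-pcap.c: error handling, statistics, and the batch-size computation are omitted (the fixed batch of 64 stands in for the real batch-size logic), and details vary across Suricata versions.

TmEcode ReceivePcapLoop(ThreadVars *tv, void *data, void *slot)
{
    PcapThreadVars *ptv = (PcapThreadVars *)data;

    while (1) {
        if (suricata_ctl_flags & SURICATA_STOP)
            SCReturnInt(TM_ECODE_OK);

        /* Block until the pool has at least one free Packet, so the
         * capture path never has to allocate at line rate. */
        PacketPoolWait();

        /* Hand a batch of raw packets to PcapCallbackLoop(), which takes
         * a Packet from the pool for each of them. */
        int r = pcap_dispatch(ptv->pcap_handle, 64,
                              (pcap_handler)PcapCallbackLoop, (u_char *)ptv);
        if (r < 0) {
            /* pcap error handling omitted ... */
        }
    }
    SCReturnInt(TM_ECODE_OK);
}

PacketPoolWait itself: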
void PacketPoolWait(void)
{
    PktPool *my_pool = GetThreadPacketPool();
    /* my_pool is the thread-local thread_pkt_pool */
    if (PacketPoolIsEmpty(my_pool)) {
        /* true when both my_pool->head and my_pool->return_stack.head are NULL */
        SCMutexLock(&my_pool->return_stack.mutex);
        SC_ATOMIC_ADD(my_pool->return_stack.sync_now, 1);
        /* bump the atomic return_stack.sync_now so workers return packets promptly */
        SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
        /* block here until a worker signals the cond to wake this thread */
        SCMutexUnlock(&my_pool->return_stack.mutex);
    }

    while (PacketPoolIsEmpty(my_pool))
        cc_barrier();
}
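PacketPoolIsEmpty, used twice above, only needs to look at the two stack heads. A minimal sketch consistent with the comment in the code:

static int PacketPoolIsEmpty(PktPool *pool)
{
    /* Check the thread-local stack first, then the locked return stack.
     * Reading return_stack.head without the mutex is tolerable here:
     * PacketPoolWait re-checks in a loop. */
    if (pool->head || pool->return_stack.head)
        return 0;

    return 1;
}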
For every packet it finishes with, a worker thread returns the Packet resource in TmqhOutputPacketpool by calling PacketPoolReturnPacket through the p->ReleasePacket function pointer.
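The dispatch itself is just an indirect call. A minimal sketch of the tail of TmqhOutputPacketpool (tunnel-packet reference handling and profiling omitted; the callback is set to PacketPoolReturnPacket when a packet is stored into a pool):

void TmqhOutputPacketpool(ThreadVars *t, Packet *p)
{
    /* ... tunnel-packet reference handling omitted ... */

    /* For pool-backed packets this lands in PacketPoolReturnPacket. */
    p->ReleasePacket(p);
}

PacketPoolReturnPacket itself: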
/** \brief Return packet to Packet pool
 *
 */
void PacketPoolReturnPacket(Packet *p)
{
    PktPool *my_pool = GetThreadPacketPool();
    /* the current thread's thread-local thread_pkt_pool */

    PACKET_RELEASE_REFS(p);

    PktPool *pool = p->pool;
    /* the packet_pool of the receive thread that owns p */
    if (pool == NULL) {
        PacketFree(p);
        return;
    }
#ifdef DEBUG_VALIDATION
    BUG_ON(pool->initialized == 0);
    BUG_ON(pool->destroyed == 1);
    BUG_ON(my_pool->initialized == 0);
    BUG_ON(my_pool->destroyed == 1);
#endif /* DEBUG_VALIDATION */

    if (pool == my_pool) {
        /* taken only when the current thread is the receive thread */
        /* Push back onto this thread's own stack, so no locking. */
        p->next = my_pool->head;
        my_pool->head = p;
    } else {
        /* taken only when the current thread is a worker thread */
        PktPool *pending_pool = my_pool->pending_pool;
        if (pending_pool == NULL) {
            /* No pending packet, so store the current packet. */
            p->next = NULL;
            my_pool->pending_pool = pool;
            my_pool->pending_head = p;
            my_pool->pending_tail = p;
            my_pool->pending_count = 1;
            /* point the worker's packet_pool->pending_pool at the receive
             * thread's packet_pool that owns p */
        } else if (pending_pool == pool) {
            /* Another packet for the pending pool list. */
            p->next = my_pool->pending_head;
            my_pool->pending_head = p;
            my_pool->pending_count++;
            /* push p onto the head of the pending_head list and bump the count */
            if (SC_ATOMIC_GET(pool->return_stack.sync_now) || my_pool->pending_count > max_pending_return_packets) {
                /* taken when return_stack.sync_now is non-zero, or
                 * pending_count exceeds max_pending_return_packets (32 by default) */
                /* Return the entire list of pending packets. */
                SCMutexLock(&pool->return_stack.mutex);
                my_pool->pending_tail->next = pool->return_stack.head;
                pool->return_stack.head = my_pool->pending_head;
                /* splice the worker's pending_head..pending_tail list onto the
                 * receive thread's return_stack.head */
                SC_ATOMIC_RESET(pool->return_stack.sync_now);
                SCMutexUnlock(&pool->return_stack.mutex);
                SCCondSignal(&pool->return_stack.cond);
                /* signal the cond to wake the receive thread */
                /* Clear the list of pending packets to return. */
                my_pool->pending_pool = NULL;
                my_pool->pending_head = NULL;
                my_pool->pending_tail = NULL;
                my_pool->pending_count = 0;
                /* reset the pending fields for reuse */
            }
        } else {
            /* Push onto return stack for this pool */
            SCMutexLock(&pool->return_stack.mutex);
            p->next = pool->return_stack.head;
            pool->return_stack.head = p;
            /* return p directly to the receive thread's return_stack.head */
            SC_ATOMIC_RESET(pool->return_stack.sync_now);
            SCMutexUnlock(&pool->return_stack.mutex);
            SCCondSignal(&pool->return_stack.cond);
        }
    }
}
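The consumer side closes the cycle. When the receive thread needs a Packet it pops from its local head without any locking; only when that stack is empty does it take the return_stack mutex and move the entire returned list over in one step. A simplified sketch of PacketPoolGetPacket from tmqh-packetpool.c (DEBUG_VALIDATION checks omitted; details vary across versions):

Packet *PacketPoolGetPacket(void)
{
    PktPool *pool = GetThreadPacketPool();

    if (pool->head) {
        /* Fast path: pop from the thread-local stack, no locking. */
        Packet *p = pool->head;
        pool->head = p->next;
        p->pool = pool;
        PACKET_REINIT(p);
        return p;
    }

    /* Local stack is empty: take everything the workers pushed onto the
     * locked return stack in a single operation. */
    SCMutexLock(&pool->return_stack.mutex);
    pool->head = pool->return_stack.head;
    pool->return_stack.head = NULL;
    SCMutexUnlock(&pool->return_stack.mutex);

    /* Check again, since the return stack may also have been empty. */
    if (pool->head) {
        Packet *p = pool->head;
        pool->head = p->next;
        p->pool = pool;
        PACKET_REINIT(p);
        return p;
    }

    /* Still empty: callers fall back to allocating a fresh Packet. */
    return NULL;
}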