TCP receive data 的三个队列

The prequeue: 

/*
 * NOTE(review): abridged excerpt — the kernel's struct tcp_sock has many
 * more members; only the ucopy sub-struct (the prequeue state) is shown.
 */
struct tcp_sock {
	
	/* Data for direct copy to user */
	struct {
		struct sk_buff_head	prequeue;	/* skbs queued for the reader; payload not yet copied */
		struct task_struct	*task;		/* presumably the task blocked in recvmsg() — verify */
		struct msghdr		*msg;		/* presumably that recvmsg() call's destination — verify */
		int			memory;		/* running sum of truesize of queued skbs */
		int			len;		/* NOTE(review): byte count wanted by the reader — TODO confirm */
	} ucopy;
}

 Receive queue and backlog queue: 

/*
 * NOTE(review): abridged excerpt — only the receive-side queue members of
 * the kernel's struct sock are shown here.
 */
struct sock {
	struct sk_buff_head	sk_receive_queue;	/* fully processed skbs awaiting recvmsg() */
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because its logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;	/* receive-buffer memory charged to this socket */
		int		len;		/* bytes currently held on the backlog */
		struct sk_buff	*head;		/* singly linked skb list, hand-rolled for low latency */
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
	int			sk_forward_alloc;	/* pre-reserved memory not yet committed to skbs */
}

对prequeue的处理

bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{

	// 这里只是把skb连接到prequeue队列里来,并没有复制数据
	__skb_queue_tail(&tp->ucopy.prequeue, skb);

	// 增加统计信息
	tp->ucopy.memory += skb->truesize;

	// prequeue满了,或者user memory达到了上限,则清空prequeue
	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) 
	{

		struct sk_buff *skb1;

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb1);

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
}

/*
 * Dispatch one skb to the socket's backlog receive handler
 * (sk->sk_backlog_rcv, which is tcp_v4_do_rcv for TCP/IPv4).
 * pfmemalloc skbs on memalloc sockets go through the __sk_backlog_rcv
 * wrapper instead of calling the handler directly.
 */
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!(sk_memalloc_socks() && skb_pfmemalloc(skb)))
		return sk->sk_backlog_rcv(sk, skb); //.backlog_rcv		= tcp_v4_do_rcv,

	return __sk_backlog_rcv(sk, skb);
}

 

©️2020 CSDN 皮肤主题: 大白 设计师:CSDN官方博客 返回首页