By this stage the packet already has its MAC header in place. Before the driver's transmit routine is invoked, a few more tasks remain: 1) traffic control (qdisc queuing); 2) GSO segmentation; 3) checksum computation.
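As a rough illustration of the checksum step mentioned above, the sketch below is a simplification, not the kernel's actual code (the function name example_tx_checksum is made up, and the real transmit path does this with finer-grained feature checks in validate_xmit_skb()): when the skb still carries CHECKSUM_PARTIAL and the device cannot offload the checksum, it is computed in software before the packet reaches the driver.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Simplified sketch: fall back to a software checksum when the device
 * offers no hardware checksum offload. */
static int example_tx_checksum(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !(dev->features & NETIF_F_HW_CSUM))
		return skb_checksum_help(skb);	/* compute the checksum in software */
	return 0;
}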
1. The dev_queue_xmit function
static inline int dev_queue_xmit(struct sk_buff *skb)
{
	return dev_queue_xmit_sk(skb->sk, skb);
}
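This is the entry point the upper layers use. As a hypothetical illustration (example_send() is not a real kernel function), a caller that has already built the L2 header simply points skb->dev at the egress device and hands the skb over; dev_queue_xmit() consumes the skb and returns a NET_XMIT_* code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical caller: the skb already carries its MAC header. */
static int example_send(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;			/* the egress device must be set */
	return dev_queue_xmit(skb);	/* consumes the skb in all cases */
}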
2. The dev_queue_xmit_sk function
int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
3. The __dev_queue_xmit function
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);	/* record the MAC header offset */

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
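	/* The check above emits a software TX timestamp (SCM_TSTAMP_SCHED)
	 * when the socket asked for SOF_TIMESTAMPING_TX_SCHED, i.e. at the
	 * moment the packet enters the queuing layer. */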
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);	/* set skb->priority */

	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)	/* release the dst entry */
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);	/* pick the TX queue computed from the packet */
	q = rcu_dereference_bh(txq->qdisc);
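	/* netdev_pick_tx() maps the skb to one of the device's TX queues
	 * (via ndo_select_queue, XPS, or a flow hash); the qdisc attached to
	 * that queue is then read under the RCU read lock taken above. */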
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
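	/* With CONFIG_NET_CLS_ACT, the tc verdict field is stamped with
	 * AT_EGRESS so tc classifiers/actions can tell this packet is on the
	 * transmit path. */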
	trace_net_dev_queue(skb);

	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);	/* transmit via the qdisc; most real devices take this branch */
		goto out;
	}
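	/* __dev_xmit_skb() enqueues the skb into the qdisc and then runs
	 * qdisc_run(), which dequeues packets and pushes them to the driver
	 * via sch_direct_xmit()/dev_hard_start_xmit(); traffic control, and
	 * on the way out GSO segmentation and checksumming in
	 * validate_xmit_skb(), all happen on that path. */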
	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...
	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here. (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.
	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {