CDC EEM USB Network Sharing

A customer uses our desktop Android 7 Qualcomm device for internet access through its female USB port (USB network sharing to a host PC). For a while after boot, ping across the link fails; the notes below walk through the troubleshooting.

When a CDC EEM USB device is plugged into the host, the USB core matches it against the cdc_eem driver's id_table (CLASS=2, SUBCLASS=0x0c, PROTO=7) and calls usbnet_probe(), which registers it as a USB network interface.

CDC EEM device:
[figure: CDC EEM device]

cdc_eem.c (drivers/net/usb/cdc_eem.c):

#define USB_CLASS_COMM                  2
#define USB_CDC_SUBCLASS_EEM            0x0c
#define USB_CDC_PROTO_EEM               7
 
 
static const struct usb_device_id products[] = {
{
    USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_EEM, USB_CDC_PROTO_EEM),
    .driver_info = (unsigned long) &eem_info,
},
{
    /* EMPTY == end of list */
},
};
MODULE_DEVICE_TABLE(usb, products);
 
static struct usb_driver eem_driver = {
    .name =     "cdc_eem",
    .id_table = products,
    .probe =    usbnet_probe,
    .disconnect =   usbnet_disconnect,
    .suspend =  usbnet_suspend,
    .resume =   usbnet_resume,
    .disable_hub_initiated_lpm = 1,
};

At this point ifconfig (or ifconfig usb0 up) shows an additional usb0 interface:

[figure: ifconfig output showing the usb0 interface]


The flow when the device (gadget side) sends data to the host:
When the USB NIC is probed, usbnet_probe() first sets up the work items and the bottom half (usbnet_bh) that will handle received data:

int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
    ....
    INIT_WORK(&dev->bh_w, usbnet_bh_w);
    INIT_WORK (&dev->kevent, kevent);
    init_usb_anchor(&dev->deferred);
    dev->delay.function = usbnet_bh;
    dev->delay.data = (unsigned long) dev;
    init_timer (&dev->delay);
    mutex_init (&dev->phy_mutex);
    mutex_init(&dev->interrupt_mutex);
    dev->interrupt_count = 0;
    dev->net = net;
    strcpy (net->name, "usb%d");
    memcpy (net->dev_addr, node_id, sizeof node_id);
    ....
}
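
The link between a completed receive URB and that bottom half (abridged from drivers/net/usb/usbnet.c; details vary slightly by kernel version): the URB completion handler moves the skb onto dev->done in state rx_done and schedules usbnet_bh:

static void rx_complete (struct urb *urb)
{
    struct sk_buff *skb = (struct sk_buff *) urb->context;
    struct skb_data *entry = (struct skb_data *) skb->cb;
    struct usbnet *dev = entry->dev;
    ....
    /* move the skb from dev->rxq to dev->done, mark it rx_done,
     * and schedule the bottom half */
    defer_bh(dev, skb, &dev->rxq, rx_done);
}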
 
 
When the NIC has received data, the bottom half drains the done queue:
static void usbnet_bh (unsigned long param)
{
......
    while ((skb = skb_dequeue (&dev->done))) {
        entry = (struct skb_data *) skb->cb;
        switch (entry->state) {
        case rx_done:
            entry->state = rx_cleanup;
            rx_process (dev, skb);
            continue;
        case tx_done:
            kfree(entry->urb->sg);
            /* fall through */
        case rx_cleanup:
            usb_free_urb (entry->urb);
            dev_kfree_skb (skb);
            continue;
        default:
            netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
        }
    }
........
}
 
 
static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
 
    if (dev->driver_info->rx_fixup &&
        !dev->driver_info->rx_fixup (dev, skb)) {
        /* With RX_ASSEMBLE, rx_fixup() must update counters */
        if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
            dev->net->stats.rx_errors++;
        goto done;
    }
    // else network stack removes extra byte if we forced a short packet
 
    /* all data was already cloned from skb inside the driver */
    if (dev->driver_info->flags & FLAG_MULTI_PACKET)
        goto done;
 
    if (skb->len < ETH_HLEN) {
        dev->net->stats.rx_errors++;
        dev->net->stats.rx_length_errors++;
        netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
    } else {
        usbnet_skb_return(dev, skb);
        return;
    }
 
done:
    skb_queue_tail(&dev->done, skb);
}
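
For cdc_eem, the rx_fixup() hook called above is eem_rx_fixup(), which unwraps the EEM framing so that a plain Ethernet frame is left in the skb. A rough sketch of that framing, based on the CDC EEM spec (the struct/function names below are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>
 
/* Illustrative sketch of a CDC EEM data packet: a 16-bit little-endian
 * header followed by the Ethernet frame. Field layout per the EEM spec:
 * bit 15 = bmType (1 means command packet), bit 14 = bmCRC,
 * bits 13..0 = Ethernet frame length. */
static int eem_parse_header(uint16_t hdr, unsigned int *frame_len,
                            int *crc_present)
{
    if (hdr & 0x8000)                 /* bmType = 1: EEM command packet */
        return 0;                     /* no Ethernet payload */
    *crc_present = !!(hdr & 0x4000);  /* bmCRC: real CRC vs. sentinel */
    *frame_len = hdr & 0x3fff;        /* length of the Ethernet frame */
    return 1;
}
 
int main(void)
{
    unsigned int len;
    int crc;
 
    if (eem_parse_header(0x4042, &len, &crc))   /* example header word */
        printf("data packet, len=%u, crc=%d\n", len, crc);
    return 0;
}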
 
 
 
Everything that survives rx_fixup() is ultimately handed to the network stack via usbnet_skb_return():
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
    ....
    skb->protocol = eth_type_trans (skb, dev->net);
    /* This is where our ping failure originated: eth_type_trans() decided
     * that the destination MAC in the sk_buff did not match the NIC's
     * actual MAC address -- the customer's device had a problem obtaining
     * its MAC address. */
    status = netif_rx_ni(skb);
    ....
}
 
 
Expanding eth_type_trans() (net/ethernet/eth.c, abridged):
 
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
    struct ethhdr *eth;
    ....
    eth = eth_hdr(skb);
    ....
    if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
        if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
            skb->pkt_type = PACKET_BROADCAST;
        else
            skb->pkt_type = PACKET_MULTICAST;
    }
    /* unicast frame whose destination MAC is not ours: mark it
     * PACKET_OTHERHOST -- IP input will drop it (see ip_rcv() below) */
    else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
                                               dev->dev_addr)))
        skb->pkt_type = PACKET_OTHERHOST;
    ....
}
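
To make the failure concrete, here is a small user-space sketch of the same classification decision (the MAC addresses are made up for this example):

#include <stdio.h>
#include <string.h>
 
/* User-space illustration of the eth_type_trans() classification;
 * dev_addr plays the role of usb0's dev_addr, h_dest is the destination
 * MAC found in the received frame. Both addresses are hypothetical. */
int main(void)
{
    unsigned char dev_addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
    unsigned char h_dest[6]   = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x66 };
 
    if (h_dest[0] & 1)                      /* multicast/broadcast bit */
        printf("PACKET_MULTICAST or PACKET_BROADCAST\n");
    else if (memcmp(h_dest, dev_addr, 6) != 0)
        printf("PACKET_OTHERHOST -> ip_rcv() drops it (our ping failure)\n");
    else
        printf("PACKET_HOST -> delivered normally\n");
    return 0;
}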
 
 
int netif_rx_ni(struct sk_buff *skb)
{
    .....
    err = netif_rx_internal(skb);
    .....
    return err;
}
 
 
static int netif_rx_internal(struct sk_buff *skb)
{
    .....
        ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
    .....
    return ret;
}
 
 
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
                  unsigned int *qtail)
{
    ......
    if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
        if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
            ......
            /* enqueue the skb on this CPU's input_pkt_queue;
             * the NET_RX softirq on that CPU will process it */
            __skb_queue_tail(&sd->input_pkt_queue, skb);
             
            ......
            return NET_RX_SUCCESS;
        }
 
        /* Schedule NAPI for backlog device
         * We can use non atomic operation since we own the queue lock
         */
        if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
            if (!rps_ipi_queued(sd))
                ____napi_schedule(sd, &sd->backlog);
        }
        goto enqueue;
    }
 
drop:
    sd->dropped++;
    rps_unlock(sd);
 
    local_irq_restore(flags);
 
    atomic_long_inc(&skb->dev->rx_dropped);
    kfree_skb(skb);
    return NET_RX_DROP;
}
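
____napi_schedule() itself is tiny: it queues the backlog NAPI instance on this CPU's poll list and raises the NET_RX softirq (net/core/dev.c):

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
                                     struct napi_struct *napi)
{
    list_add_tail(&napi->poll_list, &sd->poll_list);
    __raise_softirq_irqoff(NET_RX_SOFTIRQ);
}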

The corresponding CPU then drains the sk_buff queue from softirq context. The per-CPU backlog NAPI device that does this is set up at boot in net_dev_init():

static int __init net_dev_init(void)
{
    .....
        for_each_possible_cpu(i) {
        struct softnet_data *sd = &per_cpu(softnet_data, i);
 
        skb_queue_head_init(&sd->input_pkt_queue);
        skb_queue_head_init(&sd->process_queue);
        INIT_LIST_HEAD(&sd->poll_list);
        sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
        sd->csd.func = rps_trigger_softirq;
        sd->csd.info = sd;
        sd->cpu = i;
#endif
 
        sd->backlog.poll = process_backlog;
        sd->backlog.weight = weight_p;
    }
    .....
}
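
net_dev_init() also registers the softirq handler via open_softirq(NET_RX_SOFTIRQ, net_rx_action). When the softirq fires, net_rx_action() polls each NAPI instance on sd->poll_list; for the backlog device that poll callback is the process_backlog() registered above (a sketch, abridged):

static void net_rx_action(struct softirq_action *h)
{
    struct softnet_data *sd = this_cpu_ptr(&softnet_data);
    ....
    /* walk sd->poll_list and invoke each entry's poll callback --
     * for the per-CPU backlog that is process_backlog() */
    ....
}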
 
 
 
 
static int process_backlog(struct napi_struct *napi, int quota)
{
    ....
        while (1) {
        struct sk_buff *skb;
 
        while ((skb = __skb_dequeue(&sd->process_queue))) {
            rcu_read_lock();
            local_irq_enable();
            __netif_receive_skb(skb);
            rcu_read_unlock();
            local_irq_disable();
            input_queue_head_incr(sd);
            if (++work >= quota) {
                goto state_changed;
            }
        }
        .....
    }
    ....
}
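
Note that enqueue_to_backlog() queued the skb on input_pkt_queue, while the loop above dequeues from process_queue: in the part elided above, process_backlog() splices one queue into the other (abridged):

    /* inside process_backlog(), in the elided section: */
    qlen = skb_queue_len(&sd->input_pkt_queue);
    if (qlen)
        skb_queue_splice_tail_init(&sd->input_pkt_queue,
                                   &sd->process_queue);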
 
 
static int __netif_receive_skb(struct sk_buff *skb)
{
    int ret;
    if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
        unsigned long pflags = current->flags;
 
        /*
         * PFMEMALLOC skbs are special, they should
         * - be delivered to SOCK_MEMALLOC sockets only
         * - stay away from userspace
         * - have bounded memory usage
         *
         * Use PF_MEMALLOC as this saves us from propagating the allocation
         * context down to all allocation sites.
         */
        current->flags |= PF_MEMALLOC;
        ret = __netif_receive_skb_core(skb, true);
        tsk_restore_flags(current, pflags, PF_MEMALLOC);
    } else
        ret = __netif_receive_skb_core(skb, false);
 
    return ret;
}
 
 
 
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
    ......
    if (pt_prev) {
        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
            goto drop;
        else {
            /* for IPv4 frames, pt_prev->func here is
             * ip_rcv(skb, dev, pt, orig_dev) */
            ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        }
    }
    ......
}
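
ip_rcv() ends up as that pt_prev->func because IPv4 registers its packet_type at boot (net/ipv4/af_inet.c, abridged):

static struct packet_type ip_packet_type __read_mostly = {
    .type = cpu_to_be16(ETH_P_IP),
    .func = ip_rcv,
};
 
static int __init inet_init(void)
{
    ....
    dev_add_pack(&ip_packet_type);
    ....
}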
 
 
 
 
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
    /* Frames that eth_type_trans() marked PACKET_OTHERHOST are dropped
     * right here, before netfilter or any socket sees them -- this is
     * exactly why the mismatched MAC made ping fail. */
    if (skb->pkt_type == PACKET_OTHERHOST)
        goto drop;
    ...
    /* NF_HOOK runs the netfilter PREROUTING hook (the tables and rules
     * configured with iptables) and then continues in ip_rcv_finish() */
    return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL,
               ip_rcv_finish);
    ...
}
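
To summarize the root cause: for a while after boot the customer's device populated its USB-network MAC address incorrectly, so frames arriving at the host carried a destination MAC that did not match usb0's dev_addr. eth_type_trans() therefore marked them PACKET_OTHERHOST and ip_rcv() dropped them before iptables or any socket ever saw them, which matches the observed symptom: the interface is up, but ping gets no reply. One way to confirm this is to compare the destination MAC printed by tcpdump -i usb0 -e against the address reported by ip link show usb0.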