DPDK Mellanox NIC: multi-threaded hang issue


With Mellanox NICs driven by DPDK (net_mlx5), in a multi-threaded scenario where several threads concurrently query the link status, read statistics counters, or set the MTU, the threads can end up blocked. Below we reproduce the problem with the DPDK example l2fwd and analyze the cause.

Reproduction

# cd dpdk-stable-18.11.2
# export RTE_TARGET=build
# export RTE_SDK=`pwd`
# make config T=x86_64-native-linuxapp-gcc
# make -j32
# cd examples/l2fwd

Slightly modify l2fwd's main.c as follows: the first thread queries the link status in a loop while the second thread sets the MTU.

static void
l2fwd_main_loop(void)
{
        unsigned lcore_id;
        struct lcore_queue_conf *qconf;
        struct rte_eth_link eth_link;

        lcore_id = rte_lcore_id();
        qconf = &lcore_queue_conf[lcore_id];

        while (!force_quit) {
                if (lcore_id == 0) {
                        /* lcore 0: query link status in a loop */
                        rte_eth_link_get(qconf->rx_port_list[0], &eth_link);
                        RTE_LOG(INFO, L2FWD, "link is %d on core %d\n",
                                eth_link.link_status, lcore_id);
                } else if (lcore_id == 1) {
                        /* lcore 1: set the MTU in a loop */
                        rte_eth_dev_set_mtu(qconf->rx_port_list[0], 1500);
                        RTE_LOG(INFO, L2FWD, "set mtu on core %d\n",
                                lcore_id);
                }
                usleep(300);
        }
}

Build and run l2fwd, selecting two CPUs with -c. Both threads hang almost immediately (none of the per-iteration log lines ever appear):

# make
# ./build/l2fwd -c3 -n4 -w 82:00.1 -- -p1
EAL: Detected 40 lcore(s)
EAL: Detected 2 NUMA nodes
EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
EAL: Probing VFIO support...
EAL: VFIO support initialized
EAL: PCI device 0000:82:00.1 on NUMA socket 1
EAL:   probe driver: 15b3:1015 net_mlx5
MAC updating enabled
Notice: odd number of ports in portmask.
Lcore 0: RX port 0
Initializing port 0... done:
Port 0, MAC address: 50:6B:4B:C0:9B:C5


Checking link statusdone
Port0 Link Up. Speed 25000 Mbps - full-duplex


^C

Root cause analysis

Use gdb to examine the thread states and call stacks:

# ps -ef | grep l2fwd
root       8344   7232  0 05:45 pts/3    00:00:00 ./build/l2fwd -c3 -n4 -w 82:00.1 -- -p1
root       8353   7790  0 05:47 pts/0    00:00:00 grep --color=auto l2fwd

# gdb -p 8344
...
//Four threads in total. Thread 1 and thread 4 are the link-status and
//MTU-setting threads; both are blocked in the recvmsg call.
(gdb) info thread
  Id   Target Id         Frame
  1    Thread 0x7f68e4981c00 (LWP 8344) "l2fwd" 0x00007f68e35ce94d in recvmsg () at ../sysdeps/unix/syscall-template.S:84
  2    Thread 0x7f68e2d71700 (LWP 8345) "eal-intr-thread" 0x00007f68e32fba13 in epoll_wait () at ../sysdeps/unix/syscall-template.S:84
  3    Thread 0x7f68e2570700 (LWP 8346) "rte_mp_handle" 0x00007f68e35ce94d in recvmsg () at ../sysdeps/unix/syscall-template.S:84
* 4    Thread 0x7f68e1d6f700 (LWP 8347) "lcore-slave-1" 0x00007f68e35ce94d in recvmsg () at ../sysdeps/unix/syscall-template.S:84
//Thread 1 is blocked in the link-status query
(gdb) thread 1
[Switching to thread 1 (Thread 0x7f68e4981c00 (LWP 8344))]
#0  0x00007f68e35ce94d in recvmsg () at ../sysdeps/unix/syscall-template.S:84
84      ../sysdeps/unix/syscall-template.S: No such file or directory.
(gdb) bt
#0  0x00007f68e35ce94d in recvmsg () at ../sysdeps/unix/syscall-template.S:84
#1  0x00000000007f2ad6 in mlx5_nl_recv (nlsk_fd=18, sn=2089018456, cb=0x7f2c70 <mlx5_nl_ifindex_cb>, arg=0x7fff4edcd440)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_nl.c:266
#2  0x00000000007f41de in mlx5_nl_ifindex (nl=18, name=name@entry=0x43003e75f8 "mlx5_1")
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_nl.c:782
#3  0x00000000007d6015 in mlx5_get_ifname (dev=0xf7cf40 <rte_eth_devices>, ifname=0x7fff4edcd780)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_ethdev.c:225
#4  0x00000000007d6869 in mlx5_ifreq (ifr=0x7fff4edcd780, req=35091, dev=0xf7cf40 <rte_eth_devices>)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_ethdev.c:285
#5  mlx5_link_update_unlocked_gs (dev=dev@entry=0xf7cf40 <rte_eth_devices>, link=link@entry=0x7fff4edcd830)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_ethdev.c:695
#6  0x00000000007d8833 in mlx5_link_update (dev=0xf7cf40 <rte_eth_devices>, wait_to_complete=1)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_ethdev.c:804
#7  0x000000000051b1cf in rte_eth_link_get (port_id=<optimized out>, eth_link=0x7fff4edcd8a0)
    at /root/dpdk-stable-18.11.2/lib/librte_ethdev/rte_ethdev.c:1913
#8  0x000000000047be2e in l2fwd_main_loop () 
    at /root/dpdk-stable-18.11.2/examples/l2fwd/main.c:210
#9  0x000000000047c1dc in l2fwd_launch_one_lcore (dummy=0x0) at /root/dpdk-stable-18.11.2/examples/l2fwd/main.c:296
#10 0x0000000000562b7b in rte_eal_mp_remote_launch (f=0x47c1cb <l2fwd_launch_one_lcore>, arg=0x0, call_master=CALL_MASTER)
    at /root/dpdk-stable-18.11.2/lib/librte_eal/common/eal_common_launch.c:62
#11 0x000000000047d234 in main (argc=2, argv=0x7fff4edce890) at /root/dpdk-stable-18.11.2/examples/l2fwd/main.c:739
(gdb) info local
No locals.
(gdb) f 1
#1  0x00000000007f2ad6 in mlx5_nl_recv (nlsk_fd=18, sn=2089018456, cb=0x7f2c70 <mlx5_nl_ifindex_cb>, arg=0x7fff4edcd440)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_nl.c:266
266                             recv_bytes = recvmsg(nlsk_fd, &msg, 0);
(gdb) info local
nh = <optimized out>
recv_bytes = <optimized out>
sa = {nl_family = 16, nl_pad = 0, nl_pid = 0, nl_groups = 0}
buf = "|\000\000\000\001\024\002\000X\344\203|\230 \000\000\b\000\001\000\003\000\000\000\v\000\002\000mlx5_3\000\000\b\000\003\000\001\000\000\000\f\000\004\000\066\034r\255\027\000\000 \017\000\005\000\061\064.29.2002\000\000\f\000\006\000\000\000\000\000\000\000\000\000\f\000\a\000ě\300\000\003KkP\005\000\016\000\001\000\000\000\005\000T\000\001\000\000\000\t\000C\000roce", '\000' <repeats 16 times>, "\060-\177", '\000' <repeats 13 times>, "\001\000\000\000\v\000\000\000T\000\000\000\001\024\000\000\000U\334N\377\177\000\000\022\000\000\000\000\000\000\000\016P\233Q", '\000' <repeats 12 times>, "T"...
iov = {iov_base = 0x7fff4edc53e0, iov_len = 32768}
msg = {msg_name = 0x7fff4edc5380, msg_namelen = 12, msg_iov = 0x7fff4edc5390, msg_iovlen = 1, msg_control = 0x0, msg_controllen = 0,
  msg_flags = 0}
multipart = <optimized out>
ret = <optimized out>
//The received seq is 2089018456, matching the seq used at send time,
//so this is the expected reply. nlmsg_flags is 2 (NLM_F_MULTI) and
//nlmsg_type is not 0x3 (NLMSG_DONE), meaning more parts of this
//multipart reply are pending, so recvmsg is called again. (nlmsg_type
//5121 is RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
//i.e. (5 << 10) | 1.)
(gdb) p *(struct nlmsghdr *)buf
$7 = {nlmsg_len = 124, nlmsg_type = 5121, nlmsg_flags = 2, nlmsg_seq = 2089018456, nlmsg_pid = 8344}

(gdb) f 2
#2  0x00000000007f41de in mlx5_nl_ifindex (nl=18, name=name@entry=0x43003e75f8 "mlx5_1")
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_nl.c:782
782             ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data);
//ibindex = 1 was parsed out of the first part of the reply
(gdb) info local
seq = 2089018456
data = {name = 0x43003e75f8 "mlx5_1", ibindex = 1, ifindex = 0}
req = {nh = {nlmsg_len = 16, nlmsg_type = 5121, nlmsg_flags = 773, nlmsg_seq = 2089018456, nlmsg_pid = 0},
  buf = "\020\000\000\000\001\024\005\003X\344\203|", '\000' <repeats 19 times>}
na = <optimized out>
ret = <optimized out>

//Thread 4 is blocked in the set-MTU call
(gdb) thread 4
[Switching to thread 4 (Thread 0x7f68e1d6f700 (LWP 8347))]
#0  0x00007f68e35ce94d in recvmsg () at ../sysdeps/unix/syscall-template.S:84
84      ../sysdeps/unix/syscall-template.S: No such file or directory.
(gdb) bt
#0  0x00007f68e35ce94d in recvmsg () at ../sysdeps/unix/syscall-template.S:84
#1  0x00000000007f2ad6 in mlx5_nl_recv (nlsk_fd=18, sn=628175011, cb=0x7f2c70 <mlx5_nl_ifindex_cb>, arg=0x7f68e1d6d020)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_nl.c:266
#2  0x00000000007f41de in mlx5_nl_ifindex (nl=18, name=name@entry=0x43003e75f8 "mlx5_1")
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_nl.c:782
#3  0x00000000007d6015 in mlx5_get_ifname (dev=0xf7cf40 <rte_eth_devices>, ifname=0x7f68e1d6d360)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_ethdev.c:225
#4  0x00000000007d6869 in mlx5_ifreq (ifr=0x7f68e1d6d360, req=35091, dev=0xf7cf40 <rte_eth_devices>)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_ethdev.c:285
#5  mlx5_link_update_unlocked_gs (dev=dev@entry=0xf7cf40 <rte_eth_devices>, link=link@entry=0x7f68e1d6d410)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_ethdev.c:695
#6  0x00000000007d8833 in mlx5_link_update (dev=0xf7cf40 <rte_eth_devices>, wait_to_complete=1)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_ethdev.c:804
#7  0x000000000051b1cf in rte_eth_link_get (port_id=<optimized out>, eth_link=0x7f68e1d6d480)
    at /root/dpdk-stable-18.11.2/lib/librte_ethdev/rte_ethdev.c:1913
#8  0x000000000047be2e in l2fwd_main_loop () at /root/dpdk-stable-18.11.2/examples/l2fwd/main.c:210
#9  0x000000000047c1dc in l2fwd_launch_one_lcore (dummy=0x0) at /root/dpdk-stable-18.11.2/examples/l2fwd/main.c:296
#10 0x0000000000557ae1 in eal_thread_loop (arg=<optimized out>)
    at /root/dpdk-stable-18.11.2/lib/librte_eal/linuxapp/eal/eal_thread.c:153
#11 0x00007f68e35c56ba in start_thread (arg=0x7f68e1d6f700) at pthread_create.c:333
#12 0x00007f68e32fb41d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109
(gdb) f 1
#1  0x00000000007f2ad6 in mlx5_nl_recv (nlsk_fd=18, sn=628175011, cb=0x7f2c70 <mlx5_nl_ifindex_cb>, arg=0x7f68e1d6d020)
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_nl.c:266
266                             recv_bytes = recvmsg(nlsk_fd, &msg, 0);
(gdb) info local
nh = <optimized out>
recv_bytes = <optimized out>
sa = {nl_family = 16, nl_pad = 0, nl_pid = 0, nl_groups = 0}
buf = "\024\000\000\000\003\000\002\000X\344\203|\230 ", '\000' <repeats 28106 times>...
iov = {iov_base = 0x7f68e1d64fc0, iov_len = 32768}
msg = {msg_name = 0x7f68e1d64f60, msg_namelen = 12, msg_iov = 0x7f68e1d64f70, msg_iovlen = 1, msg_control = 0x0, msg_controllen = 0,
  msg_flags = 0}
multipart = <optimized out>
ret = <optimized out>
//The received seq is 2089018456, but the seq at send time was
//628175011, so this thread received the wrong reply. The message with
//seq 2089018456 belongs to thread 1; note nlmsg_type = 3 (NLMSG_DONE)
//below: thread 4 consumed the final part of thread 1's multipart reply.
(gdb) p *(struct nlmsghdr *)buf
$5 = {nlmsg_len = 20, nlmsg_type = 3, nlmsg_flags = 2, nlmsg_seq = 2089018456, nlmsg_pid = 8344}
(gdb) f 2
#2  0x00000000007f41de in mlx5_nl_ifindex (nl=18, name=name@entry=0x43003e75f8 "mlx5_1")
    at /root/dpdk-stable-18.11.2/drivers/net/mlx5/mlx5_nl.c:782
782             ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data);
(gdb) info local
seq = 628175011
data = {name = 0x43003e75f8 "mlx5_1", ibindex = 0, ifindex = 0}
req = {nh = {nlmsg_len = 16, nlmsg_type = 5121, nlmsg_flags = 773, nlmsg_seq = 628175011, nlmsg_pid = 0},
  buf = "\020\000\000\000\001\024\005\003\243\060q%", '\000' <repeats 19 times>}
na = <optimized out>
ret = <optimized out>

The call stacks show that both threads go through mlx5_ifreq -> mlx5_get_ifname -> mlx5_nl_ifindex -> mlx5_nl_recv -> recvmsg and end up blocked in recvmsg.

Next, let's look at mlx5_nl_ifindex to understand why concurrent calls from multiple threads break.

//At driver init, a netlink socket is created and its fd is saved in
//nl_socket_rdma; all threads share this single fd
mlx5_pci_probe->mlx5_dev_spawn
    priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);

//Fetches information from the kernel through nl_socket_rdma
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
    struct mlx5_priv *priv = dev->data->dev_private;
    unsigned int ifindex =
        priv->nl_socket_rdma >= 0 ?
        mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0;
    ...
}

unsigned int
mlx5_nl_ifindex(int nl, const char *name)
{
    static const uint32_t pindex = 1;
    //A random sequence number identifies one sendmsg/recvmsg pair
    uint32_t seq = random();
    struct mlx5_nl_ifindex_data data = {
        .name = name,
        .ibindex = 0, /* Determined during first pass. */
        .ifindex = 0, /* Determined during second pass. */
    };
    union {
        struct nlmsghdr nh;
        uint8_t buf[NLMSG_HDRLEN +
                NLA_HDRLEN + NLA_ALIGN(sizeof(data.ibindex)) +
                NLA_HDRLEN + NLA_ALIGN(sizeof(pindex))];
    } req = {
        .nh = {
            .nlmsg_len = NLMSG_LENGTH(0),
            .nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                               RDMA_NLDEV_CMD_GET),
            .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP,
        },
    };
    struct nlattr *na;
    int ret;
    //First send an RDMA_NLDEV_CMD_GET request to obtain the ibindex
    ret = mlx5_nl_send(nl, &req.nh, seq);
    if (ret < 0)
        return 0;
    //The reply to the request is then received with recvmsg
    ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data);
    if (ret < 0)
        return 0;
    ...
}

static int
mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
         void *arg)
{
        ...
        do {
            recv_bytes = recvmsg(nlsk_fd, &msg, 0);
            if (recv_bytes == -1) {
                rte_errno = errno;
                return -rte_errno;
            }
            nh = (struct nlmsghdr *)buf;
        //A received message must carry the same seq that was sent;
        //otherwise discard it and call recvmsg again
        } while (nh->nlmsg_seq != sn);
        ...
}

The code above shows that every ifindex lookup first does a sendmsg and then a recvmsg on the shared fd, and the received seq must match the one that was sent.
The gdb analysis shows what went wrong: thread 1 sent a request whose reply is a multipart message requiring two recvmsg calls. Thread 1 received the first part, but the second part (the NLMSG_DONE) was consumed by thread 4, so thread 1 keeps waiting for the rest of its reply and blocks in recvmsg. Thread 4 in turn discards the stolen message because its seq does not match, then waits for a reply carrying its own seq; that reply was presumably consumed and discarded by thread 1 in the same way, so both threads hang.

In summary, whenever multiple threads concurrently call APIs that go through mlx5_ifreq, the threads may hang. The race can be reproduced in isolation with the sketch below.
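
As a minimal, self-contained illustration (not mlx5 code), the following sketch assumes only the generic NETLINK_ROUTE RTM_GETLINK dump, whose reply is likewise a multipart message ending with NLMSG_DONE. Two threads share one netlink fd and filter replies by sequence number exactly as mlx5_nl_recv does; depending on scheduling, one thread steals the other's reply and one or both block in recv forever. All names here are ours.

/* Two threads share one netlink fd; each sends a dump request with its
 * own seq and discards replies whose seq does not match, like
 * mlx5_nl_recv(). A thread can thus consume (and throw away) the other
 * thread's NLMSG_DONE, leaving both blocked in recv() forever. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int nlsk;        /* shared fd, like priv->nl_socket_rdma */

static void *dump_links(void *arg)
{
        uint32_t seq = (uint32_t)(uintptr_t)arg; /* mlx5 uses random() */
        struct {
                struct nlmsghdr nh;
                struct ifinfomsg ifm;
        } req = {
                .nh = {
                        .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
                        .nlmsg_type = RTM_GETLINK,
                        .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
                        .nlmsg_seq = seq,
                },
                .ifm = { .ifi_family = AF_UNSPEC },
        };
        char buf[32768];

        send(nlsk, &req, req.nh.nlmsg_len, 0);
        for (;;) {
                ssize_t n = recv(nlsk, buf, sizeof(buf), 0);
                if (n <= 0)
                        return NULL;
                /* For brevity only the first header of each datagram is
                 * checked; a dump datagram carries a single seq anyway. */
                struct nlmsghdr *nh = (struct nlmsghdr *)buf;
                if (nh->nlmsg_seq != seq) {   /* same check as mlx5_nl_recv */
                        printf("thread %u stole a reply with seq %u\n",
                               seq, nh->nlmsg_seq);
                        continue;             /* discard and keep waiting */
                }
                if (nh->nlmsg_type == NLMSG_DONE) {
                        printf("thread %u: dump complete\n", seq);
                        return NULL;
                }
        }
}

int main(void)
{
        pthread_t t1, t2;

        nlsk = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        pthread_create(&t1, NULL, dump_links, (void *)1UL);
        pthread_create(&t2, NULL, dump_links, (void *)2UL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}

Compile with cc -pthread; running it a few times shows the interleaving (and whether it hangs) vary from run to run.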

Solutions

a. Add locking, either in the application code or in the DPDK driver (see the sketch after this list).
b. DPDK fixed this in its 2019 releases: the patch reasons that the ifindex cannot change once the DPDK application has started, so the driver resolves it once at init time and caches it; later calls use the cached value instead of querying the kernel again (idea sketched after this list).
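
A minimal sketch of option (a) at the application level, assuming the DPDK 18.11 API (where rte_eth_link_get returns void); ctrl_lock and the safe_* wrapper names are ours. The idea is to route every control-path call that can reach mlx5_ifreq through one lock:

#include <rte_ethdev.h>
#include <rte_spinlock.h>

/* One lock serializing all control-path ethdev calls (link status,
 * stats, MTU, ...) so only one thread at a time touches the shared
 * netlink fd inside the mlx5 PMD. */
static rte_spinlock_t ctrl_lock = RTE_SPINLOCK_INITIALIZER;

static void
safe_link_get(uint16_t port_id, struct rte_eth_link *link)
{
        rte_spinlock_lock(&ctrl_lock);
        rte_eth_link_get(port_id, link);        /* void in DPDK 18.11 */
        rte_spinlock_unlock(&ctrl_lock);
}

static int
safe_set_mtu(uint16_t port_id, uint16_t mtu)
{
        int ret;

        rte_spinlock_lock(&ctrl_lock);
        ret = rte_eth_dev_set_mtu(port_id, mtu);
        rte_spinlock_unlock(&ctrl_lock);
        return ret;
}

And the idea behind fix (b), with illustrative names rather than the literal upstream patch: resolve the ifindex once over netlink in the single-threaded probe path, cache it, and serve later lookups from the cache via if_indextoname(), so runtime control-path calls never touch the shared netlink socket at all:

#include <net/if.h>     /* if_indextoname(), IF_NAMESIZE */

struct priv_sketch {
        unsigned int if_index;  /* resolved once during probe (e.g. in
                                 * mlx5_dev_spawn()), read-only after */
};

static int
get_ifname_cached(const struct priv_sketch *priv, char (*ifname)[IF_NAMESIZE])
{
        /* No netlink round-trip, hence no shared-socket race. */
        return if_indextoname(priv->if_index, *ifname) ? 0 : -1;
}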

See also: dpdk mellanox网卡 多线程hang住的问题 - 简书 (jianshu.com)
