【ceph】Ceph network problems and tools

This post walks through troubleshooting network problems in a Ceph environment: checking network state with the net-tools utilities, inspecting socket and connection state through the `ceph daemon` admin-socket commands, testing network performance with iperf3, analyzing NIC and TCP errors with ethtool and netstat, and capturing packets with tcpdump for deeper analysis.

Install net-tools (provides netstat):

yum install -y net-tools

Check a daemon's network connections, e.g. NFS-Ganesha:

netstat -anp | grep ganesha.nfsd

Query a config value through a daemon's admin socket:

ceph daemon /var/run/ceph/ceph-client.admin.1114156.139819448467456.asok config get xxx
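For example, to read one concrete option through the same socket (public_network here is just an illustrative field name; substitute whatever option you need):

ceph daemon /var/run/ceph/ceph-client.admin.1114156.139819448467456.asok config get public_network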

List the Ceph admin sockets (unix domain sockets):

netstat -lnxp

unix STREAM LISTENING 84402953 2946130/rados /var/run/ceph/ceph-admin.2946130.94651089703712.asok
unix STREAM LISTENING 336026461 2807156/rados /var/run/ceph/ceph-admin.2807156.94357368345376.asok
unix STREAM LISTENING 186741342 3320797/rados /var/run/ceph/ceph-admin.3320797.94106476679968.asok
unix STREAM LISTENING 97390990 690481/rados /var/run/ceph/ceph-admin.690481.94219819733792.asok
unix STREAM LISTENING 111978 1/systemd /run/systemd/private
unix STREAM LISTENING 343380102 1831050/rados /var/run/ceph/ceph-admin.1831050.94326674244384.asok
unix STREAM LISTENING 224768569 3726894/rados /var/run/ceph/ceph-admin.3726894.94886983721760.asok
unix STREAM LISTENING 150163438 3744464/rados /var/run/ceph/ceph-admin.3744464.93915040211744.asok
unix STREAM LISTENING 111987 1/systemd /run/lvm/lvmetad.socket
unix STREAM LISTENING 74050952 2166906/rados /var/run/ceph/ceph-admin.2166906.93862492377888.asok
unix STREAM LISTENING 69560405 21215/rados /var/run/ceph/ceph-admin.21215.94443882525472.asok
unix STREAM LISTENING 380506858 1926693/rados /var/run/ceph/ceph-admin.1926693.94827481985824.asok
unix STREAM LISTENING 241625897 2794366/rados /var/run/ceph/ceph-admin.2794366.94245238936352.asok
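The admin sockets can also be found by listing /var/run/ceph directly:

ls -l /var/run/ceph/*.asok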

With a socket path in hand, you can query the daemon directly:

ceph daemon /var/run/ceph/ceph-admin.2794366.94245238936352.asok status

ceph --admin-daemon ceph-osd.1.asok config show

ceph --admin-daemon ceph-mon.node1.asok config show
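To focus on network-related settings, the running config can be filtered; a small sketch (the grep pattern is just an example of common option names):

ceph --admin-daemon ceph-osd.1.asok config show | grep -E 'public_network|cluster_network|ms_type'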

[root@rdma55 system]# ceph daemon /var/run/ceph/ceph-admin.2794366.94245238936352.asok help
{
    "config diff": "dump diff of current config and default config",
    "config diff get": "dump diff get <field>: dump diff of current and default config setting <field>",
    "config get": "config get <field>: get the config value",
    "config help": "get config setting schema and descriptions",
    "config set": "config set <field> <val> [<val> ...]: set a config variable",
    "config show": "dump current config settings",
    "get_command_descriptions": "list available commands",
    "git_version": "get git sha1",
    "help": "list available commands",
    "log dump": "dump recent log entries to log file",
    "log flush": "flush log entries to log file",
    "log reopen": "reopen log file",
    "perf dump": "dump perfcounters value",
    "perf histogram dump": "dump perf histogram values",
    "perf histogram schema": "dump perf histogram schema",
    "perf reset": "perf reset <name>: perf reset all or one perfcounter name",
    "perf schema": "dump perfcounters schema",
    "version": "get ceph version"
}

[root@rdma55 system]# ceph daemon /var/run/ceph/ceph-admin.2794366.94245238936352.asok version
{"version":"12.2.1-UniStorOS_V100R001B62","release":"luminous","release_type":"stable"}[root@rdma55 system]# 

Viewing Ceph connections


public_addr = 172.16.156.123
manage_addr = 172.16.31.123
cluster_addr = 192.168.156.123
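As the dumps below show, the public address (172.16.156.123) carries client, monitor and front-side heartbeat traffic, while the cluster address (192.168.156.123) carries OSD replication and back-side heartbeats. To confirm which local interface each address sits on:

ip -o addr show | grep -E '172.16.156.123|172.16.31.123|192.168.156.123'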

[root@node123 ~]# ceph osd tree
ID  CLASS WEIGHT  TYPE NAME                      STATUS REWEIGHT PRI-AFF
-11             0 root maintain
-10       1.17224 root dcache-d1
-17       1.17224     rack r1.dcache-d1
-21       0.39075         host node122.dcache-d1
  5       0.09769             osd.5                  up  1.00000 1.00000
 11       0.09769             osd.11                 up  1.00000 1.00000
 17       0.09769             osd.17                 up  1.00000 1.00000
 23       0.09769             osd.23                 up  1.00000 1.00000
-23       0.39075         host node123.dcache-d1
  4       0.09769             osd.4                  up  1.00000 1.00000
 10       0.09769             osd.10                 up  1.00000 1.00000
 16       0.09769             osd.16                 up  1.00000 1.00000
 22       0.09769             osd.22                 up  1.00000 1.00000
-25       0.39075         host node124.dcache-d1
  3       0.09769             osd.3                  up  1.00000 1.00000
  9       0.09769             osd.9                  up  1.00000 1.00000
 15       0.09769             osd.15                 up  1.00000 1.00000
 21       0.09769             osd.21                 up  1.00000 1.00000
 -9       9.73792 root d1
-12       9.73792     rack r1.d1
-19       3.24597         host node122.d1
  2  nvme 0.81149             osd.2                  up  1.00000 1.00000
  8  nvme 0.81149             osd.8                  up  1.00000 1.00000
 14  nvme 0.81149             osd.14                 up  1.00000 1.00000
 20  nvme 0.81149             osd.20                 up  1.00000 1.00000
 -7       3.24597         host node123.d1
  1  nvme 0.81149             osd.1                  up  1.00000 1.00000
  7  nvme 0.81149             osd.7                  up  1.00000 1.00000
 13  nvme 0.81149             osd.13                 up  1.00000 1.00000
 19  nvme 0.81149             osd.19                 up  1.00000 1.00000
 -4       3.24597         host node124.d1
  0  nvme 0.81149             osd.0                  up  1.00000 1.00000
  6  nvme 0.81149             osd.6                  up  1.00000 1.00000
 12  nvme 0.81149             osd.12                 up  1.00000 1.00000
 18  nvme 0.81149             osd.18                 up  1.00000 1.00000
 -1             0 root default
[root@node123 ~]# ceph daemon osd.1 messenger dump
{
    "cluster": {
        "name": "osd.1",
        "addr": "192.168.156.123:6808/1060851",
        "network stack type": "posix",
        "number of established connection": 12,
        "number of accepting connection": 0,
        "number of deleted connection": 1
    },
    "client": {
        "name": "osd.1",
        "addr": "172.16.156.123:6819/1060851",
        "network stack type": "posix",
        "number of established connection": 7,
        "number of accepting connection": 0,
        "number of deleted connection": 2
    },
    "ms_objecter": {
        "name": "osd.1",
        "addr": "-",
        "network stack type": "posix",
        "number of established connection": 0,
        "number of accepting connection": 0,
        "number of deleted connection": 0
    },
    "hb_front_client": {
        "name": "osd.1",
        "addr": "172.16.156.123:0/1060851",
        "network stack type": "posix",
        "number of established connection": 14,
        "number of accepting connection": 0,
        "number of deleted connection": 4
    },
    "hb_back_client": {
        "name": "osd.1",
        "addr": "192.168.156.123:0/1060851",
        "network stack type": "posix",
        "number of established connection": 14,
        "number of accepting connection": 0,
        "number of deleted connection": 4
    },
    "hb_front_server": {
        "name": "osd.1",
        "addr": "172.16.156.123:6820/1060851",
        "network stack type": "posix",
        "number of established connection": 11,
        "number of accepting connection": 0,
        "number of deleted connection": 3
    },
    "hb_back_server": {
        "name": "osd.1",
        "addr": "192.168.156.123:6809/1060851",
        "network stack type": "posix",
        "number of established connection": 11,
        "number of accepting connection": 0,
        "number of deleted connection": 3
    }
}
[root@node123 ~]# ps -ef | grep 1060851
ceph     1060851       1  1 13:51 ?        00:00:25 /opt/h3c/bin/ceph-osd --cluster=ceph -i 1 -f --setuser ceph --setgroup ceph
root     1217911 3511408  0 14:13 pts/0    00:00:00 grep --color=auto 1060851
[root@node123 ~]#
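The messenger dump can be cross-checked against the kernel's view of the same process, using the PID found above (output format varies slightly between distributions):

ss -tnp | grep 'pid=1060851'
netstat -anp | grep 1060851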

[root@node123 ~]# ceph daemon osd.1 connection dump cluster
{
    "established connections": [
        {
            "conn": "0x7fa98dd8e000",
            "peer_type": "osd",
            "peer_name": "osd.18",
            "peer_addr": "192.168.156.124:6814/2011059",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 2
        },
        {
            "conn": "0x7fa9c344f000",
            "peer_type": "osd",
            "peer_name": "osd.7",
            "peer_addr": "192.168.156.123:6810/1060879",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 1
        },
        {
            "conn": "0x7fa98c8d1000",
            "peer_type": "osd",
            "peer_name": "osd.0",
            "peer_addr": "192.168.156.124:6808/2010936",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 1
        },
        {
            "conn": "0x7fa9c3454000",
            "peer_type": "osd",
            "peer_name": "osd.13",
            "peer_addr": "192.168.156.123:6812/1060936",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa9a1474000",
            "peer_type": "osd",
            "peer_name": "osd.18",
            "peer_addr": "192.168.156.124:6806/1530644",
            "state": "STATE_CLOSED",
            "worker_name": "io-worker",
            "worker_id": 2
        },
        {
            "conn": "0x7fa999f3b000",
            "peer_type": "osd",
            "peer_name": "osd.8",
            "peer_addr": "192.168.156.122:6802/3705373",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa98dc8e000",
            "peer_type": "osd",
            "peer_name": "osd.20",
            "peer_addr": "192.168.156.122:6806/3705519",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 2
        },
        {
            "conn": "0x7fa98c8dc000",
            "peer_type": "osd",
            "peer_name": "osd.2",
            "peer_addr": "192.168.156.122:6800/3705344",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 2
        },
        {
            "conn": "0x7fa9c3458000",
            "peer_type": "osd",
            "peer_name": "osd.19",
            "peer_addr": "192.168.156.123:6814/1060973",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 1
        },
        {
            "conn": "0x7fa999f88000",
            "peer_type": "osd",
            "peer_name": "osd.14",
            "peer_addr": "192.168.156.122:6804/3705438",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 1
        },
        {
            "conn": "0x7fa99ac4c000",
            "peer_type": "osd",
            "peer_name": "osd.6",
            "peer_addr": "192.168.156.124:6810/2010961",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 2
        },
        {
            "conn": "0x7fa98dc9a000",
            "peer_type": "osd",
            "peer_name": "osd.12",
            "peer_addr": "192.168.156.124:6812/2011020",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 0
        }
    ],
    "accepting connections": [],
    "deleted connections": [
        {
            "conn": "0x7fa9a1474000",
            "peer_type": "osd",
            "peer_name": "osd.18",
            "peer_addr": "192.168.156.124:6806/1530644",
            "state": "STATE_CLOSED",
            "worker_name": "io-worker",
            "worker_id": 2
        }
    ]
}
[root@node123 ~]#
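If jq is available, the connection dump is easy to summarize; a sketch that counts cluster-messenger connections by state (key names taken from the output above):

ceph daemon osd.1 connection dump cluster | jq '[.["established connections"][].state] | group_by(.) | map({state: .[0], count: length})'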

[root@node123 ~]# ceph daemon osd.1 connection dump client
{
    "established connections": [
        {
            "conn": "0x7fa98c891000",
            "peer_type": "dse",
            "peer_name": "dse.70684427",
            "peer_addr": "172.16.156.122:6819/3705334",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa9c5840000",
            "peer_type": "dse",
            "peer_name": "dse.70684199",
            "peer_addr": "172.16.156.124:6810/2008338",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 2
        },
        {
            "conn": "0x7fa97647e000",
            "peer_type": "mgr",
            "peer_name": "mgr.70684394",
            "peer_addr": "172.16.156.122:6809/3705389",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 1
        },
        {
            "conn": "0x7fa98dd68000",
            "peer_type": "dse",
            "peer_name": "dse.70684099",
            "peer_addr": "172.16.156.123:6810/1058464",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa9a740a000",
            "peer_type": "mon",
            "peer_name": "mon.1",
            "peer_addr": "172.16.156.123:6789/0",
            "state": "STATE_CLOSED",
            "worker_name": "io-worker",
            "worker_id": 2
        },
        {
            "conn": "0x7fa9a7243000",
            "peer_type": "mon",
            "peer_name": "mon.0",
            "peer_addr": "172.16.156.122:6789/0",
            "state": "STATE_CLOSED",
            "worker_name": "io-worker",
            "worker_id": 1
        },
        {
            "conn": "0x7fa9a71d4000",
            "peer_type": "mon",
            "peer_name": "mon.2",
            "peer_addr": "172.16.156.124:6789/0",
            "state": "STATE_OPEN",
            "worker_name": "io-worker",
            "worker_id": 0
        }
    ],
    "accepting connections": [],
    "deleted connections": [
        {
            "conn": "0x7fa9a7243000",
            "peer_type": "mon",
            "peer_name": "mon.0",
            "peer_addr": "172.16.156.122:6789/0",
            "state": "STATE_CLOSED",
            "worker_name": "io-worker",
            "worker_id": 1
        },
        {
            "conn": "0x7fa9a740a000",
            "peer_type": "mon",
            "peer_name": "mon.1",
            "peer_addr": "172.16.156.123:6789/0",
            "state": "STATE_CLOSED",
            "worker_name": "io-worker",
            "worker_id": 2
        }
    ]
}
[root@node123 ~]#

[root@node123 ~]# ceph daemon osd.1 connection dump ms_objecter
{
    "established connections": [],
    "accepting connections": [],
    "deleted connections": []
}
[root@node123 ~]#

[root@node123 ~]# ceph daemon osd.1 connection dump hb_front_client
{
    "established connections": [
        {
            "conn": "0x7fa9760f8000",
            "peer_type": "osd",
            "peer_name": "osd.6",
            "peer_addr": "172.16.156.124:6822/2010961",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa9760e4000",
            "peer_type": "osd",
            "peer_name": "osd.18",
            "peer_addr": "172.16.156.124:6826/2011059",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa9760d9000",
            "peer_type": "osd",
            "peer_name": "osd.0",
            "peer_addr": "172.16.156.124:6820/2010936",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa99ad2d000",
            "peer_type": "osd",
            "peer_name": "osd.20",
            "peer_addr": "172.16.156.122:6807/3705519",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa999f6f000",
            "peer_type": "osd",
            "peer_name": "osd.8",
            "peer_addr": "172.16.156.122:6803/3705373",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa98dd66000",
            "peer_type": "osd",
            "peer_name": "osd.14",
            "peer_addr": "172.16.156.122:6805/3705438",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa999f9f000",
            "peer_type": "osd",
            "peer_name": "osd.2",
            "peer_addr": "172.16.156.122:6801/3705344",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa976068000",
            "peer_type": "osd",
            "peer_name": "osd.18",
            "peer_addr": "172.16.156.124:6808/1530644",
            "state": "STATE_CLOSED",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa976072000",
            "peer_type": "osd",
            "peer_name": "osd.6",
            "peer_addr": "172.16.156.124:6804/1530563",
            "state": "STATE_CLOSED",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa99ace1000",
            "peer_type": "osd",
            "peer_name": "osd.19",
            "peer_addr": "172.16.156.123:6826/1060973",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa9760ee000",
            "peer_type": "osd",
            "peer_name": "osd.12",
            "peer_addr": "172.16.156.124:6824/2011020",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa97609a000",
            "peer_type": "osd",
            "peer_name": "osd.7",
            "peer_addr": "172.16.156.123:6822/1060879",
            "state": "STATE_CLOSED",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa9760a4000",
            "peer_type": "osd",
            "peer_name": "osd.13",
            "peer_addr": "172.16.156.123:6824/1060936",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa976086000",
            "peer_type": "osd",
            "peer_name": "osd.12",
            "peer_addr": "172.16.156.124:6806/1530602",
            "state": "STATE_CLOSED",
            "worker_name": "hb-worker",
            "worker_id": 0
        }
    ],
    "accepting connections": [],
    "deleted connections": [
        {
            "conn": "0x7fa976068000",
            "peer_type": "osd",
            "peer_name": "osd.18",
            "peer_addr": "172.16.156.124:6808/1530644",
            "state": "STATE_CLOSED",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa976072000",
            "peer_type": "osd",
            "peer_name": "osd.6",
            "peer_addr": "172.16.156.124:6804/1530563",
            "state": "STATE_CLOSED",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa976086000",
            "peer_type": "osd",
            "peer_name": "osd.12",
            "peer_addr": "172.16.156.124:6806/1530602",
            "state": "STATE_CLOSED",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa97609a000",
            "peer_type": "osd",
            "peer_name": "osd.7",
            "peer_addr": "172.16.156.123:6822/1060879",
            "state": "STATE_CLOSED",
            "worker_name": "hb-worker",
            "worker_id": 0
        }
    ]
}
[root@node123 ~]#

[root@node123 ~]# ceph daemon osd.1 connection dump hb_back_client
{
    "established connections": [
        {
            "conn": "0x7fa9760e9000",
            "peer_type": "osd",
            "peer_name": "osd.12",
            "peer_addr": "192.168.156.124:6813/2011020",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa9760de000",
            "peer_type": "osd",
            "peer_name": "osd.18",
            "peer_addr": "192.168.156.124:6815/2011059",
            "state": "STATE_OPEN",
            "worker_name": "hb-worker",
            "worker_id": 0
        },
        {
            "conn": "0x7fa9760d4000",
            "peer_type": "osd",
            "peer_name": "osd.0",
            "peer_addr": "192.168.156.124:6809/2010936",
            "state": "STATE_OPEN",
            "worker_name&
