ceph集群维护记录

1、告警:application not enabled on 2 pool(s)

解决方法:

[root@master-local believer]# ceph -s
  cluster:
    id:     3b6d2da2-1d65-45bc-a04f-8872dedd7059
    health: HEALTH_WARN
            BlueFS spillover detected on 24 OSD(s)
            application not enabled on 2 pool(s)

  services:
    mon: 3 daemons, quorum master-local,slave2-local,slave3-local (age 6w)
    mgr: master-local(active, since 6w)
    osd: 24 osds: 24 up (since 2d), 24 in (since 2d); 8 remapped pgs

  data:
    pools:   8 pools, 1056 pgs
    objects: 832.73k objects, 52 TiB
    usage:   104 TiB used, 27 TiB / 131 TiB avail
    pgs:     5171/1665460 objects misplaced (0.310%)
             1048 active+clean
             6    active+remapped+backfill_wait
             2    active+remapped+backfilling

  io:
    recovery: 115 MiB/s, 1 objects/s


[root@master-local believer]# ceph health detail
HEALTH_WARN BlueFS spillover detected on 24 OSD(s); application not enabled on 2 pool(s)
BLUEFS_SPILLOVER BlueFS spillover detected on 24 OSD(s)
     osd.0 spilled over 4.1 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.1 spilled over 4.4 GiB metadata from 'db' device (2.8 GiB used of 16 GiB) to slow device
     osd.2 spilled over 4.6 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.3 spilled over 5.1 GiB metadata from 'db' device (2.6 GiB used of 16 GiB) to slow device
     osd.4 spilled over 4.1 GiB metadata from 'db' device (2.8 GiB used of 16 GiB) to slow device
     osd.5 spilled over 4.1 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.6 spilled over 4.2 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.7 spilled over 4.0 GiB metadata from 'db' device (2.8 GiB used of 16 GiB) to slow device
     osd.8 spilled over 4.2 GiB metadata from 'db' device (2.9 GiB used of 16 GiB) to slow device
     osd.9 spilled over 3.8 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.10 spilled over 5.5 GiB metadata from 'db' device (2.6 GiB used of 16 GiB) to slow device
     osd.11 spilled over 3.8 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.12 spilled over 4.5 GiB metadata from 'db' device (2.5 GiB used of 16 GiB) to slow device
     osd.13 spilled over 5.0 GiB metadata from 'db' device (2.6 GiB used of 16 GiB) to slow device
     osd.14 spilled over 3.9 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.15 spilled over 3.4 GiB metadata from 'db' device (2.9 GiB used of 16 GiB) to slow device
     osd.16 spilled over 4.2 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.17 spilled over 3.8 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.18 spilled over 3.4 GiB metadata from 'db' device (2.8 GiB used of 16 GiB) to slow device
     osd.19 spilled over 4.1 GiB metadata from 'db' device (2.8 GiB used of 16 GiB) to slow device
     osd.20 spilled over 4.2 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.21 spilled over 3.8 GiB metadata from 'db' device (2.8 GiB used of 16 GiB) to slow device
     osd.22 spilled over 4.0 GiB metadata from 'db' device (2.7 GiB used of 16 GiB) to slow device
     osd.23 spilled over 3.5 GiB metadata from 'db' device (2.8 GiB used of 16 GiB) to slow device
POOL_APP_NOT_ENABLED application not enabled on 2 pool(s)
    application not enabled on pool 'ourea'
    application not enabled on pool 'oureadb'
    use 'ceph osd pool application enable <pool-name> <app-name>', where <app-name> is 'cephfs', 'rbd', 'rgw', or freeform for custom applications.
[root@master-local believer]# ceph osd pool application enable ourea rgw
enabled application 'rgw' on pool 'ourea'
[root@master-local believer]# ceph osd pool application enable oureadb rgw
enabled application 'rgw' on pool 'oureadb'
[root@master-local believer]# ceph -s
  cluster:
    id:     3b6d2da2-1d65-45bc-a04f-8872dedd7059
    health: HEALTH_WARN
            BlueFS spillover detected on 24 OSD(s)

  services:
    mon: 3 daemons, quorum master-local,slave2-local,slave3-local (age 6w)
    mgr: master-local(active, since 6w)
    osd: 24 osds: 24 up (since 2d), 24 in (since 2d); 8 remapped pgs

  data:
    pools:   8 pools, 1056 pgs
    objects: 832.73k objects, 52 TiB
    usage:   104 TiB used, 27 TiB / 131 TiB avail
    pgs:     4866/1665460 objects misplaced (0.292%)
             1048 active+clean
             6    active+remapped+backfill_wait
             2    active+remapped+backfilling

  io:
    recovery: 110 MiB/s, 1 objects/s

2、Ceph集群存储负载均衡(使用 upmap 将 PG 从高使用率 OSD 重映射到低使用率 OSD,前提是客户端最低兼容版本为 luminous 及以上)

[root@master-local believer]# ceph osd   set-require-min-compat-client luminous
set require_min_compat_client to luminous
[root@master-local believer]# ceph features
{
    "mon": [
        {
            "features": "0x3ffddff8ffacffff",
            "release": "luminous",
            "num": 3
        }
    ],
    "osd": [
        {
            "features": "0x3ffddff8ffacffff",
            "release": "luminous",
            "num": 24
        }
    ],
    "client": [
        {
            "features": "0x3ffddff8ffacffff",
            "release": "luminous",
            "num": 17
        }
    ],
    "mgr": [
        {
            "features": "0x3ffddff8ffacffff",
            "release": "luminous",
            "num": 1
        }
    ]
}
[root@master-local believer]# ceph osd getmap -o osd.map
got osdmap epoch 40044
[root@master-local believer]# ceph osd pool ls detail
pool 7 'ourea' replicated size 2 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode warn last_change 40043 flags hashpspool,full stripe_width 0
pool 8 'oureadb' replicated size 2 min_size 2 crush_rule 0 object_hash rjenkins pg_num 1024 pgp_num 1024 autoscale_mode warn last_change 40043 lfor 0/39918/39916 flags hashpspool,full stripe_width 0

[root@master-local believer]# osdmaptool osd.map --upmap out.txt --upmap-pool oureadb
osdmaptool: osdmap file 'osd.map'
writing upmap command output to: out.txt
checking for upmap cleanups
upmap, max-count 100, max deviation 0.01
 limiting to pools oureadb (8)
[root@master-local believer]# ll
-rw-r--r--  1 root root     11996 Apr 25 10:23 osd.map
-rw-r--r--  1 root root      2494 Apr 25 10:39 out.txt
[root@master-local believer]# cat out.txt
ceph osd pg-upmap-items 8.c 3 23
ceph osd pg-upmap-items 8.1b 10 9
ceph osd pg-upmap-items 8.1e 3 18
ceph osd pg-upmap-items 8.23 20 18
ceph osd pg-upmap-items 8.25 1 17
ceph osd pg-upmap-items 8.38 3 9
ceph osd pg-upmap-items 8.4e 3 18
ceph osd pg-upmap-items 8.7a 12 15
ceph osd pg-upmap-items 8.83 6 7
ceph osd pg-upmap-items 8.8b 3 4
ceph osd pg-upmap-items 8.92 22 18
ceph osd pg-upmap-items 8.94 3 15
ceph osd pg-upmap-items 8.b8 6 9
ceph osd pg-upmap-items 8.d6 13 15
ceph osd pg-upmap-items 8.dc 0 5
ceph osd pg-upmap-items 8.f8 10 9
ceph osd pg-upmap-items 8.101 12 15
ceph osd pg-upmap-items 8.10c 1 11
ceph osd pg-upmap-items 8.119 8 9
ceph osd pg-upmap-items 8.12e 10 7
ceph osd pg-upmap-items 8.138 13 15
ceph osd pg-upmap-items 8.16a 2 23
ceph osd pg-upmap-items 8.18f 1 23
ceph osd pg-upmap-items 8.1bd 13 15
ceph osd pg-upmap-items 8.1c7 13 15
ceph osd pg-upmap-items 8.1c9 2 23 12 11
ceph osd pg-upmap-items 8.1cb 13 14
ceph osd pg-upmap-items 8.1cc 10 9
ceph osd pg-upmap-items 8.1cf 3 18
ceph osd pg-upmap-items 8.1e8 3 23
ceph osd pg-upmap-items 8.1ed 13 14
ceph osd pg-upmap-items 8.1ef 10 7
ceph osd pg-upmap-items 8.1f1 3 23
ceph osd pg-upmap-items 8.20b 6 7
ceph osd pg-upmap-items 8.215 13 15
ceph osd pg-upmap-items 8.218 10 9
ceph osd pg-upmap-items 8.219 10 7
ceph osd pg-upmap-items 8.21d 13 15
ceph osd pg-upmap-items 8.238 10 7
ceph osd pg-upmap-items 8.23b 2 18
ceph osd pg-upmap-items 8.23f 12 15
ceph osd pg-upmap-items 8.246 10 7
ceph osd pg-upmap-items 8.24b 2 15
ceph osd pg-upmap-items 8.254 1 23
ceph osd pg-upmap-items 8.268 10 7
ceph osd pg-upmap-items 8.27d 10 7
ceph osd pg-upmap-items 8.284 2 9
ceph osd pg-upmap-items 8.28d 3 4 10 7
ceph osd pg-upmap-items 8.28f 10 9
ceph osd pg-upmap-items 8.29e 12 14
ceph osd pg-upmap-items 8.29f 20 18
ceph osd pg-upmap-items 8.2b3 3 23
ceph osd pg-upmap-items 8.2bf 13 15
ceph osd pg-upmap-items 8.2d2 3 18
ceph osd pg-upmap-items 8.2d7 10 9
ceph osd pg-upmap-items 8.2e0 13 15
ceph osd pg-upmap-items 8.2e4 3 18
ceph osd pg-upmap-items 8.2e7 2 23
ceph osd pg-upmap-items 8.2f1 13 15
ceph osd pg-upmap-items 8.2f2 13 23
ceph osd pg-upmap-items 8.2f9 10 9
ceph osd pg-upmap-items 8.303 20 18
ceph osd pg-upmap-items 8.308 13 15
ceph osd pg-upmap-items 8.319 3 18
ceph osd pg-upmap-items 8.383 3 4
ceph osd pg-upmap-items 8.386 13 15
ceph osd pg-upmap-items 8.388 2 14
ceph osd pg-upmap-items 8.39f 12 15
ceph osd pg-upmap-items 8.3c4 13 14
ceph osd pg-upmap-items 8.3fd 22 23
ceph osd pg-upmap-items 8.3fe 3 17
[root@master-local zjh]# source out.txt
set 8.c pg_upmap_items mapping to [3->23]
set 8.1b pg_upmap_items mapping to [10->9]
set 8.1e pg_upmap_items mapping to [3->18]
set 8.23 pg_upmap_items mapping to [20->18]
set 8.25 pg_upmap_items mapping to [1->17]
set 8.38 pg_upmap_items mapping to [3->9]
set 8.4e pg_upmap_items mapping to [3->18]
set 8.7a pg_upmap_items mapping to [12->15]
set 8.83 pg_upmap_items mapping to [6->7]
set 8.8b pg_upmap_items mapping to [3->4]
set 8.92 pg_upmap_items mapping to [22->18]
set 8.94 pg_upmap_items mapping to [3->15]
set 8.b8 pg_upmap_items mapping to [6->9]
set 8.d6 pg_upmap_items mapping to [13->15]
set 8.dc pg_upmap_items mapping to [0->5]
set 8.f8 pg_upmap_items mapping to [10->9]
set 8.101 pg_upmap_items mapping to [12->15]
set 8.10c pg_upmap_items mapping to [1->11]
set 8.119 pg_upmap_items mapping to [8->9]
set 8.12e pg_upmap_items mapping to [10->7]
set 8.138 pg_upmap_items mapping to [13->15]
set 8.16a pg_upmap_items mapping to [2->23]
set 8.18f pg_upmap_items mapping to [1->23]
set 8.1bd pg_upmap_items mapping to [13->15]
set 8.1c7 pg_upmap_items mapping to [13->15]
set 8.1c9 pg_upmap_items mapping to [2->23,12->11]
set 8.1cb pg_upmap_items mapping to [13->14]
set 8.1cc pg_upmap_items mapping to [10->9]
set 8.1cf pg_upmap_items mapping to [3->18]
set 8.1e8 pg_upmap_items mapping to [3->23]
set 8.1ed pg_upmap_items mapping to [13->14]
set 8.1ef pg_upmap_items mapping to [10->7]
set 8.1f1 pg_upmap_items mapping to [3->23]
set 8.20b pg_upmap_items mapping to [6->7]
set 8.215 pg_upmap_items mapping to [13->15]
set 8.218 pg_upmap_items mapping to [10->9]
set 8.219 pg_upmap_items mapping to [10->7]
set 8.21d pg_upmap_items mapping to [13->15]
set 8.238 pg_upmap_items mapping to [10->7]
set 8.23b pg_upmap_items mapping to [2->18]
set 8.23f pg_upmap_items mapping to [12->15]
set 8.246 pg_upmap_items mapping to [10->7]
set 8.24b pg_upmap_items mapping to [2->15]
set 8.254 pg_upmap_items mapping to [1->23]
set 8.268 pg_upmap_items mapping to [10->7]
set 8.27d pg_upmap_items mapping to [10->7]
set 8.284 pg_upmap_items mapping to [2->9]
set 8.28d pg_upmap_items mapping to [3->4,10->7]
set 8.28f pg_upmap_items mapping to [10->9]
set 8.29e pg_upmap_items mapping to [12->14]
set 8.29f pg_upmap_items mapping to [20->18]
set 8.2b3 pg_upmap_items mapping to [3->23]
set 8.2bf pg_upmap_items mapping to [13->15]
set 8.2d2 pg_upmap_items mapping to [3->18]
set 8.2d7 pg_upmap_items mapping to [10->9]
set 8.2e0 pg_upmap_items mapping to [13->15]
set 8.2e4 pg_upmap_items mapping to [3->18]
set 8.2e7 pg_upmap_items mapping to [2->23]
set 8.2f1 pg_upmap_items mapping to [13->15]
set 8.2f2 pg_upmap_items mapping to [13->23]
set 8.2f9 pg_upmap_items mapping to [10->9]
set 8.303 pg_upmap_items mapping to [20->18]
set 8.308 pg_upmap_items mapping to [13->15]
set 8.319 pg_upmap_items mapping to [3->18]
set 8.383 pg_upmap_items mapping to [3->4]
set 8.386 pg_upmap_items mapping to [13->15]
set 8.388 pg_upmap_items mapping to [2->14]
set 8.39f pg_upmap_items mapping to [12->15]
set 8.3c4 pg_upmap_items mapping to [13->14]
set 8.3fd pg_upmap_items mapping to [22->23]
set 8.3fe pg_upmap_items mapping to [3->17]
[root@master-local believer]#
[root@master-local believer]# ceph -w
  cluster:
    id:     3b6d2da2-1d65-45bc-a04f-8872dedd7059
    health: HEALTH_ERR
            2 backfillfull osd(s)
            1 full osd(s)
            2 nearfull osd(s)
            2 pool(s) full
            BlueFS spillover detected on 24 OSD(s)
            application not enabled on 2 pool(s)

  services:
    mon: 3 daemons, quorum master-local,slave2-local,slave3-local (age 6w)
    mgr: master-local(active, since 6w)
    osd: 24 osds: 24 up (since 2d), 24 in (since 2d); 71 remapped pgs

  data:
    pools:   8 pools, 1056 pgs
    objects: 835.93k objects, 52 TiB
    usage:   105 TiB used, 26 TiB / 131 TiB avail
    pgs:     57252/1671860 objects misplaced (3.424%)
             983 active+clean
             63  active+remapped+backfill_wait
             8   active+remapped+backfilling
             2   active+clean+scrubbing+deep

  io:
    recovery: 395 MiB/s, 6 objects/s

3、修复报错pg,OSD_SCRUB_ERRORS 1 scrub errors

(注:原文此处有一张截图,已缺失;根据上下文推测应为 ceph health detail 中显示 OSD_SCRUB_ERRORS 1 scrub errors 及出错 PG 编号的输出——待确认)
解决方法:对出错的 PG 执行修复命令(pgid 可从 ceph health detail 的输出中获取):

ceph pg repair <pgid>

例如,修复 PG 8.32:

ceph pg repair 8.32
  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值