## 删除 osd.8 盘:先用 lsblk 查看该盘当前的挂载状态
[root@ceph24 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 558.4G 0 disk
└─sda1 8:1 0 558.4G 0 part /
sdb 8:16 0 1.8T 0 disk
├─sdb1 8:17 0 1.8T 0 part /var/lib/ceph/osd/ceph-8
└─sdb2 8:18 0 10G 0 part
[root@ceph24 ~]# ceph osd tree
-6 9.04099 host ceph24
8 1.80820 osd.8 up 1.00000 1.00000
12 1.80820 osd.12 up 1.00000 1.00000
16 1.80820 osd.16 up 1.00000 1.00000
20 1.80820 osd.20 up 1.00000 1.00000
24 1.80820 osd.24 up 1.00000 1.00000
### 把osd盘out出来
[root@ceph24 ~]# ceph osd out osd.8
marked out osd.8.
[root@ceph24 ~]# ceph osd tree
-6 9.04099 host ceph24
8 1.80820 osd.8 up 0 1.00000
12 1.80820 osd.12 up 1.00000 1.00000
16 1.80820 osd.16 up 1.00000 1.00000
20 1.80820 osd.20 up 1.00000 1.00000
24 1.80820 osd.24 up 1.00000 1.00000
### 停止服务
[root@ceph24 ~]# ceph osd down osd.8
marked down osd.8.
[root@ceph24 ~]# systemctl stop ceph-osd@8
[root@ceph24 ~]# ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 45.20493 root default
-6 9.04099 host ceph24
8 1.80820 osd.8 down 0 1.00000
12 1.80820 osd.12 up 1.00000 1.00000
16 1.80820 osd.16 up 1.00000 1.00000
20 1.80820 osd.20 up 1.00000 1.00000
24 1.80820 osd.24 up 1.00000 1.00000
## 从 CRUSH map 中移除 osd.8,并删除其 auth 认证密钥
[root@ceph24 ~]# ceph osd crush remove osd.8
removed item id 8 name 'osd.8' from crush map
[root@ceph24 ~]# ceph auth del osd.8
updated
## 用 ceph osd rm 将 osd.8 从集群中彻底删除
[root@ceph24 ~]# ceph osd rm osd.8
removed osd.8
[root@ceph24 ~]# ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 43.39673 root default
-6 7.23279 host ceph24
12 1.80820 osd.12 up 1.00000 1.00000
16 1.80820 osd.16 up 1.00000 1.00000
20 1.80820 osd.20 up 1.00000 1.00000
24 1.80820 osd.24 up 1.00000 1.00000
###umount 该盘
[root@ceph24 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 558.4G 0 disk
└─sda1 8:1 0 558.4G 0 part /
sdb 8:16 0 1.8T 0 disk
├─sdb1 8:17 0 1.8T 0 part /var/lib/ceph/osd/ceph-8
└─sdb2 8:18 0 10G 0 part
[root@ceph24 ~]# umount /var/lib/ceph/osd/ceph-8
## 用 ceph-disk zap 清除该盘的分区表(GPT)和数据(注意:zap 是抹除分区表,并非格式化文件系统)
[root@ceph24 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 558.4G 0 disk
└─sda1 8:1 0 558.4G 0 part /
sdb 8:16 0 1.8T 0 disk
├─sdb1 8:17 0 1.8T 0 part
└─sdb2 8:18 0 10G 0 part
[root@ceph24 ~]# ceph-disk zap /dev/sdb
****************************************************************************
Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk
verification and recovery are STRONGLY recommended.
****************************************************************************
GPT data structures destroyed! You may now partition the disk using fdisk or
other utilities.
Creating new GPT entries.
The operation has completed successfully.
[root@ceph24 ~]#
#### 该盘现在可以用作其他用途了