如果一个硬盘故障导致osd节点出现如下的down状态,且一直无法恢复( reweight列等于0,表示osd已经out此集群)

[root@os-node3 ~]# ceph osd tree 

# id    weight  type name       up/down reweight

-1      4       root default

-2      1               host os-node5

24      1                       osd.24   down    0


通过命令获得down状态的osd的ID


osd_id=$(ceph osd tree | grep down | grep osd | awk '{print $3}' | awk -F . '{print $2}')


1)、在集群中删除一个osd硬盘

[root@PBS-OS-node155 ~]# ceph osd rm 24

removed osd.24


2)、在集群中删除一个osd 硬盘 crush map

[root@PBS-OS-node155 ~]# ceph osd crush rm osd.24

removed item id 24 name 'osd.24' from crush map

3)、删除此osd在ceph集群中的认证

[root@PBS-OS-node155 ~]# ceph auth del osd.24

updated

4)、卸载osd所挂载的硬盘

[root@PBS-OS-node155 ~]# umount /var/lib/ceph/osd/ceph-24


摘掉osd的脚本如下


# Extract the numeric ID of the OSD reported "down" (e.g. "osd.24" -> "24").
# NOTE(review): if several OSDs are down this yields multiple IDs — confirm
# only one OSD is down before running, or loop over the result.
osd_id=$(ceph osd tree | grep down | grep osd | awk '{print $3}' | awk -F . '{print $2}')

# Abort if no down OSD was found; otherwise the commands below would run
# with an empty ID (e.g. "umount /var/lib/ceph/osd/ceph-").
[ -n "${osd_id}" ] || { echo "no down osd found" >&2; exit 1; }

# Remove the OSD from the cluster, drop its CRUSH map entry and its
# cephx key, then unmount the backing disk.
ceph osd rm "${osd_id}"

ceph osd crush rm "osd.${osd_id}"

ceph auth del "osd.${osd_id}"

umount "/var/lib/ceph/osd/ceph-${osd_id}"


更换完硬盘后再把此硬盘重新加入集群osd


# Allocate a fresh OSD ID from the cluster (reuses the lowest free ID).
osd_id=$(ceph osd create)
[ -n "${osd_id}" ] || { echo "ceph osd create failed" >&2; exit 1; }

# Replacement disk; can be passed as $1, defaults to the original /dev/sdf.
disk=${1:-/dev/sdf}

# Create a fresh XFS filesystem on the new disk and mount it at the
# OSD data directory (create the mount point in case it does not exist).
mkfs.xfs -f "${disk}"

mkdir -p "/var/lib/ceph/osd/ceph-${osd_id}"
mount "${disk}" "/var/lib/ceph/osd/ceph-${osd_id}"

mount -o remount,user_xattr "/var/lib/ceph/osd/ceph-${osd_id}"

# Initialise the OSD data directory and generate its key, register the
# key with the cluster, mark the OSD as sysvinit-managed, then start it.
ceph-osd -i "${osd_id}" --mkfs --mkkey

ceph auth add "osd.${osd_id}" osd 'allow *' mon 'allow profile osd' -i "/var/lib/ceph/osd/ceph-${osd_id}/keyring"

touch "/var/lib/ceph/osd/ceph-${osd_id}/sysvinit"

/etc/init.d/ceph start "osd.${osd_id}"