Ceph deployment

Deployment plan: ceph1 and ceph2 are set up as the initial cluster first, then ceph3 is deployed as an added node.

host     public network     cluster network     disks
ceph1    192.168.232.10     192.168.233.10      /dev/sdb, /dev/sdc, /dev/sdd
ceph2    192.168.232.20     192.168.233.20      /dev/sdb, /dev/sdc, /dev/sdd
ceph3    192.168.232.30     192.168.233.30      /dev/sdb, /dev/sdc, /dev/sdd
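Before installing anything, it is worth confirming that every node can reach the other nodes on both networks. A minimal connectivity check (a sketch, using only the addresses planned in the table above):

# run from any node; loops over both the public and the cluster addresses
for ip in 192.168.232.10 192.168.232.20 192.168.232.30 \
          192.168.233.10 192.168.233.20 192.168.233.30; do
    if ping -c 1 -W 1 "$ip" > /dev/null 2>&1; then
        echo "$ip reachable"
    else
        echo "$ip UNREACHABLE"
    fi
done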

Basic configuration

IP addresses

ceph1:

root@localhost:~# cat /etc/netplan/00-installer-config.yaml
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      addresses:
      - 192.168.232.10/24
      gateway4: 192.168.232.2
      nameservers:
        addresses:
        - 192.168.232.2
    ens38:
      addresses:
      - 192.168.233.10/24
      nameservers:
        addresses:
        - 192.168.233.2
  version: 2

ceph2:

root@localhost:~# cat /etc/netplan/00-installer-config.yaml
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      addresses:
      - 192.168.232.20/24
      gateway4: 192.168.232.2
      nameservers:
        addresses:
        - 192.168.232.2
    ens38:
      addresses:
      - 192.168.233.20/24
      nameservers:
        addresses:
        - 192.168.233.2
  version: 2
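After editing a netplan file, the configuration still has to be applied and verified on each node. A minimal sketch, assuming the interface names ens33/ens38 shown above:

# apply the netplan configuration and confirm both interfaces got their addresses
netplan apply
ip -4 addr show ens33
ip -4 addr show ens38
ip route show default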

Hosts file

root@localhost:~# egrep -v "^#|^$" /etc/hosts
127.0.0.1 localhost
192.168.232.10 ceph1
192.168.232.20 ceph2

ceph1

root@ceph1:~# hostnamectl set-hostname ceph1

ceph2

root@ceph2:~# hostnamectl set-hostname ceph2

SSH key distribution

# Key distribution script: expect.sh

#!/bin/bash

hosts=$(egrep -v '^#|^$' /etc/hosts | awk '{print $2}')
pass='password'

echo $hosts

for i in $hosts
do
    expect -c "
    spawn ssh-copy-id -i root@$i
    expect {
    \"(yes/no/*)\" {send \"yes\r\";exp_continue}
    \"password\" {send \"$pass\r\";exp_continue}
}
"
done

ceph1

#1. Install expect
root@ceph1:~# apt install -y expect
#2. Generate an SSH key pair
root@ceph1:~# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
#3. Distribute the key
root@ceph1:~# ./expect.sh

ceph2

#1. Install expect
root@ceph2:~# apt install -y expect
#2. Generate an SSH key pair
root@ceph2:~# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
#3. Distribute the key
root@ceph2:~# ./expect.sh
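Once both nodes have run the script, passwordless SSH can be verified non-interactively. A quick sketch using the hostnames from /etc/hosts:

# BatchMode makes ssh fail instead of prompting, so broken keys are obvious
for h in ceph1 ceph2; do
    ssh -o BatchMode=yes -o ConnectTimeout=3 "$h" hostname \
        || echo "passwordless SSH to $h is NOT working"
done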

Time service

ceph1

#1. Set the timezone
root@localhost:~# timedatectl set-timezone Asia/Shanghai
#2. Install chrony
root@localhost:~# apt install -y chrony
#3. Edit the configuration file
root@localhost:~# egrep -v "^#|^$" /etc/chrony/chrony.conf
keyfile /etc/chrony/chrony.keys
driftfile /var/lib/chrony/chrony.drift
logdir /var/log/chrony
maxupdateskew 100.0
rtcsync
makestep 1 3
server ntp.aliyun.com iburst
allow 192.168.232.0/24
#4. Restart the service
root@localhost:~# systemctl restart chronyd
#5. Check
root@localhost:~# chronyc sources -v
210 Number of sources = 1

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| /   '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* ntp.aliyun.com                2   6    17    24   -884us[-9646us] +/-   39ms

ceph2

#1. Set the timezone
root@localhost:~# timedatectl set-timezone Asia/Shanghai
#2. Install chrony
root@localhost:~# apt install -y chrony
#3. Edit the configuration file
root@localhost:~# egrep -v "^#|^$" /etc/chrony/chrony.conf
keyfile /etc/chrony/chrony.keys
driftfile /var/lib/chrony/chrony.drift
logdir /var/log/chrony
maxupdateskew 100.0
rtcsync
makestep 1 3
server ceph1 iburst
#4. Restart the service
root@localhost:~# systemctl restart chronyd
#5. Check
root@ceph2:~# chronyc sources -v
210 Number of sources = 1

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| /   '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* ceph1                         3   6    17    12    +31us[ +152us] +/-  109ms
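An asterisk in front of the source means the node is synchronized against it. For a further sanity check on either node (nothing beyond a default chrony install is assumed):

# detailed sync state (offset, stratum, reference ID)
chronyc tracking
# confirm the system clock is reported as synchronized
timedatectl status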

Ceph deployment

Download ceph-deploy from GitHub and install it on ceph1.

ceph1

#1. Install ceph-deploy
root@ceph1:~# apt-get install python3 python3-pip -y
root@ceph1:~# git clone https://github.com/ceph/ceph-deploy.git
root@ceph1:~# cd ceph-deploy
root@ceph1:~/ceph-deploy# python3 setup.py install
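A quick check that the tool installed correctly and is on the PATH:

# verify the ceph-deploy installation
ceph-deploy --version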

#2. Install the Ceph cluster
root@ceph1:~# mkdir ceph
root@ceph1:~# cd ceph
root@ceph1:~/ceph# ceph-deploy install ceph1 ceph2
root@ceph1:~/ceph# ceph-deploy new ceph1 ceph2
root@ceph1:~/ceph# cat ceph.conf
[global]
fsid = d5e1b2dd-16d7-4814-9d82-8a982446fe9f
mon_initial_members = ceph1, ceph2
mon_host = 192.168.232.10,192.168.232.20
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_pool_default_size = 2
osd_pool_default_min_size = 1
public_network = 192.168.232.0/24
cluster_network = 192.168.233.0/24
root@ceph1:~/ceph# ceph-deploy mon create-initial
root@ceph1:~/ceph# ceph-deploy mgr create ceph1 ceph2
root@ceph1:~/ceph# ceph-deploy admin ceph1 ceph2
root@ceph1:~/ceph# ceph-deploy osd create --data /dev/sdb ceph1
root@ceph1:~/ceph# ceph-deploy osd create --data /dev/sdc ceph1
root@ceph1:~/ceph# ceph-deploy osd create --data /dev/sdd ceph1
root@ceph1:~/ceph# ceph-deploy osd create --data /dev/sdb ceph2
root@ceph1:~/ceph# ceph-deploy osd create --data /dev/sdc ceph2
root@ceph1:~/ceph# ceph-deploy osd create --data /dev/sdd ceph2

#3. Check
root@ceph1:~/ceph# ceph -s
  cluster:
    id:     d5e1b2dd-16d7-4814-9d82-8a982446fe9f
    health: HEALTH_OK

  services:
    mon: 2 daemons, quorum ceph1,ceph2 (age 5m)
    mgr: ceph1(active, since 4m), standbys: ceph2
    osd: 6 osds: 6 up (since 13s), 6 in (since 13s)

  task status:

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   6.0 GiB used, 594 GiB / 600 GiB avail
    pgs:     1 active+clean

root@ceph1:~/ceph# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.58612  root default
-3         0.29306      host ceph1
 0    hdd  0.09769          osd.0       up   1.00000  1.00000
 1    hdd  0.09769          osd.1       up   1.00000  1.00000
 2    hdd  0.09769          osd.2       up   1.00000  1.00000
-5         0.29306      host ceph2
 3    hdd  0.09769          osd.3       up   1.00000  1.00000
 4    hdd  0.09769          osd.4       up   1.00000  1.00000
 5    hdd  0.09769          osd.5       up   1.00000  1.00000

#4. Create a storage pool
root@ceph1:~/ceph# ceph osd pool create pool01 64 64

#5. View the storage pools
root@ceph1:~/ceph# ceph osd pool ls
device_health_metrics
pool01

root@ceph1:~/ceph# ceph df
--- RAW STORAGE ---
CLASS  SIZE     AVAIL    USED    RAW USED  %RAW USED
hdd    600 GiB  594 GiB  33 MiB   6.0 GiB       1.01
TOTAL  600 GiB  594 GiB  33 MiB   6.0 GiB       1.01

--- POOLS ---
POOL                   ID  PGS  STORED  OBJECTS  USED  %USED  MAX AVAIL
device_health_metrics   1    1     0 B        0   0 B      0    282 GiB
pool01                  2   64     0 B        0   0 B      0    282 GiB

root@ceph1:~/ceph# ceph -s
  cluster:
    id:     d5e1b2dd-16d7-4814-9d82-8a982446fe9f
    health: HEALTH_OK

  services:
    mon: 2 daemons, quorum ceph1,ceph2 (age 8m)
    mgr: ceph1(active, since 8m), standbys: ceph2
    osd: 6 osds: 6 up (since 3m), 6 in (since 3m)

  data:
    pools:   2 pools, 65 pgs
    objects: 0 objects, 0 B
    usage:   6.0 GiB used, 594 GiB / 600 GiB avail
    pgs:     65 active+clean
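The new pool is not yet associated with an application, so recent Ceph releases will warn about it. A small follow-up sketch: tag the pool (rbd is chosen here only as an example) and smoke-test it with rados; the object and file names are placeholders.

# associate the pool with an application to avoid the 'application not enabled' warning
ceph osd pool application enable pool01 rbd
# write, list and delete a small test object
echo "hello ceph" > /tmp/ceph-test.txt
rados -p pool01 put test-obj /tmp/ceph-test.txt
rados -p pool01 ls
rados -p pool01 rm test-obj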

Adding a node

IP addresses

ceph3:

root@localhost:~# cat /etc/netplan/00-installer-config.yaml
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      addresses:
      - 192.168.232.30/24
      gateway4: 192.168.232.2
      nameservers:
        addresses:
        - 192.168.232.2
    ens38:
      addresses:
      - 192.168.233.30/24
      nameservers:
        addresses:
        - 192.168.233.2
  version: 2

Hosts file

root@localhost:~# egrep -v "^#|^$" /etc/hosts
127.0.0.1 localhost
192.168.232.10 ceph1
192.168.232.20 ceph2
192.168.232.30 ceph3

ceph3

root@ceph:~# hostnamectl set-hostname ceph3

SSH key distribution

# Key distribution script: expect.sh

#!/bin/bash

hosts=$(egrep -v '^#|^$' /etc/hosts | awk '{print $2}')
pass='password'

echo $hosts

for i in $hosts
do
    expect -c "
    spawn ssh-copy-id -i root@$i
    expect {
    \"(yes/no/*)\" {send \"yes\r\";exp_continue}
    \"password\" {send \"$pass\r\";exp_continue}
}
"
done

ceph1

#1. Distribute the key
root@ceph1:~# ./expect.sh

ceph2

#1. Distribute the key
root@ceph2:~# ./expect.sh

ceph3

#1. Install expect
root@ceph3:~# apt install -y expect
#2. Generate an SSH key pair
root@ceph3:~# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
#3. Distribute the key
root@ceph3:~# ./expect.sh

Time service

ceph3

#1. Set the timezone
root@localhost:~# timedatectl set-timezone Asia/Shanghai
#2. Install chrony
root@localhost:~# apt install -y chrony
#3. Edit the configuration file
root@localhost:~# egrep -v "^#|^$" /etc/chrony/chrony.conf
keyfile /etc/chrony/chrony.keys
driftfile /var/lib/chrony/chrony.drift
logdir /var/log/chrony
maxupdateskew 100.0
rtcsync
makestep 1 3
server ceph1 iburst
#4. Restart the service
root@localhost:~# systemctl restart chronyd
#5. Check
root@ceph3:~# chronyc sources -v
210 Number of sources = 1

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| /   '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* ceph1                         3   6    17     9  -9305ns[  -67us] +/-   57ms

Add the node to the cluster

ceph1

#1. Add the node
root@ceph1:~/ceph# ceph-deploy install ceph3
root@ceph1:~/ceph# cat ceph.conf
[global]
fsid = d5e1b2dd-16d7-4814-9d82-8a982446fe9f
mon_initial_members = ceph1, ceph2, ceph3
mon_host = 192.168.232.10,192.168.232.20,192.168.232.30
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_pool_default_size = 3
osd_pool_default_min_size = 2
public_network = 192.168.232.0/24
cluster_network = 192.168.233.0/24
root@ceph1:~/ceph# ceph-deploy mon create ceph3
root@ceph1:~/ceph# ceph-deploy mgr create ceph3
root@ceph1:~/ceph# ceph-deploy admin ceph3
root@ceph1:~/ceph# ceph-deploy --overwrite-conf config push ceph1 ceph2 ceph3
root@ceph1:~/ceph# ceph-deploy osd create --data /dev/sdb ceph3
root@ceph1:~/ceph# ceph-deploy osd create --data /dev/sdc ceph3
root@ceph1:~/ceph# ceph-deploy osd create --data /dev/sdd ceph3

#2. Check
root@ceph1:~/ceph# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.87918  root default
-3         0.29306      host ceph1
 0    hdd  0.09769          osd.0       up   1.00000  1.00000
 1    hdd  0.09769          osd.1       up   1.00000  1.00000
 2    hdd  0.09769          osd.2       up   1.00000  1.00000
-5         0.29306      host ceph2
 3    hdd  0.09769          osd.3       up   1.00000  1.00000
 4    hdd  0.09769          osd.4       up   1.00000  1.00000
 5    hdd  0.09769          osd.5       up   1.00000  1.00000
-7         0.29306      host ceph3
 6    hdd  0.09769          osd.6       up   1.00000  1.00000
 7    hdd  0.09769          osd.7       up   1.00000  1.00000
 8    hdd  0.09769          osd.8       up   1.00000  1.00000

root@ceph1:~/ceph# ceph -s
  cluster:
    id:     d5e1b2dd-16d7-4814-9d82-8a982446fe9f
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph1(active, since 38m), standbys: ceph2, ceph3
    osd: 9 osds: 9 up (since 14s), 9 in (since 14s)

  data:
    pools:   2 pools, 65 pgs
    objects: 0 objects, 0 B
    usage:   9.1 GiB used, 891 GiB / 900 GiB avail
    pgs:     65 active+clean


root@ceph1:~/ceph# ceph osd pool get pool01 size
size: 2
root@ceph1:~/ceph# ceph osd pool set pool01 size 3
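With a third host in place, the replica count of the existing pool is raised from 2 to 3 to match the new osd_pool_default_size. The change and the resulting data movement can be checked afterwards, for example with:

# confirm the new replica count and watch the PGs settle across three hosts
ceph osd pool get pool01 size
ceph pg stat
ceph -s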

Removing a node

  1. The cluster in its normal, healthy state:
root@ceph1:~/ceph# ceph -s
  cluster:
    id:     d5e1b2dd-16d7-4814-9d82-8a982446fe9f
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 2h)
    mgr: ceph1(active, since 18h), standbys: ceph3
    osd: 9 osds: 9 up (since 2h), 9 in (since 2h)

  data:
    pools:   2 pools, 65 pgs
    objects: 0 objects, 0 B
    usage:   9.2 GiB used, 891 GiB / 900 GiB avail
    pgs:     65 active+clean
  2. Now shut down ceph3 to simulate a failed host going offline.
#1. Check the cluster status
root@ceph1:~# ceph -s
  cluster:
    id:     d5e1b2dd-16d7-4814-9d82-8a982446fe9f
    health: HEALTH_WARN
            1/3 mons down, quorum ceph1,ceph2
            3 osds down
            1 host (3 osds) down
            Degraded data redundancy: 1 pg undersized

  services:
    mon: 3 daemons, quorum ceph1,ceph2 (age 95s), out of quorum: ceph3
    mgr: ceph1(active, since 2m)
    osd: 9 osds: 6 up (since 2m), 9 in (since 46h)

  data:
    pools:   2 pools, 65 pgs
    objects: 0 objects, 0 B
    usage:   6.2 GiB used, 594 GiB / 600 GiB avail
    pgs:     48 active+undersized
             17 active+clean

#2. Check the OSD status
root@ceph1:~# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.87918  root default
-3         0.29306      host ceph1
 0    hdd  0.09769          osd.0       up   1.00000  1.00000
 1    hdd  0.09769          osd.1       up   1.00000  1.00000
 2    hdd  0.09769          osd.2       up   1.00000  1.00000
-5         0.29306      host ceph2
 3    hdd  0.09769          osd.3       up   1.00000  1.00000
 4    hdd  0.09769          osd.4       up   1.00000  1.00000
 5    hdd  0.09769          osd.5       up   1.00000  1.00000
-7         0.29306      host ceph3
 6    hdd  0.09769          osd.6     down   1.00000  1.00000
 7    hdd  0.09769          osd.7     down   1.00000  1.00000
 8    hdd  0.09769          osd.8     down   1.00000  1.00000
  3. Manually remove the OSDs
#1. Mark the OSDs out of the cluster
root@ceph1:~# ceph osd out osd.6
marked out osd.6.
root@ceph1:~# ceph osd out osd.7
marked out osd.7.
root@ceph1:~# ceph osd out osd.8
marked out osd.8.

#2. Check the cluster status. Marking the OSDs out makes Ceph start rebalancing and copy their data to the remaining OSDs.
root@ceph1:~# ceph -s
  cluster:
    id:     d5e1b2dd-16d7-4814-9d82-8a982446fe9f
    health: HEALTH_WARN
            1/3 mons down, quorum ceph1,ceph2

  services:
    mon: 3 daemons, quorum ceph1,ceph2 (age 9m), out of quorum: ceph3
    mgr: ceph1(active, since 10m)
    osd: 9 osds: 6 up (since 9m), 6 in (since 23s)

  data:
    pools:   2 pools, 65 pgs
    objects: 0 objects, 0 B
    usage:   6.2 GiB used, 594 GiB / 600 GiB avail
    pgs:     65 active+clean

root@ceph1:~# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.87918  root default
-3         0.29306      host ceph1
 0    hdd  0.09769          osd.0       up   1.00000  1.00000
 1    hdd  0.09769          osd.1       up   1.00000  1.00000
 2    hdd  0.09769          osd.2       up   1.00000  1.00000
-5         0.29306      host ceph2
 3    hdd  0.09769          osd.3       up   1.00000  1.00000
 4    hdd  0.09769          osd.4       up   1.00000  1.00000
 5    hdd  0.09769          osd.5       up   1.00000  1.00000
-7         0.29306      host ceph3
 6    hdd  0.09769          osd.6     down         0  1.00000
 7    hdd  0.09769          osd.7     down         0  1.00000
 8    hdd  0.09769          osd.8     down         0  1.00000

#3. Stop the OSD services
#The server here is already powered off, so the OSDs are already down.
#If the host were still running, stop the services first with systemctl stop ceph-osd@<id>.service (see the sketch below).
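# Sketch, only applicable if ceph3 were still up: stop its OSD daemons before removal
# (run on ceph3; the ids 6, 7 and 8 are the ones shown in the osd tree above).
for id in 6 7 8; do
    systemctl stop ceph-osd@"$id".service
done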

#4. Remove the corresponding OSD entries from the CRUSH map, so they no longer receive data.
root@ceph1:~# ceph osd crush remove osd.6
removed item id 6 name 'osd.6' from crush map
root@ceph1:~# ceph osd crush remove osd.7
removed item id 7 name 'osd.7' from crush map
root@ceph1:~# ceph osd crush remove osd.8
removed item id 8 name 'osd.8' from crush map

root@ceph1:~# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.58612  root default
-3         0.29306      host ceph1
 0    hdd  0.09769          osd.0       up   1.00000  1.00000
 1    hdd  0.09769          osd.1       up   1.00000  1.00000
 2    hdd  0.09769          osd.2       up   1.00000  1.00000
-5         0.29306      host ceph2
 3    hdd  0.09769          osd.3       up   1.00000  1.00000
 4    hdd  0.09769          osd.4       up   1.00000  1.00000
 5    hdd  0.09769          osd.5       up   1.00000  1.00000
-7               0      host ceph3
 6               0  osd.6             down         0  1.00000
 7               0  osd.7             down         0  1.00000
 8               0  osd.8             down         0  1.00000

#5. Delete the OSD authentication keys
root@ceph1:~# ceph auth del osd.6
updated
root@ceph1:~# ceph auth del osd.7
updated
root@ceph1:~# ceph auth del osd.8
updated

#6. Remove the OSDs
root@ceph1:~# ceph osd rm osd.6
removed osd.6
root@ceph1:~# ceph osd rm osd.7
removed osd.7
root@ceph1:~# ceph osd rm osd.8
removed osd.8

root@ceph1:~# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.58612  root default
-3         0.29306      host ceph1
 0    hdd  0.09769          osd.0       up   1.00000  1.00000
 1    hdd  0.09769          osd.1       up   1.00000  1.00000
 2    hdd  0.09769          osd.2       up   1.00000  1.00000
-5         0.29306      host ceph2
 3    hdd  0.09769          osd.3       up   1.00000  1.00000
 4    hdd  0.09769          osd.4       up   1.00000  1.00000
 5    hdd  0.09769          osd.5       up   1.00000  1.00000
-7               0      host ceph3

#7. Remove the host bucket
root@ceph1:~# ceph osd crush remove ceph3
removed item id -7 name 'ceph3' from crush map

root@ceph1:~# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.58612  root default
-3         0.29306      host ceph1
 0    hdd  0.09769          osd.0       up   1.00000  1.00000
 1    hdd  0.09769          osd.1       up   1.00000  1.00000
 2    hdd  0.09769          osd.2       up   1.00000  1.00000
-5         0.29306      host ceph2
 3    hdd  0.09769          osd.3       up   1.00000  1.00000
 4    hdd  0.09769          osd.4       up   1.00000  1.00000
 5    hdd  0.09769          osd.5       up   1.00000  1.00000

#As shown above, marking an OSD out drops its REWEIGHT to 0 and its data is redistributed to the other OSDs;
#removing an OSD from the CRUSH map drops its WEIGHT to 0, so no new data will be written to it.
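While OSDs are being marked out and removed, the redistribution described above can be followed per OSD, for example with:

# per-OSD weight, reweight, utilisation and PG count; re-run to watch data move
ceph osd df tree
# overall recovery / backfill progress
ceph -s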
  4. Manually remove the mon
#1. Check the mon status
root@ceph1:~# ceph mon stat
e6: 3 mons at {ceph1=[v2:192.168.232.10:3300/0,v1:192.168.232.10:6789/0],ceph2=[v2:192.168.232.20:3300/0,v1:192.168.232.20:6789/0],ceph3=[v2:192.168.232.30:3300/0,v1:192.168.232.30:6789/0]}, election epoch 88, leader 0 ceph1, quorum 0,1 ceph1,ceph2

#2. Remove the monitor from the cluster. If this step would leave only 2 monitors, add or remove monitors
#until you have the number of ceph-mon daemons required for quorum (keep it an odd number).
#Remove the failed monitors first and the healthy ones last, so the cluster stays responsive.
#Here we do not remove two mon nodes; removing just ceph3 serves as the example.
root@ceph1:~# ceph mon remove ceph3
removing mon.ceph3 at [v2:192.168.232.30:3300/0,v1:192.168.232.30:6789/0], there will be 2 monitors

root@ceph1:~/ceph# ceph -s
  cluster:
    id:     1d399eec-a8d7-432a-b8dc-a248231b3981
    health: HEALTH_OK

  services:
    mon: 2 daemons, quorum ceph1,ceph2 (age 26s)
    mgr: ceph1(active, since 45m), standbys: ceph2
    osd: 6 osds: 6 up (since 10m), 6 in (since 41m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   9.1 GiB used, 591 GiB / 600 GiB avail
    pgs:     1 active+clean
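After the removal, the two remaining monitors should still form a quorum, which can be double-checked with:

# list the monitors and the current quorum membership
ceph mon stat
ceph quorum_status --format json-pretty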


#3. Power ceph3 back on and restore its mon
root@ceph1:~/ceph# ceph-deploy mon create ceph3

#The add reports errors, and the messy output does not make the cause obvious
[ceph3][INFO  ] Running command: systemctl start ceph-mon@ceph3
[ceph3][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph3.asok mon_status
[ceph3][ERROR ] admin_socket: exception getting command descriptions: [Errno 2] No such file or directory
[ceph3][WARNIN] monitor: mon.ceph3, might not be running yet
[ceph3][INFO  ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph3.asok mon_status
[ceph3][ERROR ] admin_socket: exception getting command descriptions: [Errno 2] No such file or directory
[ceph3][WARNIN] monitor ceph3 does not exist in monmap

#In testing, the following sometimes gets past it (the address is ceph3's public IP):
root@ceph1:~/ceph# ceph mon add ceph3 192.168.232.30
#then run the create again
root@ceph1:~/ceph# ceph-deploy mon create ceph3

#If it still fails with the same error, remove the stale mon data directory on ceph3
root@ceph3:~# rm -fr /var/lib/ceph/mon/ceph-ceph3/
#then run the create once more and it should succeed
root@ceph1:~/ceph# ceph-deploy mon create ceph3
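Once the create finally succeeds, the monitor map should list three mons again:

# confirm ceph3 has rejoined the monitor quorum
ceph mon stat
ceph -s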