1. 配置 chrony 时间同步并分发配置文件:
[root@ceph1 ~]# systemctl restart chronyd
[root@ceph1 ~]# for i in {2..5}
> do
> scp /etc/chrony.conf 192.168.4.$i:/etc/chrony.conf
> done
chrony.conf 100% 1130 866.5KB/s 00:00
chrony.conf 100% 1130 1.1MB/s 00:00
chrony.conf 100% 1130 1.0MB/s 00:00
chrony.conf 100% 1130 61.1KB/s 00:00
[root@ceph1 ~]# for i in {2..5}
> do
> ssh ceph$i systemctl restart chronyd
> done
节点角色规划:
ceph1 ~ ceph3: monitor/osd
ceph4: mds
参考文档: http://docs.ceph.org.cn/ (Ceph 官方中文文档网站)
查看运行状态:
[root@ceph1 ~]# cd ceph-cluster/
[root@ceph1 ceph-cluster]# ceph -s
cluster 29a54064-be3d-446c-8e62-64b0f14e73bb
health HEALTH_ERR
64 pgs are stuck inactive for more than 300 seconds
64 pgs stuck inactive
no osds
monmap e1: 3 mons at {ceph1=192.168.4.1:6789/0,ceph2=192.168.4.2:6789/0,ceph3=192.168.4.3:6789/0}
election epoch 4, quorum 0,1,2 ceph1,ceph2,ceph3
osdmap e1: 0 osds: 0 up, 0 in
flags sortbitwise
pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
0 kB used, 0 kB / 0 kB avail
64 creating
[root@ceph1 ~]# for vm in ceph{1..3}; do ssh $vm parted /dev/vdb mklabel gpt; done
信息: You may need to update /etc/fstab.
信息: You may need to update /etc/fstab.
信息: You may need to update /etc/fstab.
[root@ceph1 ~]# for vm in ceph{1..3}
> do
> ssh $vm parted /dev/vdb mkpart primary 1M 50%
> done
信息: You may need to update /etc/fstab.
信息: You may need to update /etc/fstab.
信息: You may need to update /etc/fstab.
[root@ceph1 ~]# for vm in ceph{1..3}
> do
> ssh $vm parted /dev/vdb mkpart primary 50% 100%
> done
信息: You may need to update /etc/fstab.
信息: You may need to update /etc/fstab.
信息: You may need to update /etc/fstab.
[root@ceph1 ~]# for vm in ceph{1..3}; do ssh ${vm} chown ceph:ceph /dev/vdb? ; done
[root@ceph1 ~]# lsblk
[root@ceph1 ~]# ll /dev/vdb1
brw-rw----. 1 ceph ceph 252, 17 8月 18 03:11 /dev/vdb1
每份数据默认存储 3 个副本(必须保证 3 份数据都可用);若某块磁盘损坏,集群会自动进行数据迁移以恢复副本数。
[root@ceph1 ~]# ceph osd lspools
0 rbd,
[root@ceph1 ~]# ceph osd pool get rbd size
size: 3
撤销保护删除快照:
[root@ceph1 ~]# rbd snap unprotect image --snap image-snap1
[root@ceph1 ~]# rbd snap rm image --snap image-snap1
[root@ceph1 ~]# rbd list