First, confirm the version of Ceph that is installed:
# ceph -v
ceph version 10.2.2 (45107e21c568dd033c2f0a3107dec8f0b0e58374)
Partition the disks:
# parted /dev/vdb -s mklabel gpt
# parted /dev/vdb -s mkpart primary 1 10G
# parted /dev/vdb -s mkpart primary 10G 15G
# parted /dev/vdb -s mkpart primary 15G 20G
# parted /dev/vdc -s mklabel gpt
# parted /dev/vdc -s mkpart primary 1 10G
# parted /dev/vdc -s mkpart primary 10G 15G
# parted /dev/vdc -s mkpart primary 15G 20G
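Before using these partitions for monitor and OSD data, the resulting layout can be checked. This is an optional verification step, not part of the original procedure; parted's print subcommand and lsblk are standard utilities:
# parted /dev/vdb print
# parted /dev/vdc print
# lsblk /dev/vdb /dev/vdc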
Steps to create the Ceph cluster:
# uuidgen
# mkdir -p /data/mon/mon.0
# monmaptool --create --add 0 127.0.0.1 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
# ceph-mon -i 0 -c /etc/ceph/ceph.conf --mkfs --monmap /tmp/monmap --mon-data /data/mon/mon.0 --debug_mon 10 --fsid=a7f64266-0894-4f1e-a635-d0aeaca0e993
# ceph-mon -i 0

# ceph osd create
# mkdir -p /data/osd/osd.0
# ceph-osd -i 0 --mkfs --osd-data=/data/osd/osd.0 -c /etc/ceph/ceph.conf --debug_osd 20 --mkkey
# ceph-osd -i 0

# ceph osd create
# mkdir -p /data/osd/osd.1
# ceph-osd -i 1 --mkfs --osd-data=/data/osd/osd.1 -c /etc/ceph/ceph.conf --debug_osd 20 --mkkey
# ceph-osd -i 1

# ceph osd crush add-bucket unkownrack rack
# ceph osd tree
# ceph osd crush add-bucket host0 host
# ceph osd crush add-bucket host1 host
# ceph osd crush move host0 rack=unkownrack
# ceph osd crush move host1 rack=unkownrack
# ceph osd crush move unkownrack root=default
# ceph osd crush create-or-move osd.0 1.0 host=host0 rack=unkownrack root=default
# ceph osd crush create-or-move osd.1 1.0 host=host1 rack=unkownrack root=default
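With the monitor and both OSDs running, the cluster state and the CRUSH hierarchy built above can be inspected. These are standard ceph CLI queries added here as an optional sanity check; they are not part of the original sequence:
# ceph -s
# ceph osd tree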
The ceph.conf configuration file:
[global]
max open files = 131072
log file = /var/log/ceph/ceph-$name.log
pid file = /var/run/ceph/$name.pid
auth supported
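For reference, a fuller [global] section for this kind of single-host test setup might look like the sketch below. The fsid matches the one passed to monmaptool and ceph-mon --mkfs above; the mon initial members, mon host, auth value, and pool/CRUSH defaults are assumptions added for illustration and were not in the original file:
[global]
    fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993    # must match the fsid used with monmaptool above
    mon initial members = 0                        # assumption: the single monitor created as mon.0
    mon host = 127.0.0.1                           # assumption: the address given to monmaptool --add
    max open files = 131072
    log file = /var/log/ceph/ceph-$name.log
    pid file = /var/run/ceph/$name.pid
    auth supported = none                          # assumption: cephx disabled for a local test cluster
    osd pool default size = 2                      # assumption: replicate across the two OSDs
    osd crush chooseleaf type = 1                  # assumption: place replicas on different hosts (host0/host1)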