(1) Environment
MON: 172.16.2.61 (ceph61)
OSD: 172.16.2.60 (ceph60) 172.16.2.63 (ceph63)
(2) Building the cluster
Install the RPM packages: the external dependencies (rpm will prompt for any that are missing) and the Ceph RPMs (http://download.ceph.com/rpm/el7/x86_64/).
rpm -Uvh ~/rpms/extras/*
rpm -ivh ~/rpms/x86_64/*
Configure /etc/hosts with the hostname-to-IP mappings for ceph60, ceph61 and ceph63.
Configure /etc/ceph/ceph.conf.
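A minimal /etc/ceph/ceph.conf for this setup might look like the sketch below; the fsid and monitor address are the ones used in the steps that follow, while the network and replication settings are assumptions:
[global]
fsid = d61f9a31-e17d-4191-812c-8af331eb0abe
mon initial members = ceph61
mon host = 172.16.2.61
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
# assumed /24 network for the 172.16.2.x addresses
public network = 172.16.2.0/24
# assumed; only two OSDs are deployed in this cluster
osd pool default size = 2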
ssh ceph61
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
monmaptool --create --add ceph61 172.16.2.61 --fsid d61f9a31-e17d-4191-812c-8af331eb0abe /tmp/monmap --clobber
ceph-mon --mkfs -i ceph61 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
ceph-mon --conf /etc/ceph/ceph.conf --id ceph61 --name mon.ceph61
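After starting the monitor, a quick sanity check: print the monmap that was fed to it and query the cluster (at this point the cluster should respond, with no OSDs registered yet):
monmaptool --print /tmp/monmap
ceph -s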
ssh ceph60
uuidgen
ceph osd create 221e2823-1cb7-4aa3-8c3a-f60c903fb651 1
mkdir -p /var/lib/ceph/osd/ceph-1
mkfs -t ext4 /dev/sdd1
mount -o user_xattr /dev/sdd1 /var/lib/ceph/osd/ceph-1
ceph-osd -i 1 --mkfs --mkkey --osd-uuid 221e2823-1cb7-4aa3-8c3a-f60c903fb651
ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-1/keyring
ceph osd crush add-bucket ceph60 host
ceph osd crush move ceph60 root=default
ceph osd crush add osd.1 1.0 host=ceph60
ceph-osd --id 1 --name osd.1 --conf /etc/ceph/ceph.conf
The configuration of ceph63 follows the same steps as ceph60: generate a new uuid and use 0 as the OSD id; a sketch of the corresponding commands is shown below.
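Sketch of the ceph63 side, mirroring the ceph60 steps above and assuming the data partition on that host is also /dev/sdd1 (the uuid is regenerated with uuidgen):
ssh ceph63
UUID=$(uuidgen)
ceph osd create $UUID 0
mkdir -p /var/lib/ceph/osd/ceph-0
mkfs -t ext4 /dev/sdd1
mount -o user_xattr /dev/sdd1 /var/lib/ceph/osd/ceph-0
ceph-osd -i 0 --mkfs --mkkey --osd-uuid $UUID
ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
ceph osd crush add-bucket ceph63 host
ceph osd crush move ceph63 root=default
ceph osd crush add osd.0 1.0 host=ceph63
ceph-osd --id 0 --name osd.0 --conf /etc/ceph/ceph.conf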
After configuration, check the cluster state:
ceph -s
    cluster d61f9a31-e17d-4191-812c-8af331eb0abe
     health HEALTH_WARN
            mon.ceph61 low disk space
     monmap e1: 1 mons at {ceph61=172.16.2.61:6789/0}
            election epoch 2, quorum 0 ceph61
     osdmap e21: 2 osds: 2 up, 2 in
            flags sortbitwise
      pgmap v34: 64 pgs, 1 pools, 0 bytes data, 0 objects
            2118 MB used, 20014 MB / 23365 MB avail
                  64 active+clean
ceph osd tree
ID WEIGHT  TYPE NAME       UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 2.00000 root default
-2       0     host ceph61
-4 1.00000     host ceph60
 1 1.00000         osd.1        up  1.00000          1.00000
-3 1.00000     host ceph63
 0 1.00000         osd.0        up  1.00000          1.00000
(3) Creating a block device
ceph osd pool create mypool 100
ceph osd pool set-quota mypool max_bytes 4096000000
rbd create --size 2048 mypool/myimage
Load the kernel modules required by the rbd kernel client: libcrc32c, then libceph.ko and rbd.ko (here loaded from locally built copies):
modprobe libcrc32c
insmod /home/data/libceph.ko
insmod /home/data/rbd.ko
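To confirm the modules are loaded before mapping:
lsmod | grep -E 'libceph|rbd'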
rbd map mypool/myimage --id admin
List the pools:
[root@localhost x86_64]# ceph osd lspools
0 rbd,1 mypool,
[root@localhost x86_64]#
[root@localhost x86_64]# rbd ls mypool
myimage
[root@localhost x86_64]#
Inspect the image:
[root@localhost x86_64]# rbd info mypool/myimage
rbd image 'myimage':
size 2048 MB in 512 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.1036b0f5f44
format: 2
features: layering
flags:
[root@localhost x86_64]#
Show the rbd device mapping:
[root@localhost x86_64]# rbd showmapped
id pool image snap device
1 mypool myimage - /dev/rbd1
[root@localhost x86_64]#
[root@localhost x86_64]# rados df
pool name        KB  objects  clones  degraded  unfound  rd  rd KB  wr  wr KB
mypool         4097        4       0         0        0  19     15  15   4097
rbd               0        0       0         0        0   0      0   0      0
total used  2178828        4
total avail 20485500
total space 23925792
[root@localhost x86_64]#
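The mapped image can now be used like any other block device. A sketch of formatting and mounting it (/dev/rbd1 matches the rbd showmapped output above; /mnt/myimage is an arbitrary mount point):
mkfs.ext4 /dev/rbd1
mkdir -p /mnt/myimage
mount /dev/rbd1 /mnt/myimage
df -h /mnt/myimage
umount /mnt/myimage
rbd unmap /dev/rbd1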