1. Add the IP address and hostname of every node to /etc/hosts on each node:
echo 192.168.63.141 admin-node >> /etc/hosts
echo 192.168.63.142 ceph-node1 >> /etc/hosts
....
Copy the file to the other hosts: scp /etc/hosts root@ceph-node1:/etc/
On admin-node, generate an SSH key pair: ssh-keygen
Then copy the public key to the other nodes: ssh-copy-id root@ceph-node1
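Both the hosts file and the SSH public key can be pushed to every node in one small loop (a sketch, assuming the node names admin-node, ceph-node1 and ceph-node2 used elsewhere in this guide):
for node in ceph-node1 ceph-node2; do
    scp /etc/hosts root@$node:/etc/hosts
    ssh-copy-id root@$node
done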
2. Open the ports required by the Ceph monitor, OSD, and MDS daemons in the operating system firewall (run the following commands on every machine).
Start firewalld: service firewalld start
firewall-cmd --zone=public --add-port=6789/tcp --permanent
firewall-cmd --zone=public --add-port=6800-7100/tcp --permanent
firewall-cmd --reload
firewall-cmd --zone=public --list-all
Example:
[root@admin-node ~]# service firewalld start
Redirecting to /bin/systemctl start firewalld.service
[root@admin-node ~]# firewall-cmd --zone=public --add-port=6789/tcp --permanent
success
[root@admin-node ~]# firewall-cmd --zone=public --add-port=6800-7100/tcp --permanent
success
[root@admin-node ~]# firewall-cmd --reload
success
[root@admin-node ~]# firewall-cmd --zone=public --list-all
public (default, active)
interfaces: eno16777736
sources:
services: dhcpv6-client ssh
ports: 6789/tcp 6800-7100/tcp
masquerade: no
forward-ports:
icmp-blocks:
rich rules:
[root@admin-node ~]#
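The same port rules must be applied on every node; with the passwordless SSH set up in step 1, they can be pushed from admin-node in one loop (a sketch, assuming firewalld is already running on each node):
for node in ceph-node1 ceph-node2; do
    ssh root@$node "firewall-cmd --zone=public --add-port=6789/tcp --permanent && firewall-cmd --zone=public --add-port=6800-7100/tcp --permanent && firewall-cmd --reload"
done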
3. Disable SELinux on all machines
[root@admin-node ~]# setenforce 0
[root@admin-node ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
Verify that /etc/selinux/config now contains SELINUX=disabled; see the check below.
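A quick way to verify both the runtime mode and the on-disk config: getenforce should report Permissive (Disabled only takes effect after a reboot), and the grep should print SELINUX=disabled:
# getenforce
# grep '^SELINUX=' /etc/selinux/config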
4. Install and configure the NTP service on all machines
yum -y install ntp ntpdate
[root@admin-node ~]# ntpdate pool.ntp.org
3 Sep 10:13:10 ntpdate[13011]: adjust time server 202.118.1.81 offset -0.003634 sec
[root@admin-node ~]# systemctl restart ntpdate.service
[root@admin-node ~]# systemctl restart ntpd.service
[root@admin-node ~]# systemctl enable ntpd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpd.service to /usr/lib/systemd/system/ntpd.service.
[root@admin-node ~]# systemctl enable ntpdate.service
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpdate.service to /usr/lib/systemd/system/ntpdate.service.
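To confirm that ntpd is actually synchronizing, inspect the peer list; an asterisk marks the currently selected time source:
# ntpq -p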
5. Add the Ceph Giant release repository on all Ceph nodes and refresh yum:
[root@admin-node ~]# rpm -Uhv http://ceph.com/rpm-giant/el7/noarch/ceph-release-1-0.el7.noarch.rpm
Retrieving http://ceph.com/rpm-giant/el7/noarch/ceph-release-1-0.el7.noarch.rpm
warning: /var/tmp/rpm-tmp.5WFToc: Header V4 RSA/SHA1 Signature, key ID 460f3994: NOKEY
Preparing... ################################# [100%]
Updating / installing...
1:ceph-release-1-0.el7 ################################# [100%]
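After the repository is installed, the yum metadata can be refreshed so the Ceph packages become visible:
# yum clean all
# yum makecache
# yum repolist | grep -i ceph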
6. Build the cluster with ceph-deploy. First install ceph-deploy on the admin node:
yum -y install ceph-deploy
mkdir /etc/ceph
cd /etc/ceph
ceph-deploy new admin-node
Install ceph on every node:
[root@admin-node ceph]# ceph-deploy install ceph-node1 admin-node ceph-node2
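A quick sanity check after the install is to confirm the Ceph version on each node (a sketch; run locally or over SSH on every node):
# ceph --version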
7. Create the first Ceph monitor on admin-node:
ceph-deploy mon create-initial
The initial deployment is done at this point; check the cluster status:
[root@admin-node ceph]# ceph -s
cluster 5035c6ba-96c8-4378-a086-a8b579089dd6
health HEALTH_ERR
64 pgs stuck inactive
64 pgs stuck unclean
no osds
monmap e1: 1 mons at {admin-node=192.168.63.140:6789/0}
election epoch 2, quorum 0 admin-node
osdmap e1: 0 osds: 0 up, 0 in
flags sortbitwise
pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
0 kB used, 0 kB / 0 kB avail
64 creating
The output shows the cluster is not yet healthy, so more configuration is needed.
8. Add OSDs
Under /var/local, create osd1, osd2, and osd3 and make them writable with chmod 777 osd*, as shown below.
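Spelled out as commands:
# mkdir /var/local/osd1 /var/local/osd2 /var/local/osd3
# chmod 777 /var/local/osd*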
[root@admin-node ceph]# ceph-deploy --overwrite-conf osd prepare admin-node:/var/local/osd1
[root@admin-node ceph]# ceph-deploy --overwrite-conf osd activate admin-node:/var/local/osd1
[root@admin-node ceph]# ceph -s
cluster 7863ef1c-1e65-4db7-af36-1310975e056e
health HEALTH_WARN
64 pgs degraded
64 pgs stuck inactive
64 pgs stuck unclean
64 pgs undersized
monmap e1: 3 mons at {admin-node=192.168.63.141:6789/0,ceph-node1=192.168.63.142:6789/0,ceph-node2=192.168.63.143:6789/0}
election epoch 4, quorum 0,1,2 admin-node,ceph-node1,ceph-node2
osdmap e5: 1 osds: 1 up, 1 in
flags sortbitwise
pgmap v7: 64 pgs, 1 pools, 0 bytes data, 0 objects
6890 MB used, 10987 MB / 17878 MB avail
64 undersized+degraded+peered
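A single OSD cannot satisfy the default replication level, so the remaining OSDs should be prepared and activated the same way (a sketch, assuming the osd2 and osd3 directories were created on ceph-node1 and ceph-node2 respectively):
# ceph-deploy --overwrite-conf osd prepare ceph-node1:/var/local/osd2 ceph-node2:/var/local/osd3
# ceph-deploy --overwrite-conf osd activate ceph-node1:/var/local/osd2 ceph-node2:/var/local/osd3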
Add another monitor:
[root@admin-node ceph]# ceph-deploy mon create ceph-node2
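Once the new monitor has joined, quorum can be verified from any monitor node:
# ceph quorum_status --format json-pretty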
To use a whole disk for an OSD instead of a local directory, format and mount it first, then point prepare at the mount point:
# mkfs.xfs /dev/sdb
# mount /dev/sdb /opt/ceph/
ceph-deploy osd prepare {ceph-node}:/path/to/directory
Mount the data partition:
mount -o user_xattr /dev/{hdd} /var/lib/ceph/osd/{cluster-name}-{osd-number}
Run, for example:
sudo mount -o user_xattr /dev/sdc1 /var/lib/ceph/osd/ceph-0
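To double-check that the data partition is mounted where the OSD expects it:
# mount | grep /var/lib/ceph/osd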
If anything strange goes wrong during deployment and cannot be resolved, you can simply wipe everything and start again from scratch:
How to purge Ceph data
First purge all previous Ceph data. On a fresh install this step is unnecessary; when redeploying, run the commands below:
ceph-deploy purgedata {ceph-node} [{ceph-node}]
ceph-deploy forgetkeys
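For a completely clean slate the Ceph packages themselves can also be removed and the deployment files in the working directory deleted (a sketch; ceph-deploy purge uninstalls Ceph from the listed nodes):
ceph-deploy purge {ceph-node} [{ceph-node}]
rm -f ceph.conf ceph*.keyring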
Check whether the configuration succeeded:
# ceph health
HEALTH_WARN too few PGs per OSD (10 < min 30)
Increase the number of PGs. Use the formula Total PGs = (#OSDs * 100) / pool size to decide pg_num (pgp_num should be set to the same value as pg_num). Here 20 * 100 / 2 = 1000, and Ceph officially recommends rounding to the nearest power of two, so choose 1024. If everything goes smoothly, ceph health should now report HEALTH_OK:
# ceph osd pool set rbd size 2
set pool 0 size to 2
# ceph osd pool set rbd min_size 2
set pool 0 min_size to 2
# ceph osd pool set rbd pg_num 1024
set pool 0 pg_num to 1024
# ceph osd pool set rbd pgp_num 1024
set pool 0 pgp_num to 1024
# ceph health
HEALTH_OK
If these operations go through without problems, remember to record the same settings in ceph.conf and sync the file to all deployed nodes:
# vi ceph.conf
[global]
fsid = 6349efff-764a-45ec-bfe9-ed8f5fa25186
mon_initial_members = ceph-mon1, ceph-mon2, ceph-mon3
mon_host = 192.168.2.101,192.168.2.102,192.168.2.103
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd pool default size = 2
osd pool default min size = 2
osd pool default pg num = 1024
osd pool default pgp num = 1024

# ceph-deploy admin ceph-adm ceph-mon1 ceph-mon2 ceph-mon3 ceph-osd1 ceph-osd2
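If only the configuration file changed, it can also be pushed on its own (a sketch, using the node names from the file above):
# ceph-deploy --overwrite-conf config push ceph-mon1 ceph-mon2 ceph-mon3 ceph-osd1 ceph-osd2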
If yum ever reports "Another app is currently holding the yum lock; waiting for it to exit...", force-kill the stale yum process by removing its lock file:
# rm -f /var/run/yum.pid
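Before removing the lock file it is worth confirming that no yum process is genuinely still running:
# ps aux | grep yum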