- Disable the system iptables firewall and SELinux (this step is critical)
service iptables stop
chkconfig iptables off
vim /etc/sysconfig/selinux
SELINUX=disabled
Reboot the system: shutdown -r now
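A quick sanity check after the reboot, confirming both are really off (output wording may vary by CentOS release):
getenforce                  # should print: Disabled
service iptables status     # should report that the firewall is not running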
- Deployment plan
ceph-deploy (admin node) --> node1, node2, node3, ... nodeN
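ceph-deploy reaches every node by hostname, so each name in the plan must resolve from the admin node. A hypothetical /etc/hosts layout for the hosts used later in this guide (the IP addresses are placeholders on the 192.168.20.0/24 public network configured further down):
192.168.20.11   zz-mon.zyunsoft.com   zz-mon
192.168.20.21   zz-store01
192.168.20.22   zz-store02
192.168.20.23   zz-store03
192.168.20.24   zz-com04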
- Create the ceph.repo yum repository
vim /etc/yum.repos.d/ceph.repo
[ceph]
name=Ceph packages for $basearch
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
enabled=1
baseurl=http://ceph.com/rpm-firefly/el6/$basearch
priority=1
gpgcheck=1
type=rpm-md
[ceph-source]
name=Ceph source packages
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
enabled=1
baseurl=http://ceph.com/rpm-firefly/el6/SRPMS
priority=1
gpgcheck=1
type=rpm-md
[ceph-noarch]
name=Ceph noarch packages
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
enabled=1
baseurl=http://ceph.com/rpm-firefly/el6/noarch
priority=1
gpgcheck=1
type=rpm-md
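With the repo file saved, a yum metadata refresh confirms that all three repo sections are reachable:
yum clean all
yum makecache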
- Install ceph-deploy on the admin node
yum install -y http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
yum install -y ceph-deploy
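A quick check that the tool installed correctly before continuing:
ceph-deploy --version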
- openssh-server and passwordless SSH access
- Install it on all nodes: yum install openssh-server
- Disable the firewall and SELinux (as in the first step)
- Edit /etc/sudoers: on the "Defaults requiretty" line (line 56 on CentOS 6), add a "!" so it reads "Defaults !requiretty"; ceph-deploy needs this to run sudo over SSH
- ssh-keygen, then ssh-copy-id to each host (see the sketch below)
- Verify that passwordless SSH works
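A minimal sketch of the key distribution, run on the admin node (hostnames are the ones from the plan above; adjust the user if you are not deploying as root):
ssh-keygen -t rsa                   # accept the defaults, empty passphrase
for h in zz-mon zz-store01 zz-store02 zz-store03; do
    ssh-copy-id root@$h             # push the public key to each node
done
ssh root@zz-store01 hostname        # must return without a password prompt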
- Use ceph-deploy to create the cluster, designating the mon node: ceph-deploy new zz-mon.zyunsoft.com
- Edit the ceph.conf configuration file
[global]
osd pool default size = 3
public_network = 192.168.20.0/24
cluster_network = 172.16.20.0/24
[osd]
keyring = /etc/ceph/keyring.$name
osd_mkfs_type = xfs
osd_mkfs_options_xfs = -f
osd_mount_options_xfs = rw,noatime
osd_journal_size = 16384
- Install Ceph on all of the nodes
yum install *argparse* -y    # on every node first: pulls in the python argparse dependency
ceph-deploy install zz-com04 zz-store01 zz-store02 zz-store03
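Each node should now be able to report the installed release (a quick check, run on any node):
ceph --version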
- Create the initial monitor(s) and gather the keys
ceph-deploy mon create-initial
- List the disks on a node with the following command:
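For example, for the first storage node in this deployment (run from the admin node):
ceph-deploy disk list zz-store01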
- Zap the disks Ceph will use (this wipes their partition tables) with the following command (important: this destroys all data on the disks):
- Add each node's disks to the cluster as OSDs
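A sketch for zz-store01; repeat for zz-store02 and zz-store03 (every listed disk is wiped):
ceph-deploy disk zap zz-store01:sdb zz-store01:sdc zz-store01:sdd zz-store01:sde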
ceph-deploy osd create \
    zz-store01:sdb:/srv/ceph/osd.0 zz-store01:sdc:/srv/ceph/osd.1 zz-store01:sdd:/srv/ceph/osd.2 zz-store01:sde:/srv/ceph/osd.3 \
    zz-store02:sdb:/srv/ceph/osd.4 zz-store02:sdc:/srv/ceph/osd.5 zz-store02:sdd:/srv/ceph/osd.6 zz-store02:sde:/srv/ceph/osd.7 \
    zz-store03:sdb:/srv/ceph/osd.8 zz-store03:sdc:/srv/ceph/osd.9 zz-store03:sdd:/srv/ceph/osd.10 zz-store03:sde:/srv/ceph/osd.11
- Copy the ceph.conf and ceph.client.admin.keyring files to the other nodes
ceph-deploy admin zz-store01 zz-store02 zz-store03
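On each node the pushed admin keyring must be readable, or the plain ceph commands below will fail with permission errors (a small but easy-to-miss step):
chmod +r /etc/ceph/ceph.client.admin.keyring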
- Add the OSDs to the cluster
- Check the cluster's disks from a ceph node:
ceph osd ls
ceph osd tree
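Overall cluster state can be checked the same way; with 12 OSDs and osd pool default size = 3, it should eventually settle to HEALTH_OK:
ceph health
ceph -s          # mon quorum, OSD count and PG states at a glance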
- Create the Ceph MDS node
ceph-deploy mds create zz-store01
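To confirm the metadata server has registered with the cluster once it starts on zz-store01:
ceph mds stat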
- Bring the cluster up automatically at boot. 1. Add the OSD filesystem mounts to /etc/fstab:
/dev/sdb1 /var/lib/ceph/osd/ceph-4 xfs rw,noexec,nodev,noatime,nodiratime,barrier=0 0 0
/dev/sdc1 /var/lib/ceph/osd/ceph-5 xfs rw,noexec,nodev,noatime,nodiratime,barrier=0 0 0
/dev/sdd1 /var/lib/ceph/osd/ceph-6 xfs rw,noexec,nodev,noatime,nodiratime,barrier=0 0 0
/dev/sde1 /var/lib/ceph/osd/ceph-7 xfs rw,noexec,nodev,noatime,nodiratime,barrier=0 0 0
2. Edit /etc/rc.local and add:
/etc/init.d/ceph restart
Deployment reference: