以下步骤只是测试环境使用,请谨慎使用
CentOS7环境下ceph安装部署
Ceph部署
1、主机准备
实验在VMware虚拟机上进行,主要是为了对ceph有直观的认识。
第一步:准备5台主机
IP地址 主机名(Hostname)
192.168.1.110 admin-node (该主机用于管理,后续的ceph-deploy工具都在该主机上进行操作)
192.168.1.111 node1 (监控节点)
192.168.1.112 node2 (osd.0节点)
192.168.1.113 node3 (osd.1节点)
192.168.1.114 client-node (客户端,主要利用它挂载ceph集群提供的存储进行测试)
各个节点相同配置:
1.配置ntp
# Install NTP so all nodes stay time-synchronized (clock skew between
# Ceph monitors causes health warnings and can break quorum).
yum -y install ntp
# Keep a pristine backup of the stock config before any local edits.
cp /etc/ntp.conf{,.bak}
# FIX: the original steps installed ntp but never started it, so the
# nodes would never actually sync. Enable and start the daemon.
systemctl enable ntpd
systemctl start ntpd
2.配置hosts
第二步:修改各节点/etc/hosts文件,增加以下内容
192.168.1.111 node1
192.168.1.112 node2
192.168.1.113 node3
192.168.1.114 client-node
说明:ceph-deploy工具都是通过主机名与其他节点通信。
修改主机名的命令为:hostnamectl set-hostname "新的名字"或者直接修改/etc/hostname
3.禁用selinux,并重启
4. 执行 visudo,注释掉下面这一行(允许通过 SSH 非交互地执行 sudo,否则 ceph-deploy 远程调用 sudo 会失败):
#Defaults requiretty
在部署节点上运行:
1.允许无密码 SSH 登录
# Generate a key pair on the admin node (accept the defaults, empty
# passphrase) and push the public key to every node, including the
# admin node itself, so ceph-deploy can log in without a password.
[root@admin-node ~]# ssh-keygen
[root@admin-node ~]#ssh-copy-id node1
[root@admin-node ~]#ssh-copy-id node2
[root@admin-node ~]#ssh-copy-id node3
[root@admin-node ~]#ssh-copy-id admin-node
# Record the login user per host in ~/.ssh/config so that plain
# "ssh nodeN" (and therefore ceph-deploy, which uses the hostnames)
# connects as root without needing --username every time.
[test123@admin-node ~]$ vim ~/.ssh/config
Host node1
Hostname node1
User root
Host node2
Hostname node2
User root
Host node3
Hostname node3
User root
3.sudo yum install -y yum-utils && sudo yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/7/x86_64/ && sudo yum install --nogpgcheck -y epel-release && sudo rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 && sudo rm /etc/yum.repos.d/dl.fedoraproject.org*
4.sudo vim /etc/yum.repos.d/ceph.repo
[root@admin-node my-cluster]# cd /etc/yum.repos.d/
[root@admin-node yum.repos.d]# cat ceph.repo
# Yum repository definition for the Ceph Hammer release on EL7.
# priority=1 requires yum-plugin-priorities so these repos win over EPEL.
[Ceph]
name=Ceph packages for $basearch
baseurl=http://download.ceph.com/rpm-hammer/el7/x86_64/
enabled=1
gpgcheck=1
type=rpm-md
# FIX: the old ceph.com/git gpgkey URL is dead; use the canonical key URL.
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph noarch packages
# FIX: noarch packages live under noarch/, not x86_64/.
baseurl=http://download.ceph.com/rpm-hammer/el7/noarch/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://download.ceph.com/rpm-hammer/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
# Refresh the system, then install the deployment tool.
sudo yum update && sudo yum install ceph-deploy
# The priorities plugin is required for the priority=1 settings in
# ceph.repo to take effect (otherwise EPEL may shadow Ceph packages).
sudo yum install yum-plugin-priorities
在部署节点上切换到root用户:
# ceph-deploy writes the generated ceph.conf, monitor keyrings and logs
# into the current directory, so work from a dedicated directory.
mkdir my-cluster
cd my-cluster
# Declare a new cluster with node1 as the initial monitor; this creates
# ceph.conf and ceph.mon.keyring in the current directory.
ceph-deploy new node1
编辑ceph.conf
# Two replicas so a 2-OSD test cluster can reach active+clean
# (the default of 3 would leave PGs degraded with only two OSDs).
osd pool default size = 2
# FIX: must match the subnet the nodes actually use -- the hosts above
# are on 192.168.1.0/24, not 192.168.122.0/24. Optional when the nodes
# have a single network interface.
public network = 192.168.1.0/24
# Install the Ceph packages on every node and bootstrap the initial
# monitor(s), gathering the cluster keys into the current directory.
ceph-deploy install admin-node node1 node2 node3
ceph-deploy mon create-initial

# Create the OSD data directories non-interactively (one-shot ssh
# instead of the interactive ssh/mkdir/exit sequence) and hand them to
# the ceph user up front: the OSD daemon runs as ceph:ceph, and
# root-owned directories fail activation with
# "ERROR: error creating empty object store ... (13) Permission denied"
# (exactly the failure documented in the troubleshooting notes below).
ssh node2 "sudo mkdir -p /var/local/osd0 && sudo chown -R ceph:ceph /var/local/osd0"
ssh node3 "sudo mkdir -p /var/local/osd1 && sudo chown -R ceph:ceph /var/local/osd1"

# Prepare and activate the two OSDs on their data directories.
ceph-deploy osd prepare node2:/var/local/osd0 node3:/var/local/osd1
ceph-deploy osd activate node2:/var/local/osd0 node3:/var/local/osd1
出现的问题:
[node3][WARNIN] 2016-05-17 17:47:38.010044 7f91dc118800 -1 ** ERROR: error creating empty object store in /var/local/osd1: (13) Permission denied
解决方法:
[root@node2 ~]# ll -ld /var/local/osd0/
drwxr-xr-x 2 root root 80 May 17 17:32 /var/local/osd0/
[root@node2 ~]# chown -R ceph:ceph /var/local/osd0/
# Push ceph.conf and the admin keyring to every node so the ceph CLI
# works there without specifying the monitor address and keyring.
ceph-deploy admin admin-node node1 node2 node3
# Make the admin keyring readable by non-root users on this node.
sudo chmod +r /etc/ceph/ceph.client.admin.keyring
# Expect HEALTH_OK once the placement groups settle.
ceph health