搭建Ceph分布式集群存储,配置Ceph作为openstack后端的统一存储,为glance、nova、cinder、swift 提供存储支持。
(1)在dashboard界面创建三个Centos7虚拟机CEPH1,CEPH2,CEPH3,绑定浮动ip
(2)在ceph1生成密钥,使ceph1可以无密钥登录到CEPH2,CEPH3
[root@ceph1 ~]# hostnamectl set-hostname ceph1
[root@ceph1 ~]# vi /etc/hosts
10.0.0.155 ceph1
10.0.0.156 ceph2
10.0.0.157 ceph3
[root@ceph1 ~]# ssh-keygen
[root@ceph1 ~]# ssh-copy-id root@ceph2
[root@ceph1 ~]# ssh-copy-id root@ceph3
[root@ceph1 ~]# setenforce 0
[root@ceph1 ~]# cat /etc/selinux/config
[root@ceph2 ~]# hostnamectl set-hostname ceph2
[root@ceph2 ~]# vi /etc/hosts
10.0.0.155 ceph1
10.0.0.156 ceph2
10.0.0.157 ceph3
[root@ceph2 ~]# setenforce 0
[root@ceph2 ~]# cat /etc/selinux/config
[root@ceph3 ~]# hostnamectl set-hostname ceph3
[root@ceph3 ~]# vi /etc/hosts
10.0.0.155 ceph1
10.0.0.156 ceph2
10.0.0.157 ceph3
[root@ceph3 ~]# setenforce 0
[root@ceph3 ~]# cat /etc/selinux/config
(3)安装时钟同步服务
[root@ceph1 ~]# rm -rfv /etc/yum.repos.d/*
[root@ceph1 ~]# scp root@192.168.100.20:/etc/yum.repos.d/ftp.repo /etc/yum.repos.d/
[root@ceph1 ~]# vi /etc/yum.repos.d/ftp.repo
[centos]
name=centos
baseurl=ftp://192.168.100.10/centos
gpgcheck=0
enabled=1
[iaas]
name=iaas
baseurl=ftp://192.168.100.10/iaas/iaas-repo
gpgcheck=0
enabled=1
[root@ceph1 ~]# yum repolist
[root@ceph1 ~]# yum -y install ntp ntpdate
[root@ceph1 ~]# sed -i -e '/server/d' -e "/fudge/d" /etc/ntp.conf
[root@ceph1 ~]# ntpdate 192.168.100.10
[root@ceph1 ~]# systemctl enable ntpd
[root@ceph2 ~]# rm -rfv /etc/yum.repos.d/*
[root@ceph2 ~]# scp root@ceph1:/etc/yum.repos.d/ftp.repo /etc/yum.repos.d/
[root@ceph2 ~]# yum -y install ntp ntpdate
[root@ceph2 ~]# sed -i -e '/server/d' -e "/fudge/d" /etc/ntp.conf
[root@ceph2 ~]# ntpdate 192.168.100.10
[root@ceph2 ~]# systemctl enable ntpd
[root@ceph3 ~]# rm -rfv /etc/yum.repos.d/*
[root@ceph3 ~]# scp root@ceph1:/etc/yum.repos.d/ftp.repo /etc/yum.repos.d/
[root@ceph3 ~]# yum -y install ntp ntpdate
[root@ceph3 ~]# sed -i -e '/server/d' -e "/fudge/d" /etc/ntp.conf
[root@ceph3 ~]# ntpdate 192.168.100.10
[root@ceph3 ~]# systemctl enable ntpd
(4)创建Ceph集群
[root@ceph1 ~]# yum install ceph-deploy -y
[root@ceph1 ~]# mkdir /etc/ceph
[root@ceph1 ~]# cd /etc/ceph/
[root@ceph1 ceph]# ceph-deploy new ceph1
[root@ceph1 ceph]# ceph-deploy install ceph1 ceph2 ceph3
[root@ceph1 ceph]# ceph -v
[root@ceph1 ceph]# ceph-deploy --overwrite-conf mon create-initial
[root@ceph1 ceph]# ceph -s
(5)创建OSD
[root@ceph1 ceph]# ceph-deploy disk list ceph1
[root@ceph1 ceph]# mkdir /opt/osd1
[root@ceph1 ceph]# chmod 777 /opt/osd1
[root@ceph1 ceph]# ssh ceph2
[root@ceph2 ~]# mkdir /opt/osd2
[root@ceph2 ~]# chmod 777 /opt/osd2
[root@ceph2 ~]# exit
[root@ceph1 ceph]# ssh ceph3
[root@ceph3 ~]# mkdir /opt/osd3
[root@ceph3 ~]# chmod 777 /opt/osd3
[root@ceph3 ~]# exit
[root@ceph1 ceph]# ceph-deploy osd prepare ceph1:/opt/osd1 ceph2:/opt/osd2 ceph3:/opt/osd3
[root@ceph1 ceph]# ceph-deploy osd activate ceph1:/opt/osd1 ceph2:/opt/osd2 ceph3:/opt/osd3
[root@ceph1 ceph]# ceph -s
[root@ceph1 ceph]# ceph-deploy admin ceph{1,2,3}
(6)ceph运维
[root@ceph1 ceph]# cd /root
[root@ceph1 ~]# ceph status //查看ceph集群的整体运行状态
[root@ceph1 ~]# ceph -w //实时监视ceph集群状态变化
[root@ceph1 ~]# ceph quorum_status --format json-pretty //ceph monitor仲裁状态
[root@ceph1 ~]# ceph mon dump //导出ceph monitor信息
[root@ceph1 ~]# ceph df //检查集群使用状态
[root@ceph1 ~]# ceph mon stat //检查ceph monitor状态
[root@ceph1 ~]# ceph osd stat //检查OSD状态
[root@ceph1 ~]# ceph pg stat //检查PG(归置组,Placement Group)状态
[root@ceph1 ~]# ceph pg dump //列表pg
[root@ceph1 ~]# ceph osd lspools //列表ceph存储池
[root@ceph1 ~]# ceph osd tree //检查osd的CRUSH
[root@ceph1 ~]# ceph auth list //列表集群的认证密钥
20.登录192.168.100.10/dashboard,使用centos7镜像创建三台云主机来搭建Ceph 分布式集群存储;新建/opt/osd目录作为Ceph存储后端;在搭建Ceph集群后, 使用命令查看集群状态。
[root@ceph-node1 ceph]# ceph -s
21.错误现象为所有组件均无法正常使用;试修复上述错误并查询openstack用户列表信息。
[root@xiandian ~]# openstack user list