环境准备
主机名 | 分配角色 | IP 地址 |
---|---|---|
ceph1 | ceph-deploy+client | 192.168.5.26 |
ceph2 | mon+osd | 192.168.5.27 |
ceph3 | mon+osd | 192.168.5.28 |
开始部署
# --- Base system preparation (run on ALL nodes unless noted otherwise) ---
yum install wget -y
cd /etc/yum.repos.d
# Back up the stock repo file and switch to the Aliyun mirror for faster downloads.
mv CentOS-Base.repo CentOS-Base.repo.bak
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum makecache
# Import the ELRepo signing key and repo so a mainline (kernel-ml) kernel can be installed.
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --disablerepo=\* --enablerepo=elrepo-kernel repolist
yum --disablerepo=\* --enablerepo=elrepo-kernel install kernel-ml.x86_64 -y
# Swap the stock kernel tools for the versions matching kernel-ml.
yum remove kernel-tools-libs.x86_64 kernel-tools.x86_64 -y
yum --disablerepo=\* --enablerepo=elrepo-kernel install kernel-ml-tools.x86_64 -y
# List the GRUB menu entries, then make entry 0 (the new kernel) the default and reboot.
awk -F \' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
grub2-editenv list
grub2-set-default 0
reboot
# After reboot: verify the new kernel is running.
uname -r
yum update -y && yum install epel-release -y
# Set each node's hostname (run the matching line on the matching host only).
hostnamectl set-hostname ceph1
hostnamectl set-hostname ceph2
hostnamectl set-hostname ceph3
# Add the three cluster entries below to /etc/hosts on every node.
vim /etc/hosts
192.168.5.26 ceph1
192.168.5.27 ceph2
192.168.5.28 ceph3
# Disable the firewall so cluster traffic is not blocked (lab setup; in
# production, open the Ceph ports instead).
systemctl stop firewalld
systemctl disable firewalld
- 设置免密登录 (在主节点 192.168.5.26 配置 )
# Generate an SSH key pair on the deploy node and push it to the other nodes
# so ceph-deploy can log in without a password.
ssh-keygen
ssh-copy-id root@ceph2
ssh-copy-id root@ceph3
# Set the timezone and keep clocks synchronized — Ceph monitors are sensitive
# to clock skew (see the mon clock drift settings added to ceph.conf later).
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
yum install ntp -y
systemctl enable ntpd
systemctl start ntpd
# Confirm NTP synchronization status.
ntpstat
# Create a Ceph Mimic repo file pointing at the Tsinghua mirror; the file
# contents follow below. Create this on every node.
vim /etc/yum.repos.d/ceph.repo
# Architecture-specific binary packages ($basearch).
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc
priority=1
# Architecture-independent packages (includes ceph-deploy).
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc
priority=1
# Source RPMs (not needed for a normal install, kept for completeness).
[ceph-source]
name=Ceph source packages
baseurl=https://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc
priority=1
- 安装 ceph-deploy(在主节点 192.168.5.26 执行 )
# Install the ceph-deploy tool and create a working directory — the generated
# ceph.conf and keyrings land in the current directory, so stay in it for all
# subsequent ceph-deploy commands.
yum install ceph-deploy -y
mkdir ceph-cluster
cd ceph-cluster
- 安装 epel-release (所有节点均 执行 )
yum install epel-release -y
- 创建集群 (在主节点 192.168.5.26 执行 )
# Create a new cluster with three monitor nodes; this writes ceph.conf and
# the monitor keyring into the current working directory.
ceph-deploy new ceph1 ceph2 ceph3
# Append the settings below to the generated ceph.conf.
vim ceph.conf
public network = 192.168.5.0/24
cluster network = 192.168.5.0/24
# 3 replicas per object; writes are accepted with at least 2 available.
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 128
osd pool default pgp num = 128
osd pool default crush rule = 0
osd crush chooseleaf type = 1
max open files = 131072
# FIX: was "ms bing ipv6" — a typo; the real option name is "ms bind ipv6".
# As written the setting was unknown to Ceph and silently ignored.
ms bind ipv6 = false
[mon]
# Tolerate larger clock drift between monitors before raising health warnings.
mon clock drift allowed = 10
mon clock drift warn backoff = 30
mon osd full ratio = .95
mon osd nearfull ratio = .85
mon osd down out interval = 600
mon osd report timeout = 300
# Allow "ceph osd pool delete" (off by default as a safety measure).
mon allow pool delete = true
[osd]
osd recovery max active = 3
osd max backfills = 5
osd max scrubs = 2
osd mkfs type = xfs
osd mkfs options xfs = -f -i size=1024
osd mount options xfs = rw,noatime,inode64,logbsize=256k,delaylog
filestore max sync interval = 5
osd op threads = 2
- 安装Ceph软件到指定节点(在主节点 192.168.5.26 执行 )
ceph-deploy install --no-adjust-repos ceph1 ceph2 ceph3
- 部署初始的monitors,并获得keys(在主节点 192.168.5.26 执行 )
ceph-deploy mon create-initial
- 将生成的配置文件 以及密钥文件 复制到各节点(在主节点 192.168.5.26 执行 )
ceph-deploy admin ceph1 ceph2 ceph3
- 配置ceph-mgr(在主节点 192.168.5.26 执行 )
ceph-deploy mgr create cephadmin
- 配置OSD(在主节点 192.168.5.26 执行)
# Create one OSD per node on the spare disk /dev/sdb (run from the deploy
# node; the disk must be empty/unpartitioned).
ceph-deploy osd create --data /dev/sdb ceph1
ceph-deploy osd create --data /dev/sdb ceph2
ceph-deploy osd create --data /dev/sdb ceph3
# Install the dashboard module package (required on the mgr nodes for Mimic+).
yum install ceph-mgr-dashboard -y
# Seed the mgr module list via ceph.conf: "mgr initial modules" is read by
# the monitors, hence the [mon] section.
vim ceph.conf
[mon]
mgr initial modules = dashboard
# Push the updated conf to all nodes and restart the mgr daemons.
ceph-deploy --overwrite-conf config push ceph1 ceph2 ceph3
# NOTE(review): each ceph-mgr@<host> unit exists only on its own host — run
# the restart on every node (or use "systemctl restart ceph-mgr.target" per
# node); running all three on one host will fail for the remote units.
systemctl restart ceph-mgr@ceph1 ceph-mgr@ceph2 ceph-mgr@ceph3
# FIX: Mimic stores mgr module settings in the centralized config database;
# use "ceph config set mgr ..." consistently (the original mixed the
# pre-Mimic "ceph config-key set" path with "ceph config set").
ceph config set mgr mgr/dashboard/server_port 8080
ceph config set mgr mgr/dashboard/server_addr 192.168.5.26
ceph config set mgr mgr/dashboard/ssl false
# Disable/re-enable the module so the new settings take effect, then set the
# login credentials for the web UI.
ceph mgr module disable dashboard
ceph mgr module enable dashboard
ceph dashboard set-login-credentials admin qwe@qwe123
- 到此 安装ceph环境完成 可以登录 所设置的ip 查看
参考原文