本次安装部署采用ceph 10.2.5 版本
1.初始化节点
1) osd节点 disk 初始化
在每台 osd 服务器上,我们需要对10块 SAS 硬盘分区、创建 xfs 文件系统;对2块做 journal 的 SSD 硬盘分5个区,每个区对应一块SAS硬盘,不创建文件系统。
]# vi parted.sh
#!/bin/bash
# Partition the OSD node's disks for Ceph:
#   - 10 SAS data disks (sda, sdb, sdd, ...): one GPT partition each,
#     formatted as xfs.
#   - 2 SSD journal disks (sdc, sdf): 5 GPT partitions each, one per SAS
#     disk, left unformatted (used as raw Ceph journals).
set -e

if [ ! -x "/sbin/parted" ]; then
  echo "This script requires /sbin/parted to run!" >&2
  exit 1
fi

# SAS data disks (sdc/sdf are deliberately excluded — they are the SSDs).
DISKS="a b d e g h i j k l"
mkfs_pids=()
for i in ${DISKS}; do
  echo "Creating partitions on /dev/sd${i} ..."
  parted -a optimal --script "/dev/sd${i}" -- mktable gpt
  parted -a optimal --script "/dev/sd${i}" -- mkpart primary xfs 0% 100%
  # Give the kernel a moment to create the partition device node.
  sleep 1
  # Format in the background so all 10 disks format in parallel;
  # PIDs are collected so failures can be detected below.
  mkfs.xfs -f "/dev/sd${i}1" &
  mkfs_pids+=($!)
done

# SSD journal disks: 5 partitions each, no filesystem.
SSDS="c f"
for i in ${SSDS}; do
  parted -s "/dev/sd${i}" mklabel gpt
  parted -s "/dev/sd${i}" mkpart primary 0% 20%
  parted -s "/dev/sd${i}" mkpart primary 21% 40%
  parted -s "/dev/sd${i}" mkpart primary 41% 60%
  parted -s "/dev/sd${i}" mkpart primary 61% 80%
  parted -s "/dev/sd${i}" mkpart primary 81% 100%
done

# Reap every background mkfs.xfs; 'wait <pid>' propagates its exit status,
# so under 'set -e' the script aborts if any format failed. Without this
# the script could exit while formatting was still in progress.
for pid in "${mkfs_pids[@]}"; do
  wait "${pid}"
done
echo "All disks partitioned and formatted."
]# bash parted.sh
2) mon 和osd节点 os 初始化
配置 EPEL 源:
]# sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
]# yum clean all
安装 ntp 同步时间
]# yum -y install ntp ntpdate ntp-doc
]# systemctl enable ntpd.service
]# systemctl start ntpd.service
防火墙放开 Ceph 所需要的端口
]# firewall-cmd --zone=public --add-port=6789/tcp --permanent
]# firewall-cmd --zone=public --add-port=6800-7100/tcp --permanent
]# firewall-cmd --reload
关闭 SELINUX
]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
]# setenforce 0
3) 部署节点os初始化
配置节点免密码登录
]# ssh-keygen
]# ssh-copy-id root@ceph-mon1
]# ssh-copy-id root@ceph-mon2
]# ssh-copy-id root@ceph-mon3
配置主机host
]# vi /etc/hosts
10.126.72.59 ceph-mon1
10.126.72.60 ceph-mon2
10.126.72.61 ceph-mon3
10.126.72.62 ceph-osd1
10.126.72.63 ceph-osd2
10.126.72.64 ceph-osd3
- 部署
配置ceph源的环境变量
]# export CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/rpm-jewel/el7
]# export CEPH_DEPLOY_GPG_URL=http://mirrors.163.com/ceph/keys/release.asc
]# yum install -y openssh-server yum-plugin-priorities ceph-deploy
]# mkdir ~/ceph-cluster
]# cd ~/ceph-cluster
]# ceph-deploy new ceph-mon1 ceph-mon2 ceph-mon3
]# ceph-deploy install ceph-mon1 ceph-mon2 ceph-mon3 ceph-osd1 ceph-osd2 ceph-osd3
]# ceph-deploy mon create-initial
]# ceph-deploy disk zap ceph-osd1:sda ceph-osd1:sdb
]# ceph-deploy osd prepare ceph-osd1:sda:/dev/sdc1 ceph-osd1:sdb:/dev/sdc2
]# ceph-deploy osd activate ceph-osd1:sda1:/dev/sdc1 ceph-osd1:sdb1:/dev/sdc2