1. Environment Preparation
OS: CentOS-8.5.2111-x86_64-dvd1.iso
Ceph version: Quincy 17.2.1
1.1 Disable the Firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
1.2 Disable SELinux
sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
setenforce 0
reboot
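After the reboot, confirm SELinux is actually off:
getenforce   # expected output: Disabled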
1.3 Configure the Yum Repositories
# Back up the stock repo files
rename .repo .repo.bak CentOS-Linux-*.repo
# Quote the heredoc delimiter so $basearch is written literally instead of being expanded by the shell
cat <<'END' > /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-quincy/el8/x86_64/
enabled=1
gpgcheck=0
type=rpm-md
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-quincy/el8/noarch/
enabled=1
gpgcheck=0
type=rpm-md
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-quincy/el8/SRPMS/
enabled=1
gpgcheck=0
type=rpm-md
END
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-8.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
yum clean all && yum makecache
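A quick sanity check that the new repositories resolve:
yum repolist   # Ceph, Ceph-noarch and ceph-source should appear alongside the base repos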
1.4 Time Synchronization
dnf install chrony -y
systemctl start chronyd.service && systemctl enable chronyd.service
timedatectl set-timezone "Asia/Shanghai"
# Edit /etc/chrony.conf: comment out the default pool line and add the two servers below
vi /etc/chrony.conf
#pool 2.centos.pool.ntp.org iburst
server ntp.aliyun.com iburst
server cn.ntp.org.cn iburst
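If you prefer a non-interactive edit, the same change can be scripted; a sketch, assuming the stock chrony.conf still contains the default pool line:
sed -i 's/^pool 2.centos.pool.ntp.org iburst/#&/' /etc/chrony.conf
cat <<END >> /etc/chrony.conf
server ntp.aliyun.com iburst
server cn.ntp.org.cn iburst
END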
systemctl restart chronyd.service
# List the configured time sources and their sync state
chronyc sources -v
# Check the current system time
date
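To confirm chrony has actually locked onto a source:
chronyc tracking   # 'Leap status : Normal' indicates a good sync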
1.5 Download cephadm
curl --silent --remote-name --location https://github.com/ceph/ceph/raw/quincy/src/cephadm/cephadm
chmod +x cephadm
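An optional sanity check that the script downloaded intact (it is a plain Python file, not an HTML error page):
head -1 cephadm   # should print a python3 shebang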
2. Deployment
2.1 Dependencies
dnf install python3 podman
Switch podman to a domestic registry mirror:
# Use the v2 registries.conf format only; mixing the old [registries.search]
# sections with unqualified-search-registries in one file makes podman reject the config
cat <<END > /etc/containers/registries.conf
unqualified-search-registries = ["0k0953tv.mirror.aliyuncs.com", "registry.redhat.io", "docker.io"]
END
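To verify podman parses the new file (the exact output layout varies by podman version):
podman info | grep -A 5 registries   # the mirror should be listed, with no config error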
2.2 Bootstrap the Cluster with cephadm
Run on all three nodes:
./cephadm add-repo --release quincy
./cephadm install
mkdir -p /etc/ceph
Run on node ceph-01:
cephadm bootstrap --mon-ip 192.168.120.71
Install on all three nodes:
cephadm install ceph-common
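At this point the cluster should respond from ceph-01 (health will stay HEALTH_WARN until OSDs are added):
ceph -s        # cluster status
ceph orch ps   # containers started by cephadm (mon, mgr, etc.)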
2.3 Add Nodes to the Cluster
# Run on node ceph-01
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-01
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-02
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-03
# Alternatively, run the following on all three nodes so that every node can SSH to every other node without a password:
ssh-keygen -t rsa
ssh-copy-id -f -i /root/.ssh/id_rsa.pub root@ceph-01
ssh-copy-id -f -i /root/.ssh/id_rsa.pub root@ceph-02
ssh-copy-id -f -i /root/.ssh/id_rsa.pub root@ceph-03
# Add the nodes to the cluster
ceph orch host add ceph-02 192.168.120.72
ceph orch host add ceph-03 192.168.120.73
# Add the _admin label; once it is set, ceph.conf and ceph.client.admin.keyring are copied to the host automatically
ceph orch host label add ceph-02 _admin
ceph orch host label add ceph-03 _admin
# If the _admin label is not set, copy the config manually
scp /etc/ceph/ceph.conf root@ceph-02:/etc/ceph/
scp /etc/ceph/ceph.conf root@ceph-03:/etc/ceph/
scp /etc/ceph/ceph.client.admin.keyring root@ceph-02:/etc/ceph/
scp /etc/ceph/ceph.client.admin.keyring root@ceph-03:/etc/ceph/
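Verify that all three hosts joined the cluster and carry the expected labels:
ceph orch host ls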
2.4 Add OSDs
# Note: /dev/sdb below is an unused disk on each node
ceph orch daemon add osd ceph-01:/dev/sdb
ceph orch daemon add osd ceph-02:/dev/sdb
ceph orch daemon add osd ceph-03:/dev/sdb
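Confirm the OSDs came up:
ceph orch device ls   # /dev/sdb should now show as unavailable (in use)
ceph osd tree         # all three OSDs should be listed as 'up'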
2.5 Create a Filesystem Volume
ceph fs volume create testfs
# Deploy two MDS daemons for testfs, placed on ceph-02 and ceph-03
ceph orch apply mds testfs --placement="2 ceph-02 ceph-03"
# Add an MDS daemon on node ceph-01
ceph orch daemon add mds testfs ceph-01
# Check the MDS status
ceph mds stat
# Check the filesystem status
ceph fs status testfs
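As a quick usage check, the new volume can be mounted with the kernel client. A minimal sketch, assuming the client host has /etc/ceph/ceph.conf and the admin keyring in place (the /mnt/testfs mountpoint is arbitrary; on older kernels the fs= option is spelled mds_namespace=):
mkdir -p /mnt/testfs
# mount.ceph (from ceph-common) resolves the monitor addresses from /etc/ceph/ceph.conf
mount -t ceph :/ /mnt/testfs -o name=admin,fs=testfs
df -h /mnt/testfs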