cephadm集群搭建
---
操作系统选用 ubuntu-server 22.04 版本
| 主机名 | IP |
| --- | --- |
| node1 | 10.0.0.10 |
| node2 | 10.0.0.11 |
| node3 | 10.0.0.12 |
| 操作系统 | 虚拟化软件 |
| --- | --- |
| Ubuntu22.04 | VMware15 |
1.配置hosts解析(所有节点)
# Append static name resolution entries for all three cluster nodes.
{
  echo '10.0.0.10 node1'
  echo '10.0.0.11 node2'
  echo '10.0.0.12 node3'
} >> /etc/hosts
2.制作离线源(所有节点)
# Unpack the offline Quincy package mirror and point APT at it.
tar zxvf ceph_quincy.tar.gz -C /opt/
# Keep a backup of the original APT sources before replacing them.
cp /etc/apt/sources.list{,.bak}
# Flat local repository entry; [trusted=yes] skips GPG verification
# (acceptable only for a locally built offline mirror).
cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/ceph_quincy/debs/
EOF
# 'apt-get clean' takes no arguments ('clean all' is yum syntax).
apt-get clean
apt-get update
3.配置时间同步(所有节点)
# Set the timezone, enable NTP synchronization, and write the
# synced system time back to the hardware clock.
timedatectl set-timezone Asia/Shanghai
timedatectl set-ntp yes
hwclock --systohc
4.安装docker(所有节点)
apt -y install docker-ce
5.安装cephadm(node1)
apt install -y cephadm
6.导入ceph镜像(所有节点)
docker load -i cephadm_images_v17.tar
6.1.搭建制作本地仓库(node1)
# Load the registry image and start a local registry on node1:5000.
docker load -i registry.tar
# NOTE(review): running by raw image ID (3a0f7b0a13ef) is fragile — the ID
# must match the image inside registry.tar; prefer tagging it by name first.
docker run -d --name registry -p 5000:5000 --restart always 3a0f7b0a13ef
# Use '>' (overwrite), not '>>': appending to an existing daemon.json
# produces invalid JSON and docker will refuse to start.
cat > /etc/docker/daemon.json << EOF
{
"insecure-registries":["10.0.0.10:5000"]
}
EOF
systemctl daemon-reload
# Restart docker so the insecure-registry setting takes effect; the
# registry container comes back automatically (--restart always).
systemctl restart docker
# Tag the loaded ceph image and push it into the local registry.
docker tag 0912465dcea5 10.0.0.10:5000/ceph:v17
docker push 10.0.0.10:5000/ceph:v17
6.2.配置私有仓库
# Allow pulls from the plain-HTTP registry on node1 (run on every node).
# Use '>' (overwrite), not '>>': appending to an existing daemon.json
# produces invalid JSON and docker will refuse to start.
cat > /etc/docker/daemon.json << EOF
{
"insecure-registries":["10.0.0.10:5000"]
}
EOF
systemctl daemon-reload
systemctl restart docker
7.引导集群(node1)
mkdir -p /etc/ceph
# Bootstrap the first monitor on node1 using the image from the local
# registry; --skip-pull because the image was pre-loaded in step 6.
cephadm --image 10.0.0.10:5000/ceph:v17 bootstrap --mon-ip 10.0.0.10 --initial-dashboard-user admin --initial-dashboard-password 000000 --skip-pull
# Dashboard: serve plain HTTP on port 5050.
ceph config set mgr mgr/dashboard/ssl_server_port 5050
ceph config set mgr mgr/dashboard/ssl false
# Bind the dashboard to node1's cluster address (was 192.168.18.130,
# which is not on the 10.0.0.0/24 network used everywhere else here).
ceph config set mgr mgr/dashboard/server_addr 10.0.0.10
ceph config set mgr mgr/dashboard/server_port 5050
# Restart the dashboard module so the new settings take effect.
ceph mgr module disable dashboard
ceph mgr module enable dashboard
# Place MONs on this cluster's hosts (was "test01,test02,test03" —
# hostnames from a different environment).
ceph orch apply mon "node1,node2,node3"
# Destructive teardown example — deliberately commented out. Substitute
# the fsid reported by 'ceph fsid' only if you really want to wipe the cluster.
# cephadm rm-cluster --fsid d92b85c0-3ecd-11ed-a617-3f7cf3e2d6d8 --force
8.安装ceph-common工具(node1)
apt install -y ceph-common
9.添加主机到集群(node1)
# Distribute the cluster's SSH public key, then enroll each host.
for host in node2 node3; do
  ssh-copy-id -f -i /etc/ceph/ceph.pub "$host"
done
for host in node2 node3; do
  ceph orch host add "$host"
done
10.部署OSD
# List the storage devices cephadm considers usable on every enrolled host.
ceph orch device ls
# Automatically create an OSD on every available (empty, unused) device.
ceph orch apply osd --all-available-devices
PS:也可以按如下方式手动逐个添加指定磁盘作为OSD(可选):
# Alternative to --all-available-devices: add specific devices explicitly.
ceph orch daemon add osd node1:/dev/sdb
ceph orch daemon add osd node2:/dev/sdb
ceph orch daemon add osd node3:/dev/sdb
# Verify cluster health and storage capacity.
ceph -s
ceph df
11.部署MDS
- CephFS 需要两个 Pools,cephfs-data 和 cephfs-metadata,分别存储文件数据和文件元数据
# Create the two pools CephFS requires (pg_num/pgp_num: 16 and 32).
ceph osd pool create cephfs-metadata 16 16
ceph osd pool create cephfs-data 32 32
# Create the filesystem on those pools.
ceph fs new cephfs cephfs-metadata cephfs-data
# Deploy 3 MDS daemons, pinned to the three named hosts.
ceph orch apply mds cephfs --placement="3 node1 node2 node3"
ceph -s
12.部署RGW
# In Quincy, 'ceph orch apply rgw' no longer accepts positional realm/zone
# arguments; pass them as flags. NOTE(review): the realm/zone must already
# exist (created via radosgw-admin) — omit the flags for a standalone RGW.
ceph orch apply rgw myorg --realm=myorg --zone=cn-east-1 --placement="3 node1 node2 node3"
# Confirm the rgw service is listed by the orchestrator.
ceph orch ls