准备工作
准备三台服务器(有无网络都可以,ip主机名要设置好)
主机名 | IP | 功能 |
---|---|---|
master | 192.168.148.2 | mon, osd, mgr, mds |
node1 | 192.168.148.3 | mon, osd, mgr, mds |
node2 | 192.168.148.4 | mon, osd, mgr, mds |
拉取ceph镜像
# 这里拉取的是 ceph/daemon 的 4.0.22(Nautilus)版本,与下面的镜像标签一致
docker pull ceph/daemon:v4.0.22-stable-4.0-nautilus-centos-7-x86_64
如果服务器没有外网,可以先在有网络的机器上用 docker save 将镜像打包成 tar 文件,上传到目标服务器后再用 docker load 加载
docker save -o tar文件名称 镜像REPOSITORY:TAG #打包
docker load -i tar文件名称 #加载镜像
在主节点中编写执行脚本
vi start_mon.sh
#!/bin/bash
# Start the Ceph monitor (mon) daemon container on this host.
# Prerequisite: /usr/local/bin/ceph/{etc,lib,logs} exist on the host.
docker run -d --net=host \
--name=mon \
--restart=always \
-v /etc/localtime:/etc/localtime \
-v /usr/local/bin/ceph/etc:/etc/ceph \
-v /usr/local/bin/ceph/lib:/var/lib/ceph \
-v /usr/local/bin/ceph/logs:/var/log/ceph \
-e MON_IP=192.168.148.2 \
-e CEPH_PUBLIC_NETWORK=192.168.148.0/24 \
ceph/daemon:v4.0.22-stable-4.0-nautilus-centos-7-x86_64 mon
# NOTE: the public network is 192.168.148.0/24 — the original /16 prefix
# did not match the 192.168.148.x host addresses (a /16 network would be
# 192.168.0.0). Keep this value in sync with ceph.conf.
vi start_osd.sh
#!/bin/bash
# Start the Ceph OSD daemon container in osd_directory mode:
# OSD data lives in a host directory (/data/ceph/osd) instead of a raw device.
# Prerequisite: the bootstrap-osd keyring has been exported into
# /usr/local/bin/ceph/lib/bootstrap-osd before this script runs.
docker run -d --net=host --pid=host \
--name=osd \
--privileged=true \
--restart=always \
-v /etc/localtime:/etc/localtime \
-v /usr/local/bin/ceph/etc:/etc/ceph \
-v /usr/local/bin/ceph/lib:/var/lib/ceph \
-v /usr/local/bin/ceph/logs:/var/log/ceph \
-v /data/ceph/osd:/var/lib/ceph/osd \
ceph/daemon:v4.0.22-stable-4.0-nautilus-centos-7-x86_64 osd_directory
vi start_mds.sh
#!/bin/bash
# Start the Ceph metadata server (mds) daemon container.
# CEPHFS_CREATE=0: do NOT auto-create a CephFS filesystem here; the
# pools and filesystem are created manually later in the walkthrough.
docker run -d \
--net=host \
--name=mds \
--restart=always \
--privileged=true \
-v /etc/localtime:/etc/localtime \
-v /usr/local/bin/ceph/etc:/etc/ceph \
-v /usr/local/bin/ceph/lib:/var/lib/ceph \
-v /usr/local/bin/ceph/logs:/var/log/ceph \
-e CEPHFS_CREATE=0 \
-e CEPHFS_METADATA_POOL_PG=512 \
-e CEPHFS_DATA_POOL_PG=512 \
ceph/daemon:v4.0.22-stable-4.0-nautilus-centos-7-x86_64 mds
vi start_mgr.sh
#!/bin/bash
# Start the Ceph manager (mgr) daemon container; hosts the dashboard module.
docker run -d --net=host \
--name=mgr \
--restart=always \
-v /etc/localtime:/etc/localtime \
-v /usr/local/bin/ceph/etc:/etc/ceph \
-v /usr/local/bin/ceph/lib:/var/lib/ceph \
-v /usr/local/bin/ceph/logs:/var/log/ceph \
ceph/daemon:v4.0.22-stable-4.0-nautilus-centos-7-x86_64 mgr
vi start_rgw.sh
#!/bin/bash
# Start the Ceph RADOS gateway (rgw) daemon container (S3/Swift API).
# Prerequisite: the bootstrap-rgw keyring has been exported into
# /usr/local/bin/ceph/lib/bootstrap-rgw before this script runs.
docker run \
-d --net=host \
--name=rgw \
--restart=always \
-v /usr/local/bin/ceph/lib:/var/lib/ceph/ \
-v /usr/local/bin/ceph/etc:/etc/ceph \
-v /usr/local/bin/ceph/logs:/var/log/ceph \
-v /etc/localtime:/etc/localtime \
ceph/daemon:v4.0.22-stable-4.0-nautilus-centos-7-x86_64 rgw
准备工作
# Run on all three machines.
# Append the cluster hostnames to /etc/hosts so nodes can resolve each other.
cat >>/etc/hosts <<EOF
192.168.148.2 master
192.168.148.3 node1
192.168.148.4 node2
EOF
# Run on all three machines.
# Create the ceph working directories mounted into the containers.
sudo mkdir -p /usr/local/bin/ceph/{admin,data,etc,lib,logs}
# World-writable so the container daemons can write regardless of their uid.
# One recursive chmod over the same brace expansion replaces the five
# repeated per-directory invocations.
sudo chmod -R 777 /usr/local/bin/ceph/{admin,data,etc,lib,logs}
# Run on all three machines.
# Create the OSD data directory on the host.
# NOTE: this path must match the host side of the osd container mount
# (-v /data/ceph/osd:/var/lib/ceph/osd in start_osd.sh); the original
# /data/etc/osd did not match, so the OSD data directory was never created.
sudo mkdir -p /data/ceph/osd
sudo chmod -R 777 /data/ceph/osd
# Run on master only.
# Set up passwordless SSH from master to the worker nodes (used by scp below).
ssh-keygen
ssh-copy-id node1
ssh-copy-id node2
安装ceph-mon
# 在master执行
sh start_mon.sh
# 在master执行
# 查看是否运行成功
docker ps
# 在master执行
# 检查Ceph状态
docker exec mon ceph -s
# 在master执行
vi /usr/local/bin/ceph/etc/ceph.conf
[global]
fsid = 171912aa-2b67-42e9-a988-37615b91f3e2
mon initial members = master
mon host = 192.168.148.2,192.168.148.3,192.168.148.4
# /24 matches the 192.168.148.x addresses above (a /16 prefix would
# describe 192.168.0.0); keep in sync with CEPH_PUBLIC_NETWORK in start_mon.sh
public network = 192.168.148.0/24
cluster network = 192.168.148.0/24
osd journal size = 100
# Tolerate larger clock drift between monitors
mon clock drift allowed = 2
mon clock drift warn backoff = 30
mon_max_pg_per_osd = 1000
# (push this file to every node)
# Allow pools to be deleted
mon_allow_pool_delete = true
osd max object name len = 256
osd max object namespace len = 64
[mgr]
# Enable the web dashboard module
mgr modules = dashboard
# One section per rgw instance: the section name must be
# client.rgw.<hostname> to match the running daemon, otherwise the
# settings are silently ignored (the original client.rgw.ceph1 matched
# none of master/node1/node2).
[client.rgw.master]
rgw_frontends = "civetweb port=7480"
[client.rgw.node1]
rgw_frontends = "civetweb port=7480"
[client.rgw.node2]
rgw_frontends = "civetweb port=7480"
# 在master执行
# 复制文件到节点,这里最好是用root用户
sudo scp -r /usr/local/bin/ceph 你的用户名@node1:/usr/local/
sudo scp -r /usr/local/bin/ceph 你的用户名@node2:/usr/local/
#分别修改两台机器上ceph.conf的mon initial members为node1和node2
vi /usr/local/bin/ceph/etc/ceph.conf
# 分别在两台node上执行
sh start_mon.sh
# 在任意机器执行将会看到下面的内容
# 检查Ceph状态
docker exec mon ceph -s
cluster:
id: 171912aa-2b67-42e9-a988-37615b91f3e2
health: HEALTH_WARN
mons are allowing insecure global_id reclaim
services:
mon: 3 daemons, quorum master,node1,node2 (age 1m)
安装ceph-osd
# 分别在三台机器上执行生成osd的密钥信息
docker exec -it mon ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring
# 分别在三台机器上执行
sh start_osd.sh
# 在任意机器执行将会看到下面的内容
# 检查Ceph状态
docker exec mon ceph -s
cluster:
id: 171912aa-2b67-42e9-a988-37615b91f3e2
health: HEALTH_WARN
mons are allowing insecure global_id reclaim
services:
mon: 3 daemons, quorum master,node1,node2 (age 2m)
osd: 3 osds: 3 up (since 2d), 3 in (since 1m)
安装ceph-mgr
# 分别在三台机器上执行
sh start_mgr.sh
# 在任意机器执行将会看到下面的内容
# 检查Ceph状态
docker exec mon ceph -s
cluster:
id: 171912aa-2b67-42e9-a988-37615b91f3e2
health: HEALTH_WARN
mons are allowing insecure global_id reclaim
services:
mon: 3 daemons, quorum master,node1,node2 (age 3m)
osd: 3 osds: 3 up (since 2d), 3 in (since 2m)
mgr: master(active, since 2d), standbys: node1, node2
安装ceph-rgw
# 分别在三台机器上执行生成rgw的密钥信息
docker exec mon ceph auth get client.bootstrap-rgw -o /var/lib/ceph/bootstrap-rgw/ceph.keyring
# 分别在三台机器上执行
sh start_rgw.sh
#单节点时,rgw启动之后,ceph -s 查看可能会出现Degraded降级的情况,我们需要手动设置rgw pool的size 和 min_size为最小1
#ceph osd pool set .rgw.root min_size 1
#ceph osd pool set .rgw.root size 1
#ceph osd pool set default.rgw.control min_size 1
#ceph osd pool set default.rgw.control size 1
#ceph osd pool set default.rgw.meta min_size 1
#ceph osd pool set default.rgw.meta size 1
#ceph osd pool set default.rgw.log min_size 1
#ceph osd pool set default.rgw.log size 1
# 在任意机器执行将会看到下面的内容
# 检查Ceph状态
docker exec mon ceph -s
cluster:
id: 171912aa-2b67-42e9-a988-37615b91f3e2
health: HEALTH_WARN
mons are allowing insecure global_id reclaim
services:
mon: 3 daemons, quorum master,node1,node2 (age 3m)
osd: 3 osds: 3 up (since 2d), 3 in (since 2m)
mgr: master(active, since 2d), standbys: node1, node2
rgw: 3 daemons active (master, node1, node2)
安装ceph-mds
# 分别在三台机器上执行
sh start_mds.sh
# 在任意机器执行将会看到下面的内容
# 检查Ceph状态
docker exec mon ceph -s
cluster:
id: 171912aa-2b67-42e9-a988-37615b91f3e2
health: HEALTH_WARN
mons are allowing insecure global_id reclaim
services:
mon: 3 daemons, quorum master,node1,node2 (age 3m)
osd: 3 osds: 3 up (since 2d), 3 in (since 2m)
mds: cephfs:1 {0=master=up:active} 2 up:standby
mgr: master(active, since 2d), standbys: node1, node2
rgw: 3 daemons active (master, node1, node2)
CephFS部署
# 在master执行 创建Data Pool
sudo docker exec osd ceph osd pool create cephfs_data 128 128
# 在master执行 创建Metadata Pool
docker exec osd ceph osd pool create cephfs_metadata 64 64
# 在master执行 创建CephFS
docker exec osd ceph fs new cephfs cephfs_metadata cephfs_data
# 在master执行 查看FS信息
sudo docker exec osd ceph fs ls
安装Dashboard管理后台
# 在master执行 开启dashboard功能
docker exec mgr ceph mgr module enable dashboard
# 在master执行 创建证书
docker exec mgr ceph dashboard create-self-signed-cert
# 在master执行 设置用户名为admin, 密码为admin。
docker exec mgr ceph dashboard set-login-credentials admin admin
# 在master执行 配置外部访问端口
docker exec mgr ceph config set mgr mgr/dashboard/server_port 18080
# 在master执行 配置外部访问IP
docker exec mgr ceph config set mgr mgr/dashboard/server_addr 192.168.148.2
# 在master执行 关闭https(如果没有证书或内网访问, 可以关闭)
docker exec mgr ceph config set mgr mgr/dashboard/ssl false
# 在master执行 重启Mgr DashBoard服务
docker restart mgr
# 在master执行 查看Mgr DashBoard服务信息
docker exec mgr ceph mgr services
{
"dashboard": "http://master:18080/"
}
管理控制台界面: