Building a Ceph Cluster in an Offline Environment

Environment preparation

Hostname    IP                  OS          Ceph version
ceph-1      192.168.75.129/24   CentOS 7.9  nautilus 14.2.22
ceph-2      192.168.75.130/24   CentOS 7.9  nautilus 14.2.22
ceph-3      192.168.75.131/24   CentOS 7.9  nautilus 14.2.22

Each node has two extra disks attached: sdb and sdc.

First, on a machine with internet access running the same OS version, configure the Aliyun base, EPEL, and Ceph repos and cache the required packages. Ideally use a freshly installed system: yum's --downloadonly only fetches packages that are not already installed, so a clean system captures the full dependency set.

curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

cat > /etc/yum.repos.d/ceph.repo <<EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
gpgcheck=0
priority=1

[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
gpgcheck=0
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
gpgcheck=0
priority=1
EOF
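
# (Optional) Refresh the yum cache so the new repos take effect

yum clean all && yum makecache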

# Install yum-utils and createrepo

yum -y install yum-utils createrepo

# Download the packages and their dependencies

mkdir /root/ceph && cd /root/ceph
yum update -y --downloadonly --downloaddir=.
yum install -y --downloadonly --downloaddir=. ceph ceph-deploy ceph-common ceph-fuse ceph-mgr-dashboard

# Build the repo metadata

createrepo /root/ceph
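
# (Optional check) createrepo should now have produced a repodata/ directory

ls /root/ceph/repodata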

# Pack it up to carry into the offline environment

cd ; tar zcvf ceph.tar.gz ceph

# scp the tarball into the offline environment and extract it (repeat on every node, since each installs from its local repo), then mount the local ISO and configure local yum repos

cd ~
tar zxvf ceph.tar.gz
cd /etc/yum.repos.d
mkdir /etc/yum.repos.d/bak
mv *.repo bak
mount /dev/sr0 /mnt
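
# (Optional) Make the ISO mount persist across reboots; this assumes the install media stays attached as /dev/sr0

echo '/dev/sr0 /mnt iso9660 defaults,ro 0 0' >> /etc/fstab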

cat > /etc/yum.repos.d/ceph.repo << EOF
[ceph]
name=ceph
baseurl=file:///root/ceph
enabled=1
gpgcheck=0
EOF

cat > /etc/yum.repos.d/base.repo << EOF
[base]
name=base
baseurl=file:///mnt
enabled=1
gpgcheck=0
EOF
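
# (Optional check) Confirm both local repos are visible before installing

yum clean all && yum repolist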

# Install ceph on every node

yum -y install ceph ceph-common

# The primary (admin) node additionally needs ceph-deploy and ceph-fuse

yum -y install ceph-deploy ceph-fuse

# Cluster time synchronization: the original steps are skipped here, but the Ceph mons require synchronized clocks; a sketch follows
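
# A minimal chrony sketch (chrony ships on the CentOS ISO, so the local base repo provides it); using ceph-1 as the time source is an assumption for this offline setup, use your own NTP server if one exists

yum -y install chrony
# On ceph-1, serve time to the cluster subnet (assumed role as time source):
#   echo -e 'allow 192.168.75.0/24\nlocal stratum 10' >> /etc/chrony.conf
# On ceph-2/ceph-3, replace the default pool servers with ceph-1:
#   sed -i 's/^server .*/server ceph-1 iburst/' /etc/chrony.conf
systemctl enable chronyd && systemctl restart chronyd
chronyc sources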

# Run the following steps on the primary node, in the /etc/ceph directory

# Passwordless SSH

ssh-keygen
for i in {ceph-2,ceph-3} ; do ssh-copy-id ${i} ; done
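
# ceph-deploy and the loop above assume the hostnames resolve; without internal DNS, add entries to /etc/hosts on every node (IPs from the table above)

cat >> /etc/hosts << EOF
192.168.75.129 ceph-1
192.168.75.130 ceph-2
192.168.75.131 ceph-3
EOF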

# Initialize the cluster

ceph-deploy new --cluster-network 192.168.75.0/24 --public-network 192.168.75.0/24 ceph-1

# Configure 2 replicas per pool

echo 'osd_pool_default_size = 2' >> /etc/ceph/ceph.conf
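
# /etc/ceph/ceph.conf should now look roughly like this (your fsid will differ):
#
# [global]
# fsid = 9b361bf1-362c-4e80-b3d3-5b51a52e966a
# public_network = 192.168.75.0/24
# cluster_network = 192.168.75.0/24
# mon_initial_members = ceph-1
# mon_host = 192.168.75.129
# auth_cluster_required = cephx
# auth_service_required = cephx
# auth_client_required = cephx
# osd_pool_default_size = 2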

# Deploy the mon daemon

ceph-deploy mon create ceph-1

# Gather the keys

ceph-deploy gatherkeys ceph-1 ceph-2 ceph-3

# Deploy the mgr daemon

ceph-deploy mgr create ceph-1

# Deploy the mds daemon

ceph-deploy mds create ceph-1

# Push the config and admin key to the other cluster nodes

ceph-deploy admin ceph-2 ceph-3

# Make the admin keyring file readable

for i in {ceph-2,ceph-3} ; do ssh ${i} -C "chmod +r /etc/ceph/ceph.client.admin.keyring" ; done
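
# (Optional check) Each node should now be able to query the cluster

for i in {ceph-2,ceph-3} ; do ssh ${i} "ceph health" ; done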

# Zap (wipe) the disks

for i in {b,c};do ceph-deploy disk zap ceph-1 /dev/sd${i};done
for i in {b,c};do ceph-deploy disk zap ceph-2 /dev/sd${i};done
for i in {b,c};do ceph-deploy disk zap ceph-3 /dev/sd${i};done

# Create the OSDs

for i in {b,c};do ceph-deploy osd create ceph-1 --data /dev/sd${i};done
for i in {b,c};do ceph-deploy osd create ceph-2 --data /dev/sd${i};done
for i in {b,c};do ceph-deploy osd create ceph-3 --data /dev/sd${i};done
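
# (Optional check) All 6 OSDs should show as up and in

ceph osd tree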

# Create the storage pools and the filesystem

# Create the data pool
ceph osd pool create cephfs_data 32
# Create the metadata pool
ceph osd pool create cephfs_metadata 32
# Create the filesystem
ceph fs new cephfs cephfs_metadata cephfs_data
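
# (Optional check) Confirm the filesystem exists and the MDS went active

ceph fs ls
ceph mds stat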

# At this point ceph -s reports HEALTH_WARN because insecure global_id reclaim is still allowed; disable it

[root@ceph-1 ceph]# ceph -s 
  cluster:
    id:     9b361bf1-362c-4e80-b3d3-5b51a52e966a
    health: HEALTH_WARN
            mon is allowing insecure global_id reclaim

  services:
    mon: 1 daemons, quorum ceph-1 (age 26m)
    mgr: ceph-1(active, since 21m)
    mds: cephfs:1 {0=ceph-1=up:active}
    osd: 6 osds: 6 up (since 4m), 6 in (since 4m)

  data:
    pools:   2 pools, 64 pgs
    objects: 22 objects, 2.2 KiB
    usage:   6.0 GiB used, 114 GiB / 120 GiB avail
    pgs:     64 active+clean 

[root@ceph-1 ceph]# ceph config set mon auth_allow_insecure_global_id_reclaim false

[root@ceph-1 ceph]# ceph -s 
  cluster:
    id:     9b361bf1-362c-4e80-b3d3-5b51a52e966a
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum ceph-1 (age 36m)
    mgr: ceph-1(active, since 31m)
    mds: cephfs:1 {0=ceph-1=up:active}
    osd: 6 osds: 6 up (since 14m), 6 in (since 14m)

  task status:

  data:
    pools:   2 pools, 64 pgs
    objects: 22 objects, 2.2 KiB
    usage:   6.0 GiB used, 114 GiB / 120 GiB avail
    pgs:     64 active+clean

# This exercise uses the CephFS filesystem; just mount it with ceph-fuse

mkdir /test
ceph-fuse -m ceph-1 /test
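
# (Optional check) Confirm the mount; the filesystem type should show as fuse.ceph-fuse

df -hT /test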

# Enable the mgr dashboard module

yum install -y ceph-mgr-dashboard
ceph mgr module enable dashboard

# Generate and install a self-signed certificate

ceph dashboard create-self-signed-cert
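
# If HTTPS is not served right away, reloading the module usually makes the mgr pick up the new certificate

ceph mgr module disable dashboard
ceph mgr module enable dashboard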

# Create a dashboard login; here both the username and the password are admin

echo "admin" >/root/password.txt
ceph dashboard ac-user-create admin administrator -i /root/password.txt

# Verify that the dashboard service is up

ceph mgr services
{
    "dashboard": "https://xxxxx:8443/"
}

 
