Building Distributed Storage with cephadm

Preparation

Enable the NIC on boot and append a DNS server


# Edit /etc/sysconfig/network-scripts/ifcfg-eth0
# Note: depending on the NIC, the file may have a name other than ifcfg-eth0
sudo sed -i 's/ONBOOT="no"/ONBOOT="yes"/g' /etc/sysconfig/network-scripts/ifcfg-eth0
echo -e  '\nDNS1="114.114.114.114"' >> /etc/sysconfig/network-scripts/ifcfg-eth0
service network restart
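
To confirm the change took effect (assuming the NIC is eth0, as above):

# ONBOOT should now be "yes" and DNS1 should be present
grep -E 'ONBOOT|DNS1' /etc/sysconfig/network-scripts/ifcfg-eth0
# the network scripts should also have propagated DNS1 into resolv.conf
cat /etc/resolv.conf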

Switch the yum repo to a mirror


cp -a /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak

wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo

yum clean all
yum makecache

# Enable EPEL

yum install epel-release -y

cp -a /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup
mv /etc/yum.repos.d/epel-testing.repo /etc/yum.repos.d/epel-testing.repo.backup

sed -i "s/#baseurl/baseurl/g" /etc/yum.repos.d/epel.repo
sed -i "s/metalink/#metalink/g" /etc/yum.repos.d/epel.repo
sed -i "s@https\?://download.fedoraproject.org/pub@https://repo.huaweicloud.com@g" /etc/yum.repos.d/epel.repo

yum update -y
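
A quick sanity check that the Huawei Cloud base repo and EPEL are both active:

# list the enabled repositories and their package counts
yum repolist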

Disable the firewall

systemctl stop firewalld.service
systemctl disable firewalld.service
systemctl status firewalld.service

Install Docker



sudo yum remove docker docker-common docker-selinux docker-engine
sudo yum install -y yum-utils device-mapper-persistent-data lvm2


wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo


sudo sed -i 's+download.docker.com+repo.huaweicloud.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo


sudo yum makecache fast
sudo yum install docker-ce -y


Configure a Docker registry mirror


# Optional: log in to the Aliyun registry with your own account
sudo docker login --username=漱石者枕夏目 registry.cn-hangzhou.aliyuncs.com

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://wm12hkla.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
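
To verify the mirror was picked up (docker info reports configured registry mirrors):

# the Aliyun mirror should appear under "Registry Mirrors"
sudo docker info | grep -A 1 'Registry Mirrors'
# pull a small image through the mirror as a smoke test
sudo docker pull hello-world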


Install Python 3.6


yum install epel-release -y
yum install python36 -y

curl -O https://bootstrap.pypa.io/get-pip.py
/usr/bin/python3.6 get-pip.py
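
Verify the interpreter and pip are usable (running pip through the interpreter avoids guessing the wrapper script name):

python3.6 --version
/usr/bin/python3.6 -m pip --version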

Time synchronization


# On the time server (192.168.141.20):
yum -y install chrony

# edit /etc/chrony.conf and add the following line so the cluster subnet can sync from this host:
#   allow 192.168.141.0/24
systemctl enable chronyd
systemctl restart chronyd

# On every other node, point chronyd at the time server (this overwrites the default config):
yum -y install chrony
echo "server 192.168.141.20 iburst" > /etc/chrony.conf
systemctl enable chronyd
systemctl restart chronyd
chronyc sources
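
To confirm the other nodes are actually syncing against 192.168.141.20:

# overall sync state of the local chronyd
chronyc tracking
# verbose source list; the 192.168.141.20 entry should be marked with '*' once selected
chronyc sources -v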

Generate an SSH key and distribute it to the other hosts


ssh-keygen -t rsa -C "limengkai@zettakit"

ssh-copy-id -f -i /root/.ssh/id_rsa.pub root@192.168.141.23

ssh-copy-id -f -i /root/.ssh/id_rsa.pub root@192.168.141.24
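
A quick check that passwordless login works before handing these hosts to cephadm:

# each command should print the remote hostname without prompting for a password
ssh root@192.168.141.23 hostname
ssh root@192.168.141.24 hostname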


What is cephadm?

cephadm is a new, container-based way to deploy Ceph.

Create a reasonably large virtual machine

Per the recommendation on the Ceph website, larger than 24 GB.

Install cephadm


curl --silent --remote-name --location https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm

chmod +x cephadm

./cephadm --help

./cephadm add-repo --release octopus
./cephadm install

which cephadm
# output: /usr/sbin/cephadm

Note: the global --docker flag tells cephadm to use Docker instead of Podman; we pass it in the bootstrap command below.

Bootstrap the cluster


mkdir -p /etc/ceph

# cephadm bootstrap --mon-ip <your_mon_ip>
cephadm --docker bootstrap --mon-ip 192.168.141.20
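
Because we bootstrapped with --docker, the new daemons run as ordinary Docker containers, which makes them easy to inspect:

# the bootstrap host should now be running Ceph containers (mon, mgr, crash, etc.)
docker ps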

Enable the Ceph CLI


cephadm shell 


cephadm shell -- ceph -s 

cephadm add-repo --release octopus
cephadm install ceph-common

# then you can use the ceph CLI directly

ceph -v
ceph status

Add hosts to the cluster

To add each new host to the cluster, perform two steps:

Install the cluster’s public SSH key in the new host’s root user’s authorized_keys file:


ssh-copy-id -f -i /etc/ceph/ceph.pub root@<new-host>

For example:


ssh-copy-id -f -i /etc/ceph/ceph.pub root@host2
ssh-copy-id -f -i /etc/ceph/ceph.pub root@host3


Tell Ceph that the new node is part of the cluster:

ceph orch host add <newhost>
For example:


ceph orch host add host2
ceph orch host add host3

Add my nodes


ssh-copy-id -f -i /etc/ceph/ceph.pub root@192.168.141.23
ssh-copy-id -f -i /etc/ceph/ceph.pub root@192.168.141.24

ceph orch host add host-192-168-141-23
ceph orch host add host-192-168-141-24
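
To confirm both hosts registered with the orchestrator:

# list the hosts known to the orchestrator
ceph orch host ls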


Add additional MONs


# ceph config set mon public_network <mon-cidr-network>
ceph config set mon public_network 192.168.0.0/16
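
With public_network set, the orchestrator can place additional monitors. A minimal sketch (the count of three matches our three hosts and is an assumption, not taken from the original steps):

# ask the orchestrator to maintain three monitors across the cluster
ceph orch apply mon 3
# confirm the mon quorum
ceph -s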

Add OSDs


ceph orch device ls

ceph orch device zap host-192-168-141-20 /dev/sdb --force
ceph orch device zap host-192-168-141-23 /dev/sdb --force
ceph orch device zap host-192-168-141-24 /dev/sdb --force

ceph orch daemon add osd host-192-168-141-20:/dev/sdb
ceph orch daemon add osd host-192-168-141-23:/dev/sdb
ceph orch daemon add osd host-192-168-141-24:/dev/sdb

ceph orch device ls



# For reference, from the orchestrator CLI help:
orch device ls [<hostname>...] [plain|json|json-pretty|yaml] [--refresh] [--wide]            List devices on a host
orch device zap <hostname> <path> [--force]                                                  Zap (erase!) a device so it can be re-used


# The three OSDs were created successfully; the devices now show Available = No because they are in use
[root@host-192-168-141-20 ~]# ceph orch device ls
Hostname             Path      Type  Serial                                Size   Health   Ident  Fault  Available  
host-192-168-141-20  /dev/sdb  hdd   32007f50-1e38-4a84-8f63-1e69c674f43d  53.6G  Unknown  N/A    N/A    No         
host-192-168-141-23  /dev/sdb  hdd   0652ea45-425d-40cd-bd20-b2e50123e9bf  32.2G  Unknown  N/A    N/A    No         
host-192-168-141-24  /dev/sdb  hdd   32ec781d-cea2-4f67-b1e1-78e7a1104940  32.2G  Unknown  N/A    N/A    No  
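
Cross-checking from the OSD side:

# all three OSDs should be up and in
ceph osd tree
ceph -s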

Create a CephFS file system


ceph osd pool create cephfs_data 32
ceph osd pool create cephfs_metadata  32

ceph fs new my_fs cephfs_metadata cephfs_data 

ceph fs ls

About creating pools

Choosing a pg_num value is required, because it cannot be calculated automatically. A commonly used guideline:

  * With fewer than 5 OSDs, set pg_num to 128 (the commands above use 32 for this small test cluster).
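
pg_num can also be read back and adjusted after creation; a sketch against the pools created above:

# read the current pg_num of the data pool
ceph osd pool get cephfs_data pg_num
# raise it later if the cluster grows
ceph osd pool set cephfs_data pg_num 64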

Deploy MDS daemons


# ceph orch apply mds <fs-name> --placement="<num-daemons> [<host1> ...]"

ceph fs ls
output:  name: my_fs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]

ceph orch apply mds my_fs  --placement="host-192-168-141-20 host-192-168-141-23 host-192-168-141-24"
output:   Scheduled mds.my_fs update..

ceph mds stat
output:   my_fs:1 {0=my_fs.host-192-168-141-20.tfeucj=up:active} 2 up:standby

Deploy RGWs


radosgw-admin realm create --rgw-realm=my_realm --default
radosgw-admin zonegroup create --rgw-zonegroup=my_zonegroup  --master --default
radosgw-admin zone create --rgw-zonegroup=my_zonegroup --rgw-zone=my_zone --master --default
radosgw-admin period update --rgw-realm=my_realm --commit

ceph orch apply rgw my_realm my_zone --placement="host-192-168-141-20"
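
To check that the realm and zone exist and the RGW service was scheduled:

# the realm and zone created above should be listed
radosgw-admin realm list
radosgw-admin zone list
# the rgw service should appear among the orchestrator-managed services
ceph orch ls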

Deploy NFS


ceph osd pool create my_nfs_pool 64

ceph orch apply nfs my_nfs my_nfs_pool nfs-ns

ceph osd pool application enable my_nfs_pool rbd

Here we enabled the rbd (block device) application on the pool. A pool can only be enabled for one application type; the other two types are cephfs (file system) and rgw (object storage). For a cephfs pool it would be:

ceph osd pool application enable my_nfs_pool cephfs
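
You can read back which application a pool is tagged with:

# show the application(s) enabled on the pool
ceph osd pool application get my_nfs_pool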


Using the dashboard


ceph dashboard ac-user-create kk lmk@19980312! administrator

ceph mgr services 

Visit:
https://192.168.141.20:8443/#/dashboard

username: kk
password: lmk@19980312!


For reference, the tail of the cephadm bootstrap output, including the default dashboard credentials:

Ceph Dashboard is now available at:

             URL: https://host-192-168-141-20.zettakit:8443/
            User: admin
        Password: 03d5auyq0n

You can access the Ceph CLI with:

        sudo /usr/sbin/cephadm shell --fsid 2aa7de1c-497a-11eb-b926-fa163e717f07 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

Please consider enabling telemetry to help improve Ceph:

        ceph telemetry on

For more information see:

        https://docs.ceph.com/docs/master/mgr/telemetry/

Bootstrap complete.


Tear down the cluster


cephadm rm-cluster --fsid 1064116e-4976-11eb-b4ae-fa163e717f07 --force
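
The fsid to pass here can be read from the running cluster (or from /etc/ceph/ceph.conf) before tearing it down:

# print the cluster fsid
ceph fsid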
