cephadm部署分布式ceph存储

一、集群规划

10.0.241.201 test-sujch-01
10.0.241.202 test-sujch-02
10.0.241.203 test-sujch-03
10.0.241.175 test-sujch-04 ## harbor、haproxy、时间服务器

系统优化

# Disable the firewall so cluster nodes can reach each other freely
systemctl disable --now firewalld
# sshd tuning: skip reverse-DNS lookups and GSSAPI negotiation to speed up logins
sed -i 's#\#UseDNS yes#UseDNS no#' /etc/ssh/sshd_config
sed -i 's#^GSSAPIAuthentication yes$#GSSAPIAuthentication no#' /etc/ssh/sshd_config
sed -i 's/GSSAPICleanupCredentials no/GSSAPICleanupCredentials yes/' /etc/ssh/sshd_config
# Re-enable the (commented-out) StrictHostKeyChecking ask default in the CLIENT config
sed -i 's/^#   StrictHostKeyChecking ask/   StrictHostKeyChecking ask/' /etc/ssh/ssh_config
systemctl restart sshd
# Disable SELinux permanently (config file) and immediately (setenforce)
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g"  /etc/selinux/config
setenforce 0
# Run on ALL hosts

添加yum源

挂载本地yum源

# Set up a local yum repository from the installation media — run on all hosts.
rm -rf  /etc/yum.repos.d/*
# Mount the installation ISO as the local mirror
mount /dev/cdrom /mnt/
# Generate the local repo file.
# FIX: the correct key is "enabled" — the original "enable=1" is an unknown
# option that yum silently ignores (repos default to enabled, so it only
# worked by accident).
cat >  /etc/yum.repos.d/local.repo <<EOF
[local]
name=local
baseurl=file:///mnt
gpgcheck=0
enabled=1
EOF

添加ceph网络yum源

cephadm工具yum源:https://mirrors.aliyun.com/ceph/rpm-octopus/el7/noarch/
ceph安装yum源:https://mirrors.aliyun.com/ceph/rpm-octopus/el7/x86_64/

添加kernel网络yum源

kernel内核最新yum源:http://mirror.centos.org/altarch/7/kernel/x86_64/

添加docker网络yum源

docker安装yum源:https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/

升级内核

# Install base tools and upgrade the kernel — run on ALL hosts.
yum -y install wget vim lrzsz unzip zip 
yum -y install kernel kernel-devel
# Boot the newly installed kernel (first grub menu entry)
grub2-set-default 0
reboot
# NOTE(review): run 'uname -a' manually AFTER the reboot to verify the new
# kernel — anything placed after 'reboot' never executes in this session.
uname -a
# Run on all hosts

部署docker

# Deploy docker — run on ALL hosts.
yum -y install docker-ce
systemctl enable --now  docker

# Write the daemon config, then restart docker below to apply it.
# NOTE(review): "insecure-registries": ["0.0.0.0/0"] trusts EVERY registry
# over plain HTTP — acceptable only on an isolated management network.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries" : ["0.0.0.0/0"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
 ]
}
EOF
systemctl restart  docker

# Run on all hosts


部署时间同步

# Time synchronization — install chrony on ALL hosts.
yum -y install chrony
systemctl enable --now chronyd
# Run on all hosts
# chronyd server role: 10.0.241.175 is the time server; every other node is a client

# Server-side configuration (10.0.241.175 only)
# NOTE(review): the server lists itself as its own source ("server
# 10.0.241.175 iburst"); combined with "local stratum 10" it can still serve
# the LAN, but normally the server would sync from an upstream NTP source —
# confirm this is intentional.
cat >/etc/chrony.conf <<EOF
server 10.0.241.175 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
allow 10.0.241.0/24
local stratum 10
rtcsync
logdir /var/log/chrony
EOF
systemctl restart chronyd  && systemctl enable chronyd 


# Client-side configuration (all other nodes)
cat >/etc/chrony.conf <<EOF
server 10.0.241.175 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
EOF
# Restart chronyd after editing so the new config takes effect
systemctl restart chronyd  && systemctl enable chronyd 
# Verify the configured time sources
chronyc sources


安装ceph

# Install the ceph packages and the cephadm bootstrap tool
yum -y install ceph ceph-common cephadm
# Install on ALL nodes

引导一个新集群

# All of the following runs on test-sujch-01 (10.0.241.201) only.
# Bootstrap the first mon/mgr; the image is pulled from the local harbor
# registry at 10.0.241.175:8080 (see cluster plan above).
cephadm --image 10.0.241.175:8080/ceph/ceph:v15 bootstrap --mon-ip 10.0.241.201
# Distribute the cluster's SSH public key so cephadm can manage the other hosts
ssh-copy-id -f -i /etc/ceph/ceph.pub root@10.0.241.201
ssh-copy-id -f -i /etc/ceph/ceph.pub root@10.0.241.202
ssh-copy-id -f -i /etc/ceph/ceph.pub root@10.0.241.203

# Add the remaining hosts to the cluster
ceph orch host  add test-sujch-02 10.0.241.202
ceph orch host  add test-sujch-03 10.0.241.203
# Run three mon replicas for high availability
ceph orch apply mon --placement="3"


# Automatically create OSDs on every unused, available device
ceph orch apply osd --all-available-devices
# Point the prometheus/alertmanager/grafana/node-exporter monitoring stack
# at images hosted on the local harbor registry
ceph config set mgr mgr/cephadm/container_image_node_exporter 10.0.241.175:8080/prometheus/node-exporter:v0.18.1
ceph config set mgr mgr/cephadm/container_image_grafana 10.0.241.175:8080/ceph/ceph-grafana:6.7.4
ceph config set mgr mgr/cephadm/container_image_alertmanager 10.0.241.175:8080/prometheus/alertmanager:v0.20.0
ceph config set mgr mgr/cephadm/container_image_prometheus 10.0.241.175:8080/prometheus/prometheus:v2.18.1
# Redeploy each service so the new image settings take effect
ceph orch redeploy node-exporter
ceph orch redeploy alertmanager
ceph orch redeploy grafana
ceph orch redeploy prometheus


RBD块存储

# Create an RBD pool for k8s (128 PGs / 128 PGPs) and tag it for rbd use
ceph osd pool create rbd-k8s 128 128
ceph osd pool application enable rbd-k8s rbd
# Create a dedicated client keyring restricted to this pool
ceph auth get-or-create client.rbd-k8s mon 'allow r' osd 'allow * pool=rbd-k8s'
ceph auth ls 

osd 操作

# Wipe a disk so it can be (re)used as an OSD.
# NOTE(review): "label:" looks like a hostname placeholder — the documented
# octopus syntax is "ceph orch device zap <hostname> <path> [--force]"; verify.
ceph orch device zap label:/dev/sda
# Add a new OSD on a specific host/device
ceph orch daemon add osd 10-0-103-231:/dev/sda

打标签


ceph orch host label add  master01 mon



监控器调整

对象存储


# Deploy RGW (object storage) and wire it into the dashboard
ceph orch apply rgw yunwei01 cn-east-1 --placement="3"
# Create a --system user whose keys the dashboard uses to talk to RGW
radosgw-admin user create --uid=yunwei01 --display-name=yunwei01     --system
# NOTE: -i expects a FILE containing the key, not the key string itself
ceph dashboard set-rgw-api-access-key -i access_key
ceph dashboard set-rgw-api-secret-key -i secret_key

# Start the CephFS metadata servers (setup not finished yet)
ceph orch apply mds CephFS  --placement="3"

用户密码策略

# Dashboard password policy
ceph dashboard set-pwd-policy-min-length num    # minimum password length
ceph dashboard set-pwd-policy-min-complexity    # minimum password complexity

# Enable/disable individual password-policy checks
ceph dashboard set-pwd-policy-check-length-enabled <true|false>
ceph dashboard set-pwd-policy-check-oldpwd-enabled <true|false>
ceph dashboard set-pwd-policy-check-username-enabled <true|false>
ceph dashboard set-pwd-policy-check-exclusion-list-enabled <true|false>
ceph dashboard set-pwd-policy-check-complexity-enabled <true|false>
ceph dashboard set-pwd-policy-check-sequential-chars-enabled <true|false>
ceph dashboard set-pwd-policy-check-repetitive-chars-enabled <true|false>
  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值