Deploying Ceph Nautilus on CentOS 7

1. Ceph Environment Planning

Hostname    Public network    Cluster network    Role
ceph01      10.12.7.213       10.12.252.213      osd, mon, mgr, rgw, admin
ceph02      10.12.7.214       10.12.252.214      osd, mon, mgr, rgw
ceph03      10.12.7.215       10.12.252.215      osd, mon, mgr, rgw
ceph04      10.12.7.216       10.12.252.216      osd
client      10.12.7.208       -                  client

2. System Environment Preparation

1) Disable the firewall and SELinux

   systemctl stop firewalld
   systemctl disable firewalld
   setenforce 0
   sed -i 's/enforcing/disabled/' /etc/selinux/config

2) Configure hosts resolution

   cat >> /etc/hosts << EOF
    10.12.7.213 ceph01
    10.12.7.214 ceph02
    10.12.7.215 ceph03
    10.12.7.216 ceph04
    10.12.7.208 client01
    EOF

3) Configure time synchronization

   systemctl start chronyd
   systemctl enable chronyd
   chronyc sources -n
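
The commands above only start chrony with its default configuration. If the nodes should sync against a specific NTP source instead of the default CentOS pool, a minimal sketch looks like this (ntp.aliyun.com is an assumption; substitute your own time server):

   sed -i '/^server /d' /etc/chrony.conf                    # drop the default pool servers (assumption: they are not reachable)
   echo "server ntp.aliyun.com iburst" >> /etc/chrony.conf
   systemctl restart chronyd
   chronyc sources -n                                       # the new source should be marked with '*' once selected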

4) Configure yum repositories

#Replace the stock system repo with the Aliyun mirror
[root@ceph01 yum.repos.d]# wget -O /etc/yum.repos.d/centos7.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@ceph01 yum.repos.d]# ll
total 12
-rw-r--r--. 1 root root 2523 Aug  4  2022 centos7.repo
[root@ceph01 yum.repos.d]# sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/centos7.repo 
#Configure the EPEL repo
[root@ceph01 yum.repos.d]# wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
#Configure the Ceph repo
[root@ceph01 yum.repos.d]# yum -y install https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/ceph-release-1-1.el7.noarch.rpm
#Switch the repo URLs to the Aliyun mirror https://mirrors.aliyun.com/ceph
[root@ceph01 yum.repos.d]# sed -i 's/download.ceph.com/mirrors.aliyun.com\/ceph/g' /etc/yum.repos.d/ceph.repo 
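#A routine follow-up (not from the original walkthrough): rebuild the yum metadata cache after swapping repositories
[root@ceph01 yum.repos.d]# yum clean all
[root@ceph01 yum.repos.d]# yum makecache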

5) Update the system kernel

#Download the kernel packages (versions change over time; download whichever is currently available)
[root@ceph01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm
[root@ceph01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-ml-devel-6.8.2-1.el7.elrepo.x86_64.rpm
[root@ceph01 ~]# yum localinstall -y kernel-ml-*
[root@ceph01 ~]# grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg
[root@ceph01 ~]# reboot
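
After the reboot, a quick check (added here for convenience) confirms the new kernel is actually running:

[root@ceph01 ~]# uname -r              # should report 6.8.2-1.el7.elrepo.x86_64 if the packages above were installed
[root@ceph01 ~]# grub2-editenv list    # shows the saved default boot entry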

6) Configure passwordless SSH login (generate the key pair on the admin/deploy node)

[root@ceph01 ~]# ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:KEZrGspeQeW+wZ+I9ExaaJ222YUrfu7ntcfH3JztTgs root@ceph01
The key's randomart image is:
+---[RSA 2048]----+
|     .           |
|    o            |
|   ...           |
|  ..=....        |
|  .==X..S.       |
|..o=X.O +        |
|...+ O =   ..Eo.=|
|. . . .. .. .o.*=|
| .   .+oo. .. .+o|
+----[SHA256]-----+
[root@ceph01 ~]# 
[root@ceph01 ~]# ssh-copy-id -i .ssh/id_rsa.pub 10.12.7.213
[root@ceph01 ~]# ssh-copy-id -i .ssh/id_rsa.pub 10.12.7.214
[root@ceph01 ~]# ssh-copy-id -i .ssh/id_rsa.pub 10.12.7.215
[root@ceph01 ~]# ssh-copy-id -i .ssh/id_rsa.pub 10.12.7.216
[root@ceph01 ~]# ssh-copy-id -i .ssh/id_rsa.pub 10.12.7.208
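#The five ssh-copy-id calls can also be written as a loop; a small sketch assuming the same host list as above (each host still prompts once for its root password)
[root@ceph01 ~]# for host in 10.12.7.213 10.12.7.214 10.12.7.215 10.12.7.216 10.12.7.208; do ssh-copy-id -i ~/.ssh/id_rsa.pub "$host"; done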

7) Install dependency packages

[root@ceph01 ceph]# yum install -y python-setuptools python-pip

8) Install the ceph-deploy tool (on the admin node)

#Install at least version 2.0.1
yum install -y https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/ceph-deploy-2.0.1-0.noarch.rpm
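#A quick check that the installed version satisfies the 2.0.1 requirement
[root@ceph01 ~]# ceph-deploy --version    # should print 2.0.1 for the package installed above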

3. Deploying the Ceph Cluster

1) Create a Ceph working directory on every node; all subsequent work happens in this directory

[root@ceph01 ~]# mkdir -p /etc/ceph
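
The command above only creates the directory on the admin node; a minimal sketch (relying on the passwordless SSH configured earlier) to create it on the remaining nodes as well:

[root@ceph01 ~]# for host in ceph02 ceph03 ceph04; do ssh "$host" 'mkdir -p /etc/ceph'; done    # the ceph packages also create this directory when installed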

2) Initialize the mon nodes


[root@ceph01 ~]# cd /etc/ceph/
#Deploy mon nodes on ceph0{1..3}
[root@ceph01 ceph]# ceph-deploy new --public-network 10.12.7.0/24 --cluster-network 10.12.252.0/24 ceph0{1..3}
#Manually install ceph-mon on each mon node (ceph01~03)
[root@ceph01 ceph]# yum -y install ceph-mon
#From the admin node, initialize the mon nodes (ceph01~03)
[root@ceph01 ceph]# ceph-deploy mon create ceph0{1..3}   #Create the mon nodes; because the monitors use the Paxos algorithm, an HA cluster needs an odd number of nodes, at least 3
[root@ceph01 ceph]# ceph-deploy --overwrite-conf mon create-initial        #Initialize the mon nodes and sync the configuration to all nodes; --overwrite-conf forces the config file to be overwritten
               

3) Verify the mon service is running

[root@ceph01 ceph]# ps -ef | grep mon
dbus      1040     1  0 15:28 ?        00:00:00 /bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation
root      1051     1  0 15:28 ?        00:00:00 /usr/sbin/NetworkManager --no-daemon
ceph      2400     1  0 17:18 ?        00:00:00 /usr/bin/ceph-mon -f --cluster ceph --id ceph01 --setuser ceph --setgroup ceph
root      2757  1533  0 17:21 pts/0    00:00:00 grep --color=auto mon
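
Alternatively (a quick extra check, run on each mon node), query the systemd unit directly; the unit name is ceph-mon@<hostname>:

[root@ceph01 ceph]# systemctl status ceph-mon@ceph01 --no-pager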

4) Distribute the admin keyring so multiple nodes can manage Ceph

#Set up the nodes that will be able to manage the Ceph cluster
#This lets the ceph command be run on each of those nodes to manage the cluster (nodes that need admin rights: ceph01~03)
[root@ceph01 ceph]# cd /etc/ceph
[root@ceph01 ceph]# ceph-deploy --overwrite-conf config push ceph0{1..3}        #Sync the configuration to all mon nodes; ceph.conf must be identical on every mon node
[root@ceph01 ceph]# ceph-deploy admin ceph0{1..3}            #Essentially copies the ceph.client.admin.keyring cluster auth file to each node

5) Deploy the ceph-mgr nodes

#Manually install ceph-mgr on each mgr node (ceph01~03)
[root@ceph01 ceph]# yum -y install ceph-mgr
#The ceph-mgr daemons run in an Active/Standby model, so if the active node or its ceph-mgr daemon fails, one of the standby instances can take over without interrupting service. Per the official architecture guidance, mgr should run on at least two nodes.
[root@ceph01 ceph]# ceph-deploy mgr create ceph0{1..3}

6) Verify the mgr service is running

[root@ceph01 ceph]# ps -ef | grep mgr
postfix   1439  1431  0 15:28 ?        00:00:00 qmgr -l -t unix -u
ceph      2984     1  9 17:45 ?        00:00:02 /usr/bin/ceph-mgr -f --cluster ceph --id ceph01 --setuser ceph --setgroup ceph
root      3114  1533  0 17:45 pts/0    00:00:00 grep --color=auto mgr
[root@ceph01 ceph]# 

7) Initialize the node (OSD) hosts

#Before deploying OSDs, install the required base packages on all nodes
[root@ceph01 ceph]# ceph-deploy install --no-adjust-repos --nogpgcheck ceph0{1..4}

8) Deploy the OSD service

a. List the available disks on the Ceph nodes

#Disks on multiple nodes can be listed at once
[root@ceph01 ceph]# ceph-deploy disk list ceph01
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy disk list ceph01
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : list
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0xe23518>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  host                          : ['ceph01']
[ceph_deploy.cli][INFO  ]  func                          : <function disk at 0xe07c08>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph01][DEBUG ] connected to host: ceph01 
[ceph01][DEBUG ] detect platform information from remote host
[ceph01][DEBUG ] detect machine type
[ceph01][DEBUG ] find the location of an executable
[ceph01][INFO  ] Running command: fdisk -l
[ceph01][INFO  ] Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
[ceph01][INFO  ] Disk /dev/sdc: 53.7 GB, 53687091200 bytes, 104857600 sectors
[ceph01][INFO  ] Disk /dev/sdb: 53.7 GB, 53687091200 bytes, 104857600 sectors
[ceph01][INFO  ] Disk /dev/sdd: 53.7 GB, 53687091200 bytes, 104857600 sectors
[ceph01][INFO  ] Disk /dev/sde: 53.7 GB, 53687091200 bytes, 104857600 sectors
[ceph01][INFO  ] Disk /dev/mapper/centos-root: 81.1 GB, 81075896320 bytes, 158351360 sectors
[ceph01][INFO  ] Disk /dev/mapper/centos-swap: 25.8 GB, 25769803776 bytes, 50331648 sectors
[root@ceph01 ceph]#

b. Wipe the node disks

#In case a "new" disk already contains data, it can be wiped first with:
/bin/dd if=/dev/zero of=/dev/<device> bs=1M count=10 conv=fsync
wipefs -a /dev/<device>
#Zap the disks that will be added as OSDs; be careful not to zap the system disk
[root@ceph01 ceph]# ceph-deploy disk zap ceph01 /dev/sdb /dev/sdc /dev/sdd /dev/sde
[root@ceph01 ceph]# ceph-deploy disk zap ceph02 /dev/sdb /dev/sdc /dev/sdd /dev/sde
[root@ceph01 ceph]# ceph-deploy disk zap ceph03 /dev/sdb /dev/sdc /dev/sdd /dev/sde
[root@ceph01 ceph]# ceph-deploy disk zap ceph04 /dev/sda /dev/sdc /dev/sdd /dev/sde

c. Add OSDs

Data is stored in separate categories:

data: the object data Ceph stores

block-db: RocksDB metadata

block-wal: the database write-ahead log (WAL)

There are three layout options:

>One disk type (everything on a single disk)

        >HDD or SSD

                >data: the object data Ceph stores

                >block-db: RocksDB metadata

                >block-wal: the database write-ahead log (WAL)

 #When everything goes on a single disk, use the following command
  ceph-deploy osd create {node} --data /path/to/data

>Two disk types (metadata and WAL on SSD, data on HDD)

        >SSD

                >block-db: RocksDB metadata

                >block-wal: the database write-ahead log (WAL)

        >HDD

                >data: the object data Ceph stores

#block-db and block-wal go on the SSD; if there is only one SSD it needs to be partitioned
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/ssd1 --block-wal /path/to/ssd2

>Three disk types (metadata on NVMe, WAL on SSD, data on HDD)

        >NVMe

                >block-db: RocksDB metadata

        >SSD

                >block-wal: the database write-ahead log (WAL)

        >HDD

                >data: the object data Ceph stores

#Three disk types, each holding one category of data
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/nvme --block-wal /path/to/ssd

This deployment uses a single disk type for the OSDs.

#OSD IDs are assigned sequentially starting from 0
[root@ceph01 ceph]# ceph-deploy osd create ceph01 --data /dev/sdb
[root@ceph01 ceph]# ceph-deploy osd create ceph01 --data /dev/sdc
[root@ceph01 ceph]# ceph-deploy osd create ceph01 --data /dev/sdd
[root@ceph01 ceph]# ceph-deploy osd create ceph01 --data /dev/sde
[root@ceph01 ceph]# ceph-deploy osd create ceph02 --data /dev/sdb
[root@ceph01 ceph]# ceph-deploy osd create ceph02 --data /dev/sdc
[root@ceph01 ceph]# ceph-deploy osd create ceph02 --data /dev/sdd
[root@ceph01 ceph]# ceph-deploy osd create ceph02 --data /dev/sde
[root@ceph01 ceph]# ceph-deploy osd create ceph03 --data /dev/sdb
[root@ceph01 ceph]# ceph-deploy osd create ceph03 --data /dev/sdc
[root@ceph01 ceph]# ceph-deploy osd create ceph03 --data /dev/sdd
[root@ceph01 ceph]# ceph-deploy osd create ceph03 --data /dev/sde
[root@ceph01 ceph]# ceph-deploy osd create ceph04 --data /dev/sda
[root@ceph01 ceph]# ceph-deploy osd create ceph04 --data /dev/sdc
[root@ceph01 ceph]# ceph-deploy osd create ceph04 --data /dev/sdd
[root@ceph01 ceph]# ceph-deploy osd create ceph04 --data /dev/sde
[root@ceph01 ~]# ceph -s
  cluster:
    id:     32e0a117-f0bf-4dbf-b077-e649757d433a
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 3d)
    mgr: ceph01(active, since 3d), standbys: ceph02, ceph03
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@ceph01 ~]# 

9) Resolving cluster warnings

#Fix the HEALTH_WARN: "mons are allowing insecure global_id reclaim"
Disable the insecure mode: ceph config set mon auth_allow_insecure_global_id_reclaim false (run from a node with admin access; the setting is stored in the mon config database and applies to all mons)
[root@ceph01 ~]# ceph config set mon auth_allow_insecure_global_id_reclaim false
[root@ceph01 ~]# ceph -s
  cluster:
    id:     32e0a117-f0bf-4dbf-b077-e649757d433a
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 3d)
    mgr: ceph01(active, since 3d), standbys: ceph02, ceph03
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@ceph01 ~]# 

10) Deploying RBD (block storage)

a. Create an RBD pool

#Pool creation command format (the default mode is replicated)
ceph osd pool create <pool_name> <pg_num> <pgp_num> [replicated|erasure]
#Create a pool
[root@ceph01 ceph]# ceph osd pool create myrbd1 64 64
pool 'myrbd1' created
[root@ceph01 ceph]# ceph osd pool ls
myrbd1
[root@ceph01 ceph]#
#Enable the RBD application on the pool
[root@ceph01 ceph]# ceph osd pool application enable myrbd1 rbd
enabled application 'rbd' on pool 'myrbd1'
[root@ceph01 ceph]# 
#Initialize the pool with the rbd command
[root@ceph01 ceph]# rbd pool init -p myrbd1 

b. Create images

The rbd command is used to create, view, and delete block device images, as well as to clone images, create snapshots, roll back snapshots, view snapshots, and perform other management operations.

#Create a 5G image
[root@ceph01 ceph]# rbd create myimg1 --size 5G --pool myrbd1
#Create an image with only the layering feature enabled
[root@ceph01 ceph]# rbd create myimg2 --size 10G --pool myrbd1 --image-format 2 --image-feature layering
[root@ceph01 ceph]# rbd ls --pool myrbd1
myimg1
myimg2
#View details for a specific image
[root@ceph01 ceph]# rbd --image myimg2 --pool myrbd1 info
rbd image 'myimg2':
    size 10 GiB in 2560 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 12ae5332c635
    block_name_prefix: rbd_data.12ae5332c635
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Sun Apr  7 15:56:15 2024
    access_timestamp: Sun Apr  7 15:56:15 2024
    modify_timestamp: Sun Apr  7 15:56:15 2024
[root@ceph01 ceph]#
#Resize the image
[root@ceph01 ceph]# rbd resize -p myrbd1 --image myimg2 --size 20G
Resizing image: 100% complete...done.
[root@ceph01 ceph]# rbd --image myimg2 --pool myrbd1 info
rbd image 'myimg2':
    size 20 GiB in 5120 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 12ae5332c635
    block_name_prefix: rbd_data.12ae5332c635
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Sun Apr  7 15:56:15 2024
    access_timestamp: Sun Apr  7 15:56:15 2024
    modify_timestamp: Sun Apr  7 15:56:15 2024

#When resizing with resize, it is generally recommended to only grow images; shrinking requires the --allow-shrink option
[root@ceph01 ceph]# rbd resize -p myrbd1 --image myimg2 --size 15G --allow-shrink
Resizing image: 100% complete...done.
[root@ceph01 ceph]# rbd --image myimg2 --pool myrbd1 info
rbd image 'myimg2':
    size 15 GiB in 3840 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 12ae5332c635
    block_name_prefix: rbd_data.12ae5332c635
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Sun Apr  7 15:56:15 2024
    access_timestamp: Sun Apr  7 15:56:15 2024
    modify_timestamp: Sun Apr  7 15:56:15 2024
[root@ceph01 ceph]#
#Delete an image
[root@ceph01 ceph]# rbd rm -p myrbd1 --image myimg1
[root@ceph01 ceph]# rbd remove myrbd1/myimg1
#The trash command is recommended: it moves the image to the trash, from which it can be restored if needed
[root@ceph01 ceph]# rbd trash move myrbd1/myimg1 
[root@ceph01 ceph]# rbd ls -l -p myrbd1
NAME   SIZE   PARENT FMT PROT LOCK 
myimg2 15 GiB          2           
#List images in the trash

[root@ceph01 ceph]# rbd trash list -p myrbd1
12a2d4e4e294 myimg1
[root@ceph01 ceph]#
#Restore an image from the trash
[root@ceph01 ceph]# rbd trash restore myrbd1/12a2d4e4e294
[root@ceph01 ceph]# rbd ls -l -p myrbd1
NAME   SIZE   PARENT FMT PROT LOCK 
myimg1  5 GiB          2           
myimg2 15 GiB          2           
[root@ceph01 ceph]#

c. Use RBD block storage from a client

#Check current Ceph usage
[root@ceph01 ceph]# ceph df
RAW STORAGE:
    CLASS     SIZE        AVAIL       USED        RAW USED     %RAW USED 
    hdd       800 GiB     784 GiB     150 MiB       16 GiB          2.02 
    TOTAL     800 GiB     784 GiB     150 MiB       16 GiB          2.02 
 
POOLS:
    POOL       ID     PGS     STORED     OBJECTS     USED        %USED     MAX AVAIL 
    myrbd1      1      64      405 B           7     768 KiB         0       248 GiB 
[root@ceph01 ceph]#
#Install ceph-common on the client
[root@client01 ~]# yum -y install ceph-common
#Copy the config and auth file from the cluster to the client (a dedicated key will be created later instead of using the admin key)
[root@ceph01 ceph]# scp /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 10.12.7.208:/etc/ceph/
ceph.conf                                                                                                                  100%  298   297.6KB/s   00:00    
ceph.client.admin.keyring                                                                                                  100%  151   197.9KB/s   00:00    
[root@ceph01 ceph]#
#Map the image as a block device on the client (then format and mount it like a newly added disk)
[root@client01 ~]# rbd -p myrbd1 map myimg2 
/dev/rbd0
[root@client01 ~]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
rbd0            252:0    0   15G  0 disk 
sr0              11:0    1 1024M  0 rom  

fd0               2:0    1    4K  0 disk 
sda               8:0    0  100G  0 disk 
├─sda2            8:2    0 99.5G  0 part 
│ ├─centos-swap 253:1    0   24G  0 lvm  [SWAP]
│ └─centos-root 253:0    0 75.5G  0 lvm  /
└─sda1            8:1    0  500M  0 part /boot
[root@client01 ~]# 
#Format the device
[root@client01 ceph]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0              isize=512    agcount=8, agsize=163824 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=1310592, imaxpct=25
         =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@client01 ceph]# 
#Mount and use it
[root@client01 ceph]# mount /dev/rbd0 /opt/

[root@client01 ceph]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 977M     0  977M   0% /dev
tmpfs                    991M     0  991M   0% /dev/shm
tmpfs                    991M   17M  975M   2% /run
tmpfs                    991M     0  991M   0% /sys/fs/cgroup
/dev/mapper/centos-root   76G  2.2G   74G   3% /
/dev/sda1                497M  159M  338M  33% /boot
tmpfs                    199M     0  199M   0% /run/user/0
/dev/rbd0                5.0G   33M  5.0G   1% /opt
[root@client01 ceph]# 
#Unmount and unmap
[root@client01 ceph]# umount /opt/
[root@client01 ceph]# rbd unmap /dev/rbd0
[root@client01 ceph]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom  
fd0               2:0    1    4K  0 disk 
sda               8:0    0  100G  0 disk 
├─sda2            8:2    0 99.5G  0 part 
│ ├─centos-swap 253:1    0   24G  0 lvm  [SWAP]
│ └─centos-root 253:0    0 75.5G  0 lvm  /
└─sda1            8:1    0  500M  0 part /boot
[root@client01 ceph]# 

d. Use RBD block storage from a client (non-admin user)

#Create a regular (non-admin) user
[root@ceph01 ceph]# ceph auth add client.ceshi mon 'allow r' osd 'allow rwx pool=myrbd1'
added key for client.ceshi
#Verify the user information
[root@ceph01 ceph]# ceph auth get client.ceshi
[client.ceshi]
    key = AQDnpxhmLOIkOhAA4svWmWGHwhyFfjzxVjfTIQ==
    caps mon = "allow r"
    caps osd = "allow rwx pool=myrbd1"
exported keyring for client.ceshi
[root@ceph01 ceph]# 
#Create a keyring file
[root@ceph01 ceph]# ceph-authtool --create-keyring ceph.client.ceshi.keyring
creating ceph.client.ceshi.keyring
#Export the user's keyring
[root@ceph01 ceph]# ceph auth get client.ceshi -o  ceph.client.ceshi.keyring
exported keyring for client.ceshi
[root@ceph01 ceph]# cat ceph.client.ceshi.keyring
[client.ceshi]
    key = AQDnpxhmLOIkOhAA4svWmWGHwhyFfjzxVjfTIQ==
    caps mon = "allow r"
    caps osd = "allow rwx pool=myrbd1"
#Install ceph-common on the client
[root@client01 ~]# yum -y install ceph-common
#Copy the keyring and config file to the client
[root@ceph01 ceph]# scp ceph.client.ceshi.keyring ceph.conf  client01:/etc/ceph/
ceph.client.ceshi.keyring                                                                                                  100%  121   153.9KB/s   00:00    
[root@ceph01 ceph]# 
#Verify the user's permissions from the client
[root@client01 ceph]# ceph --user ceshi -s
  cluster:
    id:     32e0a117-f0bf-4dbf-b077-e649757d433a
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 2d)
    mgr: ceph01(active, since 2d), standbys: ceph02, ceph03
    mds: mycephfs:1 {0=ceph03=up:active} 2 up:standby
    osd: 16 osds: 16 up (since 2d), 16 in (since 2d)
    rgw: 3 daemons active (ceph01, ceph02, ceph03)
 
  task status:
 
  data:
    pools:   11 pools, 608 pgs
    objects: 386 objects, 386 MiB
    usage:   28 GiB used, 772 GiB / 800 GiB avail
    pgs:     608 active+clean
 
#Map the RBD image
[root@client01 ceph]# rbd ls -l -p myrbd1
NAME   SIZE   PARENT FMT PROT LOCK 
myimg1  5 GiB          2           
myimg2 15 GiB          2           
[root@client01 ceph]# rbd --user ceshi -p myrbd1 map myimg1
/dev/rbd0
[root@client01 ceph]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
rbd0            252:0    0    5G  0 disk 
sr0              11:0    1 1024M  0 rom  
fd0               2:0    1    4K  0 disk 
sda               8:0    0  100G  0 disk 
├─sda2            8:2    0 99.5G  0 part 
│ ├─centos-swap 253:1    0   24G  0 lvm  [SWAP]
│ └─centos-root 253:0    0 75.5G  0 lvm  /
└─sda1            8:1    0  500M  0 part /boot
[root@client01 ceph]# 
#Format the device
[root@client01 ceph]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0              isize=512    agcount=8, agsize=163824 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=1310592, imaxpct=25
         =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@client01 ceph]# 
#Mount and use it
[root@client01 ceph]# mount /dev/rbd0 /opt/

[root@client01 ceph]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 977M     0  977M   0% /dev
tmpfs                    991M     0  991M   0% /dev/shm
tmpfs                    991M   17M  975M   2% /run
tmpfs                    991M     0  991M   0% /sys/fs/cgroup
/dev/mapper/centos-root   76G  2.2G   74G   3% /
/dev/sda1                497M  159M  338M  33% /boot
tmpfs                    199M     0  199M   0% /run/user/0
/dev/rbd0                5.0G   33M  5.0G   1% /opt
[root@client01 ceph]# 
#Unmount and unmap
[root@client01 ceph]# umount /opt/
[root@client01 ceph]# rbd unmap /dev/rbd0
[root@client01 ceph]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom  
fd0               2:0    1    4K  0 disk 
sda               8:0    0  100G  0 disk 
├─sda2            8:2    0 99.5G  0 part 
│ ├─centos-swap 253:1    0   24G  0 lvm  [SWAP]
│ └─centos-root 253:0    0 75.5G  0 lvm  /
└─sda1            8:1    0  500M  0 part /boot
[root@client01 ceph]# 

e. Expand an RBD image

#Create an RBD image
[root@ceph01 ceph]# rbd create myimg3 --size 5G --pool myrbd1
[root@ceph01 ceph]# rbd ls -l -p myrbd1
NAME   SIZE   PARENT FMT PROT LOCK 
myimg1 10 GiB          2      excl 
myimg2 15 GiB          2           
myimg3  5 GiB          2  
#Map the image on the client
[root@client01 ceph]# rbd --user ceshi -p myrbd1 map myimg3
/dev/rbd2
[root@client01 ceph]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
rbd0            252:0    0   10G  0 disk /opt
sr0              11:0    1 1024M  0 rom  
rbd1            252:16   0   15G  0 disk 
fd0               2:0    1    4K  0 disk 
sda               8:0    0  100G  0 disk 
├─sda2            8:2    0 99.5G  0 part 
│ ├─centos-swap 253:1    0   24G  0 lvm  [SWAP]
│ └─centos-root 253:0    0 75.5G  0 lvm  /
└─sda1            8:1    0  500M  0 part /boot
rbd2            252:32   0    5G  0 disk 
#Format the device
[root@client01 ceph]# mkfs.xfs /dev/rbd2 
meta-data=/dev/rbd2              isize=512    agcount=8, agsize=163824 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=1310592, imaxpct=25
         =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

[root@client01 ceph]# mount /dev/rbd2 /data/


[root@client01 ceph]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 977M     0  977M   0% /dev
tmpfs                    991M     0  991M   0% /dev/shm
tmpfs                    991M   17M  975M   2% /run
tmpfs                    991M     0  991M   0% /sys/fs/cgroup
/dev/mapper/centos-root   76G  2.2G   74G   3% /
/dev/sda1                497M  159M  338M  33% /boot
tmpfs                    199M     0  199M   0% /run/user/0
/dev/rbd0                5.0G   33M  5.0G   1% /opt
/dev/rbd2                5.0G   33M  5.0G   1% /data
[root@client01 ceph]#
#Expand the RBD image
[root@ceph01 ceph]# rbd resize -p myrbd1 --image myimg3 --size 10G
Resizing image: 100% complete...done.
[root@ceph01 ceph]# rbd ls -l -p myrbd1
NAME   SIZE   PARENT FMT PROT LOCK 
myimg1 10 GiB          2      excl 
myimg2 15 GiB          2           
myimg3 10 GiB          2      excl 
#Verify on the client that the new size is visible
[root@client01 ceph]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
rbd0            252:0    0   10G  0 disk /opt
sr0              11:0    1 1024M  0 rom  
rbd1            252:16   0   15G  0 disk 
fd0               2:0    1    4K  0 disk 
sda               8:0    0  100G  0 disk 
├─sda2            8:2    0 99.5G  0 part 
│ ├─centos-swap 253:1    0   24G  0 lvm  [SWAP]
│ └─centos-root 253:0    0 75.5G  0 lvm  /
└─sda1            8:1    0  500M  0 part /boot
rbd2            252:32   0   10G  0 disk /data
[root@client01 ceph]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 977M     0  977M   0% /dev
tmpfs                    991M     0  991M   0% /dev/shm
tmpfs                    991M   17M  975M   2% /run
tmpfs                    991M     0  991M   0% /sys/fs/cgroup
/dev/mapper/centos-root   76G  2.2G   74G   3% /
/dev/sda1                497M  159M  338M  33% /boot
tmpfs                    199M     0  199M   0% /run/user/0
/dev/rbd0                5.0G   33M  5.0G   1% /opt
/dev/rbd2                5.0G   33M  5.0G   1% /data

#Grow the filesystem
[root@client01 ceph]# xfs_growfs /dev/rbd2
meta-data=/dev/rbd2              isize=512    agcount=8, agsize=163824 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0 spinodes=0
data     =                       bsize=4096   blocks=1310592, imaxpct=25
         =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal               bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 1310592 to 2621440
[root@client01 ceph]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 977M     0  977M   0% /dev
tmpfs                    991M     0  991M   0% /dev/shm
tmpfs                    991M   17M  975M   2% /run
tmpfs                    991M     0  991M   0% /sys/fs/cgroup
/dev/mapper/centos-root   76G  2.2G   74G   3% /
/dev/sda1                497M  159M  338M  33% /boot
tmpfs                    199M     0  199M   0% /run/user/0
/dev/rbd0                5.0G   33M  5.0G   1% /opt
/dev/rbd2                 10G   33M   10G   1% /data

f. The RBD trash mechanism

[root@ceph01 ceph]# rbd help trash
    status                            Show the status of this image.
    trash list (trash ls)             List trash images.
    trash move (trash mv)             Move an image to the trash.
    trash purge                       Remove all expired images from trash.
    trash remove (trash rm)           Remove an image from trash.
    trash restore                     Restore an image from trash.
#Check the image status
[root@ceph01 ceph]# rbd status -p myrbd1 --image myimg2
Watchers:
    watcher=10.12.7.208:0/4032975662 client.235883 cookie=18446462598732840963   #the image is mapped and in use
[root@ceph01 ceph]# 
#Unmap the image on the client
[root@client01 ceph]# rbd --user ceshi -p myrbd1 unmap /dev/rbd1
#Check the image status again
[root@ceph01 ceph]# rbd status -p myrbd1 --image myimg2
Watchers: none
#Move the image to the trash
[root@ceph01 ceph]# rbd trash move -p myrbd1 --image myimg2
#List the trash
[root@ceph01 ceph]# rbd trash list -p myrbd1
12ae5332c635 myimg2
#Restore the image
[root@ceph01 ceph]# rbd trash restore  -p myrbd1 --image myimg2 --image-id 12ae5332c635
[root@ceph01 ceph]# rbd trash list -p myrbd1
[root@ceph01 ceph]# rbd ls -l -p myrbd1
NAME   SIZE   PARENT FMT PROT LOCK 
myimg1 10 GiB          2      excl 
myimg2 15 GiB          2           
myimg3 10 GiB          2      excl 
[root@ceph01 ceph]# 
#Permanently delete the image from the trash
[root@ceph01 ceph]# rbd trash remove -p myrbd1  --image-id 12ae5332c635
Removing image: 100% complete...done.
[root@ceph01 ceph]# rbd trash list -p myrbd1
[root@ceph01 ceph]# 

g. RBD image snapshots

Command reference:
[root@ceph01 ~]# rbd help snap
    snap create (snap add)            #create a snapshot
    snap limit clear                  #clear the snapshot count limit on an image
    snap limit set                    #set the snapshot limit for an image
    snap list (snap ls)               #list snapshots
    snap protect                      #protect a snapshot from deletion
    snap purge                        #delete all unprotected snapshots
    snap remove (snap rm)             #delete a snapshot
    snap rename                       #rename a snapshot
    snap rollback (snap revert)       #roll back to a snapshot
    snap unprotect                    #allow a snapshot to be deleted (remove protection)
#Data before taking the snapshot
[root@client01 data]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 977M     0  977M   0% /dev
tmpfs                    991M     0  991M   0% /dev/shm
tmpfs                    991M   25M  967M   3% /run
tmpfs                    991M     0  991M   0% /sys/fs/cgroup
/dev/mapper/centos-root   76G  2.2G   74G   3% /
/dev/sda1                497M  159M  338M  33% /boot
/dev/rbd0                5.0G   33M  5.0G   1% /opt
/dev/rbd2                 10G  102M  9.9G   1% /data
tmpfs                    199M     0  199M   0% /run/user/0
[root@client01 data]# ll
total 69808
-rw-r--r--. 1 root root 71482352 Apr 15 15:59 kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm
[root@client01 data]# 
#Create and verify the snapshot
[root@ceph01 ~]# rbd snap create -p myrbd1 --image myimg3 --snap myimg3-snap-20240415
[root@ceph01 ~]# rbd snap list -p myrbd1 --image myimg3
SNAPID NAME                 SIZE   PROTECTED TIMESTAMP                
     4 myimg3-snap-20240415 10 GiB           Mon Apr 15 16:19:35 2024 
[root@ceph01 ~]# 
#Delete the data and unmap the image
[root@client01 data]# ll
total 69808
-rw-r--r--. 1 root root 71482352 Apr 15 15:59 kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm
[root@client01 data]# rm -f kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm 
[root@client01 data]# ll
total 0
[root@client01 /]# umount /data/
[root@client01 /]# rbd --user ceshi -p myrbd1 unmap /dev/rbd2
[root@client01 /]# 
#Roll back to the snapshot
[root@ceph01 ~]# rbd snap rollback -p myrbd1 --image myimg3 --snap myimg3-snap-20240415
Rolling back to snapshot: 100% complete...done.
[root@ceph01 ~]# 
#Verify the rollback succeeded
[root@client01 /]# rbd --user ceshi -p myrbd1 map myimg3
/dev/rbd1
[root@client01 /]# mount /dev/rbd1 /data/
[root@client01 /]# ll /data/
total 69808
-rw-r--r--. 1 root root 71482352 Apr 15 15:59 kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm
[root@client01 /]#
#Delete the snapshot and verify
[root@ceph01 ~]# rbd snap remove -p myrbd1 --image myimg3 --snap myimg3-snap-20240415
Removing snap: 100% complete...done.
[root@ceph01 ~]# rbd snap list -p myrbd1 --image myimg3
[root@ceph01 ~]# 
#Snapshot count limit (limit of two; creating a third fails)
[root@ceph01 ~]# rbd snap limit set -p myrbd1 --image myimg3 --limit 2
[root@ceph01 ~]# rbd snap create -p myrbd1 --image myimg3 --snap myimg3-snap-20240415-1
[root@ceph01 ~]# rbd snap create -p myrbd1 --image myimg3 --snap myimg3-snap-20240415-2
[root@ceph01 ~]# rbd snap create -p myrbd1 --image myimg3 --snap myimg3-snap-20240415-3
rbd: failed to create snapshot: (122) Disk quota exceeded
[root@ceph01 ~]# 
#Remove the snapshot count limit
[root@ceph01 ~]# rbd snap limit clear -p myrbd1 --image myimg3
[root@ceph01 ~]# rbd snap list -p myrbd1 --image myimg3
SNAPID NAME                   SIZE   PROTECTED TIMESTAMP                
     6 myimg3-snap-20240415-1 10 GiB           Mon Apr 15 16:31:44 2024 
     7 myimg3-snap-20240415-2 10 GiB           Mon Apr 15 16:31:47 2024 
[root@ceph01 ~]# rbd snap create -p myrbd1 --image myimg3 --snap myimg3-snap-20240415-3
[root@ceph01 ~]# rbd snap list -p myrbd1 --image myimg3
SNAPID NAME                   SIZE   PROTECTED TIMESTAMP                
     6 myimg3-snap-20240415-1 10 GiB           Mon Apr 15 16:31:44 2024 
     7 myimg3-snap-20240415-2 10 GiB           Mon Apr 15 16:31:47 2024 
    10 myimg3-snap-20240415-3 10 GiB           Mon Apr 15 16:33:25 2024 
[root@ceph01 ~]# 

11) Deploy the rgw nodes (object storage)

a. Install ceph-radosgw

#Install the required ceph-radosgw package (on every rgw node)
[root@ceph01 ceph]# yum -y install ceph-radosgw
[root@ceph02 ceph]# yum -y install ceph-radosgw
[root@ceph03 ceph]# yum -y install ceph-radosgw
#Create the rgw daemons from the admin node
[root@ceph01 ceph]# ceph-deploy --overwrite-conf rgw create ceph0{1..3}
#Change the listening port (Civetweb listens on port 7480 by default and serves HTTP)
[root@ceph01 ceph]# vim ceph.conf 
....
[client.rgw.ceph01]  #ceph01 is the hostname used at startup
rgw_host = ceph01
rgw_frontends = civetweb port=10000 num_threads=600 request_timeout_ms=70000
log file = /var/log/ceph/client.rgw.rgwweb1.log
[client.rgw.ceph02]
rgw_host = ceph02
rgw_frontends = civetweb port=10000 num_threads=600 request_timeout_ms=70000
log file = /var/log/ceph/client.rgw.rgwweb2.log
[client.rgw.ceph03]
rgw_host = ceph03
rgw_frontends = civetweb port=10000 num_threads=600 request_timeout_ms=70000
log file = /var/log/ceph/client.rgw.rgwweb3.log

#rgw_host: the name or IP address of the corresponding RadosGW
#rgw_frontends: configures the listening port, whether to use https, and a few common options (a hedged https sketch follows this list):
1. port: for https, append an 's' to the port number.
2. ssl_certificate: path to the certificate.
3. num_threads: maximum concurrent connections, default 50; this is usually set higher in production clusters.
4. request_timeout_ms: send/receive timeout in milliseconds, default 30000.
5. access_log_file: access log path, empty by default.
6. error_log_file: error log path, empty by default.
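
For reference, a hedged sketch of what an https frontend could look like with these options; the 10443s port and the certificate path /etc/ceph/rgw.pem are assumptions, not values used in this deployment:

[client.rgw.ceph01]
rgw_host = ceph01
rgw_frontends = civetweb port=10443s ssl_certificate=/etc/ceph/rgw.pem num_threads=600 request_timeout_ms=70000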

#After modifying ceph.conf, push the configuration file to the rgw nodes and then restart the corresponding RadosGW service
[root@ceph01 ceph]# ceph-deploy --overwrite-conf config push ceph0{1..3}
#Restart the rgw service on every rgw node
[root@ceph01 ceph]# systemctl restart ceph-radosgw.target
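
A simple sanity check (added here, not from the original text) is an anonymous request against any rgw node; Civetweb should return a small ListAllMyBucketsResult XML document if the frontend is listening on the new port:

[root@ceph01 ceph]# curl http://ceph01:10000    # an XML response means the rgw frontend is up on port 10000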

b. Create a RadosGW account

#Create a RadosGW account
#On the admin node, use the radosgw-admin command to create the account
[root@ceph01 ceph]# radosgw-admin user create --uid="rgwuser" --display-name="rgw access user"
{
    "user_id": "rgwuser",
    "display_name": "rgw access user",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [],
    "keys": [
        {
            "user": "rgwuser",
            "access_key": "DM8T9KSSZ4ZZOTTV1XAL",
            "secret_key": "TyAZusFlMAqJeNSzUonvhUIP3c98xGBLKYU4PCj0"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}

[root@ceph01 ceph]# 
#On success the user's basic information is printed; the two most important fields are access_key and secret_key. If you lose the user information after creation, it can be viewed again with the following command
radosgw-admin user info --uid="rgwuser"

c. Test rgw from a client

#Test from a client
#Install s3cmd
[root@client01 ~]# yum -y install s3cmd
#Configure s3cmd
#Run s3cmd --configure to generate the config file; press Enter through the prompts, skip the access test with the supplied credentials, and save the configuration
[root@client01 ~]# s3cmd --configure
......
...
Test access with supplied credentials? [Y/n] n

Save settings? [y/N] y
Configuration saved to '/root/.s3cfg'
#Edit the following settings
[root@client01 ~]# vim /root/.s3cfg 
access_key = xxx
secret_key = xxx
host_base = 10.12.7.213:10000
host_bucket = ""
use_https = False
#Test the rgw endpoint
[root@ceph01 ~]# s3cmd mb s3://test
ERROR: S3 error: 403 (SignatureDoesNotMatch)
[root@ceph01 ~]# vim .s3cfg 
[root@ceph01 ~]# s3cmd mb s3://test
Bucket 's3://test/' created
[root@ceph01 ~]# s3cmd ls
2024-04-07 10:36  s3://test
[root@ceph01 ~]# s3cmd put kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm s3://test
upload: 'kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm' -> 's3://test/kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm'  [part 1 of 5, 15MB] [1 of 1]
 15728640 of 15728640   100% in    2s     6.56 MB/s  done
upload: 'kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm' -> 's3://test/kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm'  [part 2 of 5, 15MB] [1 of 1]
 15728640 of 15728640   100% in    0s    38.57 MB/s  done
upload: 'kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm' -> 's3://test/kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm'  [part 3 of 5, 15MB] [1 of 1]
 15728640 of 15728640   100% in    0s    37.44 MB/s  done
upload: 'kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm' -> 's3://test/kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm'  [part 4 of 5, 15MB] [1 of 1]
 15728640 of 15728640   100% in    0s    37.66 MB/s  done
upload: 'kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm' -> 's3://test/kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm'  [part 5 of 5, 8MB] [1 of 1]
 8567792 of 8567792   100% in    0s    24.51 MB/s  done
[root@ceph01 ~]# s3cmd ls s3://test
2024-04-07 10:37     71482352  s3://test/kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm
[root@ceph01 ~]# 
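#To round out the test, the object can be downloaded back and compared against the original file (the /tmp path below is an arbitrary choice)
[root@ceph01 ~]# s3cmd get s3://test/kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm /tmp/kernel-check.rpm
[root@ceph01 ~]# md5sum kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm /tmp/kernel-check.rpm    # the two checksums should match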

12) Deploy the mds nodes (CephFS file system)

a. Install ceph-mds

#Install ceph-mds (required on every mds node)
[root@ceph01 ~]# yum -y install ceph-mds
[root@ceph02 ~]# yum -y install ceph-mds
[root@ceph03 ~]# yum -y install ceph-mds
#Create the mds services from the admin node
[root@ceph01 ceph]# ceph-deploy mds create ceph0{1..3}

#Create the storage pools and enable the CephFS file system
[root@ceph01 ceph]# ceph osd pool create cephfs-matedata 32 32
pool 'cephfs-matedata' created
[root@ceph01 ceph]# ceph osd pool create cephfs-data 64 64
pool 'cephfs-data' created
#Create the CephFS. Command format: ceph fs new <FS_NAME> <CEPHFS_METADATA_NAME> <CEPHFS_DATA_NAME>
[root@ceph01 ceph]# ceph fs new mycephfs cephfs-matedata cephfs-data 
new fs with metadata pool 9 and data pool 10
#List the CephFS file systems
[root@ceph01 ceph]# ceph fs ls
name: mycephfs, metadata pool: cephfs-matedata, data pools: [cephfs-data ]
[root@ceph01 ceph]# 
#Check the mds status (ceph02 is currently the active node)
[root@ceph01 ceph]# ceph mds stat
mycephfs:1 {0=ceph02=up:active} 2 up:standby
#Check the CephFS status
[root@ceph01 ceph]# ceph fs status mycephfs
mycephfs - 1 clients
========
+------+--------+--------+---------------+-------+-------+
| Rank | State  |  MDS   |    Activity   |  dns  |  inos |
+------+--------+--------+---------------+-------+-------+
|  0   | active | ceph02 | Reqs:    0 /s |   15  |   18  |
+------+--------+--------+---------------+-------+-------+
+-----------------+----------+-------+-------+
|       Pool      |   type   |  used | avail |
+-----------------+----------+-------+-------+
| cephfs-matedata | metadata | 1920k |  247G |
|   cephfs-data   |   data   |  455M |  247G |
+-----------------+----------+-------+-------+
+-------------+
| Standby MDS |
+-------------+
|    ceph03   |
|    ceph01   |
+-------------+
MDS version: ceph version 14.2.22 (ca74598065096e6fcbd8433c8779a2be0c889351) nautilus (stable)
[root@ceph01 ceph]#

b. Mount CephFS on a client

#Mount CephFS on the client (testing with the admin key here; a dedicated key is used later)
#Copy the admin key from the admin node to the client
[root@ceph01 ceph]# cat ceph.client.admin.keyring 
[client.admin]
    key = AQDnHg1mFbqgDhAAbwaQVkyKPmrvazh6/RCfTg==
    caps mds = "allow *"
    caps mgr = "allow *"
    caps mon = "allow *"
    caps osd = "allow *"
[root@ceph01 ceph]#
#The client mounts CephFS through the mon nodes on port 6789
#Command format:
mount -t ceph node01:6789,node02:6789,node03:6789:/  <local mount point>  -o name=<user name>,secret=<secret key>
mount -t ceph node01:6789,node02:6789,node03:6789:/  <local mount point>  -o name=<user name>,secretfile=<secret key file>
#Example: mount using the key string
[root@client01 ceph]# mount -t ceph ceph01:6789,ceph02:6789,ceph03:6789:/ /opt/ -o name=admin,secret=AQDnHg1mFbqgDhAAbwaQVkyKPmrvazh6/RCfTg== 
[root@client01 ceph]# df -h
Filesystem                                            Size  Used Avail Use% Mounted on
devtmpfs                                              977M     0  977M   0% /dev
tmpfs                                                 991M     0  991M   0% /dev/shm
tmpfs                                                 991M   17M  975M   2% /run
tmpfs                                                 991M     0  991M   0% /sys/fs/cgroup
/dev/mapper/centos-root                                76G  2.2G   74G   3% /
/dev/sda1                                             497M  159M  338M  33% /boot
tmpfs                                                 199M     0  199M   0% /run/user/0
10.12.7.213:6789,10.12.7.214:6789,10.12.7.215:6789:/  248G     0  248G   0% /opt
[root@client01 ceph]# ll /opt/
total 0
[root@client01 ceph]# cd
[root@client01 ~]# cp kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm /opt/
[root@client01 ~]# ll /opt/
total 69807
-rw-r--r--. 1 root root 71482352 Apr  8 11:50 kernel-ml-6.8.2-1.el7.elrepo.x86_64.rpm
[root@client01 ~]# 

#Example: mount using a key file
#Copy the keyring file to the client and create a key file (testing with the admin user; a new user is created later)
[root@ceph01 ceph]# scp ceph.client.admin.keyring client01:/etc/ceph/
ceph.client.admin.keyring                                                                                                  100%  151   187.2KB/s   00:00    
[root@ceph01 ceph]# 
#Create the key file
[root@client01 ceph]#  ceph-authtool -n client.admin -p ceph.client.admin.keyring > admin.key
[root@client01 ceph]# ll
total 16
-rw-r--r--. 1 root root  41 Apr  8 13:53 admin.key
-rw-------. 1 root root 151 Apr  8 11:39 ceph.client.admin.keyring
-rw-r--r--. 1 root root 298 Apr  7 16:16 ceph.conf
-rw-r--r--. 1 root root  92 Jun 30  2021 rbdmap
[root@client01 ceph]# 
#Mount using the key file
[root@client01 ceph]# mount -t ceph ceph01:6789,ceph02:6789,ceph03:6789:/ /opt/ -o name=admin,secretfile=/etc/ceph/admin.key
[root@client01 ceph]# df -h
Filesystem                                            Size  Used Avail Use% Mounted on
devtmpfs                                              977M     0  977M   0% /dev
tmpfs                                                 991M     0  991M   0% /dev/shm
tmpfs                                                 991M   17M  975M   2% /run
tmpfs                                                 991M     0  991M   0% /sys/fs/cgroup
/dev/mapper/centos-root                                76G  2.2G   74G   3% /
/dev/sda1                                             497M  159M  338M  33% /boot
tmpfs                                                 199M     0  199M   0% /run/user/0
10.12.7.213:6789,10.12.7.214:6789,10.12.7.215:6789:/  248G   68M  248G   1% /opt
[root@client01 ceph]# 

c. Create CephFS users (non-admin users)

#Create a CephFS user (non-admin)
#Syntax: ceph fs authorize  <fs_name>  client.<client_id>  <path-in-cephfs>  rw
#The account is client.test and the user name is test; test has read/write access to the / root of the Ceph file system (note: not the operating system's root directory)
[root@ceph01 ceph]# ceph fs authorize mycephfs client.test / rw | tee /etc/ceph/test.keyring
[client.test]
    key = AQD7pxNmncugGRAAv1YmmCaUJdVYg/hA1tDvRQ==
[root@ceph01 ceph]# ll
total 428
-rw------- 1 root root    113 Apr  3 17:20 ceph.bootstrap-mds.keyring
-rw------- 1 root root    113 Apr  3 17:20 ceph.bootstrap-mgr.keyring
-rw------- 1 root root    113 Apr  3 17:20 ceph.bootstrap-osd.keyring
-rw------- 1 root root    113 Apr  3 17:20 ceph.bootstrap-rgw.keyring
-rw------- 1 root root    151 Apr  3 17:33 ceph.client.admin.keyring
-rw-r--r-- 1 root root    789 Apr  8 11:13 ceph.conf
-rw-r--r-- 1 root root 400872 Apr  8 11:13 ceph-deploy-ceph.log
-rw------- 1 root root     73 Apr  3 16:30 ceph.mon.keyring
-rw-r--r-- 1 root root     92 Jun 30  2021 rbdmap
-rw-r--r-- 1 root root     62 Apr  8 16:16 test.keyring
[root@ceph01 ceph]# 

# The account is client.test1 and the user name is test1; test1 has read-only access to the / root of the file system and read/write access to its /ceshi subdirectory
[root@ceph01 ceph]# ceph fs authorize mycephfs client.test1 / r /ceshi rw | tee /etc/ceph/test1.keyring
[client.test1]
    key = AQDPqBNmuopbNBAACBWug5qg3ecJH95HbGe5cw==
[root@ceph01 ceph]# ll
total 432
-rw------- 1 root root    113 Apr  3 17:20 ceph.bootstrap-mds.keyring
-rw------- 1 root root    113 Apr  3 17:20 ceph.bootstrap-mgr.keyring
-rw------- 1 root root    113 Apr  3 17:20 ceph.bootstrap-osd.keyring
-rw------- 1 root root    113 Apr  3 17:20 ceph.bootstrap-rgw.keyring
-rw------- 1 root root    151 Apr  3 17:33 ceph.client.admin.keyring
-rw-r--r-- 1 root root    789 Apr  8 11:13 ceph.conf
-rw-r--r-- 1 root root 400872 Apr  8 11:13 ceph-deploy-ceph.log
-rw------- 1 root root     73 Apr  3 16:30 ceph.mon.keyring
-rw-r--r-- 1 root root     92 Jun 30  2021 rbdmap
-rw-r--r-- 1 root root     63 Apr  8 16:20 test1.keyring
-rw-r--r-- 1 root root     62 Apr  8 16:16 test.keyring
[root@ceph01 ceph]#
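
As a hedged usage sketch, the client.test account created above can be used to mount the file system from the client; the key string comes from the output above, and /mnt is an arbitrary mount point chosen for this example:

#Copy the keyring to the client (optional when passing secret= directly)
[root@ceph01 ceph]# scp test.keyring client01:/etc/ceph/ceph.client.test.keyring
#Mount as the test user, same syntax as the admin example earlier
[root@client01 ~]# mount -t ceph ceph01:6789,ceph02:6789,ceph03:6789:/ /mnt -o name=test,secret=AQD7pxNmncugGRAAv1YmmCaUJdVYg/hA1tDvRQ==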

4. Adding and Removing OSD Nodes

1) Add an OSD node

a. Perform steps 1~6 of the system environment preparation

2.1 Disable the firewall and SELinux
2.2 Configure hosts resolution (add the new node's hostname on all cluster nodes)
2.3 Configure time synchronization
2.4 Configure yum repositories
2.5 Update the system kernel
2.6 Configure passwordless SSH from the ceph-deploy node to the new node

b. Perform steps 7 and 8 of the cluster deployment

3.7 Initialize the new node
1) Set the following flags first, so that data migration happens during off-peak hours (a quick flag check is sketched after this list)
    ceph osd set norebalance
    ceph osd set norecover
    ceph osd set noin
    ceph osd set nobackfill
2) Perform step 3.8 to deploy the OSD service
3) After deployment, unset the flags during off-peak hours so the cluster can rebalance
    ceph osd unset norebalance
    ceph osd unset norecover
    ceph osd unset noin
    ceph osd unset nobackfill
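
A quick way to confirm which flags are currently set (run from any node with admin access):

    ceph osd dump | grep flags    # lists norebalance,norecover,noin,nobackfill while they are set
    ceph -s                       # the same flags also appear as a HEALTH_WARN in the status output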

2) Decommission an OSD node

a. Remove all OSDs on the node being decommissioned

a) Command reference
#View the OSD IDs (run on the admin node)
ceph osd tree
#Mark the device out (run on the admin node)
ceph osd out {osd-num}
#Stop the daemon (run on the OSD node)
systemctl stop ceph-osd@{osd-num}
#Purge the device (run on the admin node)
ceph osd purge {osd-num} --yes-i-really-mean-it
b) Remove the OSDs (repeat the following steps for every OSD on the node being decommissioned)
#View the OSD IDs (run on the admin node)
[root@ceph01 ceph]# ceph osd tree
ID  CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF 
 -1       0.97595 root default                            
 -3       0.19519     host ceph01                         
  0   hdd 0.04880         osd.0       up  1.00000 1.00000 
  1   hdd 0.04880         osd.1       up  1.00000 1.00000 
  2   hdd 0.04880         osd.2       up  1.00000 1.00000 
  3   hdd 0.04880         osd.3       up  1.00000 1.00000 
 -5       0.19519     host ceph02                         
  4   hdd 0.04880         osd.4       up  1.00000 1.00000 
  5   hdd 0.04880         osd.5       up  1.00000 1.00000 
  6   hdd 0.04880         osd.6       up  1.00000 1.00000 
  7   hdd 0.04880         osd.7       up  1.00000 1.00000 
 -7       0.19519     host ceph03                         
  8   hdd 0.04880         osd.8       up  1.00000 1.00000 
  9   hdd 0.04880         osd.9       up  1.00000 1.00000 
 10   hdd 0.04880         osd.10      up  1.00000 1.00000 
 11   hdd 0.04880         osd.11      up  1.00000 1.00000 
 -9       0.19519     host ceph04                         
 12   hdd 0.04880         osd.12      up  1.00000 1.00000 
 13   hdd 0.04880         osd.13      up  1.00000 1.00000 
 14   hdd 0.04880         osd.14      up  1.00000 1.00000 
 15   hdd 0.04880         osd.15      up  1.00000 1.00000 
-11       0.19519     host ceph05                         
 16   hdd 0.04880         osd.16      up  1.00000 1.00000 
 17   hdd 0.04880         osd.17      up  1.00000 1.00000 
 18   hdd 0.04880         osd.18      up  1.00000 1.00000 
 19   hdd 0.04880         osd.19      up  1.00000 1.00000 
 #Mark the OSD out (run on the admin node); fully remove one OSD and check the cluster before moving on to the next one
 [root@ceph01 ceph]# ceph osd out osd.19
 #Once the cluster status is healthy, stop that OSD's service on its OSD node
 [root@ceph02 ~]# ceph -s
  cluster:
    id:     32e0a117-f0bf-4dbf-b077-e649757d433a
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 5h)
    mgr: ceph01(active, since 5h), standbys: ceph03, ceph02
    mds: mycephfs:1 {0=ceph03=up:active} 2 up:standby
    osd: 20 osds: 20 up (since 5m), 20 in (since 5m)
    rgw: 3 daemons active (ceph01, ceph02, ceph03)
 
  task status:
 
  data:
    pools:   10 pools, 608 pgs
    objects: 363 objects, 386 MiB
    usage:   59 GiB used, 941 GiB / 1000 GiB avail
    pgs:     608 active+clean
 
[root@ceph02 ~]#
#Stop the service for the corresponding OSD ID on the OSD node
[root@ceph05 ~]# ps -ef | grep osd
ceph        2343       1  2 15:50 ?        00:00:18 /usr/bin/ceph-osd -f --cluster ceph --id 16 --setuser ceph --setgroup ceph
ceph        2790       1  2 15:51 ?        00:00:18 /usr/bin/ceph-osd -f --cluster ceph --id 17 --setuser ceph --setgroup ceph
ceph        3233       1  2 15:51 ?        00:00:16 /usr/bin/ceph-osd -f --cluster ceph --id 18 --setuser ceph --setgroup ceph
ceph        3674       1  2 15:52 ?        00:00:15 /usr/bin/ceph-osd -f --cluster ceph --id 19 --setuser ceph --setgroup ceph
root        3841    1446  0 16:02 pts/0    00:00:00 grep --color=auto osd
[root@ceph05 ~]# systemctl stop ceph-osd@19
[root@ceph05 ~]# ps -ef | grep osd
ceph        2343       1  2 15:50 ?        00:00:18 /usr/bin/ceph-osd -f --cluster ceph --id 16 --setuser ceph --setgroup ceph
ceph        2790       1  2 15:51 ?        00:00:18 /usr/bin/ceph-osd -f --cluster ceph --id 17 --setuser ceph --setgroup ceph
ceph        3233       1  2 15:51 ?        00:00:17 /usr/bin/ceph-osd -f --cluster ceph --id 18 --setuser ceph --setgroup ceph
root        3854    1446  0 16:02 pts/0    00:00:00 grep --color=auto osd
[root@ceph05 ~]# 
#Remove the device (run on the admin node)
#Method 1
[root@ceph01 ceph]# ceph osd crush remove osd.19
removed item id 19 name 'osd.19' from crush map
[root@ceph01 ~]# ceph osd purge osd.19 --yes-i-really-mean-it 
#Method 2
[root@ceph01 ceph]# ceph osd crush remove osd.19
removed item id 19 name 'osd.19' from crush map
[root@ceph01 ceph]# ceph auth del osd.19
updated
[root@ceph01 ceph]# ceph osd rm osd.19
removed osd.19


[root@ceph01 ceph]# ceph osd tree
ID  CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF 
 -1       0.92715 root default                            
 -3       0.19519     host ceph01                         
  0   hdd 0.04880         osd.0       up  1.00000 1.00000 
  1   hdd 0.04880         osd.1       up  1.00000 1.00000 
  2   hdd 0.04880         osd.2       up  1.00000 1.00000 
  3   hdd 0.04880         osd.3       up  1.00000 1.00000 
 -5       0.19519     host ceph02                         
  4   hdd 0.04880         osd.4       up  1.00000 1.00000 
  5   hdd 0.04880         osd.5       up  1.00000 1.00000 
  6   hdd 0.04880         osd.6       up  1.00000 1.00000 
  7   hdd 0.04880         osd.7       up  1.00000 1.00000 
 -7       0.19519     host ceph03                         
  8   hdd 0.04880         osd.8       up  1.00000 1.00000 
  9   hdd 0.04880         osd.9       up  1.00000 1.00000 
 10   hdd 0.04880         osd.10      up  1.00000 1.00000 
 11   hdd 0.04880         osd.11      up  1.00000 1.00000 
 -9       0.19519     host ceph04                         
 12   hdd 0.04880         osd.12      up  1.00000 1.00000 
 13   hdd 0.04880         osd.13      up  1.00000 1.00000 
 14   hdd 0.04880         osd.14      up  1.00000 1.00000 
 15   hdd 0.04880         osd.15      up  1.00000 1.00000 
-11       0.14639     host ceph05                         
 16   hdd 0.04880         osd.16      up  1.00000 1.00000 
 17   hdd 0.04880         osd.17      up  1.00000 1.00000 
 18   hdd 0.04880         osd.18      up  1.00000 1.00000 
[root@ceph01 ceph]# 
#After all OSDs on the node have been removed, run the following command (on the admin node) to remove the host bucket from the CRUSH map
[root@ceph01 ceph]# ceph osd crush remove ceph05
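#A final check (sketch) that the host bucket is gone from the CRUSH map and the cluster has settled
[root@ceph01 ceph]# ceph osd tree    # ceph05 should no longer appear under the default root
[root@ceph01 ceph]# ceph -s          # wait for HEALTH_OK / active+clean before physically retiring the node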
