I. Create a block device and mount it on the client
1) Create a pool and enable the rbd application on it
[root@centos1 ~]# ceph osd pool create rbd_pool 128 128
pool 'rbd_pool' created
[root@centos1 ~]# ceph osd pool application enable rbd_pool rbd
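To confirm that the pool exists and carries the rbd application tag, a quick check along these lines should work (a minimal sketch using standard ceph CLI commands; output varies by cluster):
# rbd_pool should appear with "application rbd" in the detailed pool list
ceph osd pool ls detail
# Or query this pool's application tags directly
ceph osd pool application get rbd_pool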
2) Create RBD images
[root@centos1 ~]# rbd create --size 1G rbd_pool/rbd1
[root@centos1 ~]# rbd create --size 2G -p rbd_pool rbd2
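Both images should now appear in the pool listing, e.g.:
# The -l flag gives a long listing that includes each image's size
rbd ls -l rbd_pool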
Verify:
[root@centos1 ~]# rbd info rbd_pool/rbd1
rbd image 'rbd1':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
id: 1e5f76b8b4567
block_name_prefix: rbd_data.1e5f76b8b4567
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sat Mar 30 19:48:27 2024
3) Create a user
Create a user rbd with read access to the monitors and read/write access to all RBD images in rbd_pool:
[root@centos1 ~]# ceph auth add client.rbd mon 'allow r' osd 'allow rwx pool=rbd_pool'
added key for client.rbd
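For reference, ceph auth get-or-create can create the user and write its keyring in a single step; a sketch equivalent to the two commands used here (same capabilities assumed):
ceph auth get-or-create client.rbd \
    mon 'allow r' osd 'allow rwx pool=rbd_pool' \
    -o /etc/ceph/ceph.client.rbd.keyring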
Export the keyring of user client.rbd as ceph.client.rbd.keyring:
[root@centos1 ~]# ceph auth get client.rbd -o /etc/ceph/ceph.client.rbd.keyring
exported keyring for client.rbd
Copy the file to the client's /etc/ceph directory:
[root@centos1 ~]# scp /etc/ceph/ceph.client.rbd.keyring 192.168.136.143:/etc/ceph/
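The client also needs an /etc/ceph/ceph.conf pointing at the monitors before any rbd command can reach the cluster; if it does not already have one, copy it over the same way (sketch, same host as above):
scp /etc/ceph/ceph.conf 192.168.136.143:/etc/ceph/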
Map the images on the client (pass --id rbd if only the client.rbd keyring is installed; without it, rbd authenticates as client.admin):
[root@centos2 ~]# rbd map rbd_pool/rbd1
/dev/rbd0
[root@centos2 ceph]# rbd map rbd_pool/rbd2
/dev/rbd1
A problem that may come up:
[root@centos2 ~]# rbd map rbd_pool/rbd1
rbd: sysfs write failed
RBD image feature set mismatch. Try disabling features unsupported by the kernel with "rbd feature disable".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
[root@centos2 ~]# rbd feature disable rbd_pool/rbd1 object-map fast-diff deep-flatten
[root@centos2 ~]# rbd map rbd_pool/rbd1
/dev/rbd0
This command disables the listed features on the RBD image so that its feature set matches what the client kernel supports.
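Alternatively, the mismatch can be avoided at creation time by enabling only the features the kernel client supports. A sketch (the image name rbd_new is hypothetical):
# Create an image with only the layering feature
rbd create --size 1G --image-feature layering rbd_pool/rbd_new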
Format the mapped devices and, following the plan, mount them to the designated directories:
[root@centos2 ceph]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=9, agsize=31744 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@centos2 ceph]# mkfs.xfs /dev/rbd1
meta-data=/dev/rbd1 isize=512 agcount=9, agsize=64512 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=524288, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@centos2 ceph]# mkdir /mnt/rbd{1..2}
[root@centos2 ceph]# mount /dev/rbd0 /mnt/rbd1
[root@centos2 ceph]# mount /dev/rbd1 /mnt/rbd2
Verify:
[root@centos2 ceph]# rbd showmapped --id rbd
id pool image snap device
0 rbd_pool rbd1 - /dev/rbd0
1 rbd_pool rbd2 - /dev/rbd1
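These mappings and mounts do not survive a reboot. A common way to persist them is the rbdmap service shipped with Ceph; a sketch, assuming the rbdmap unit is installed on the client:
# /etc/ceph/rbdmap lists one image per line with the credentials used to map it:
#   rbd_pool/rbd1 id=rbd,keyring=/etc/ceph/ceph.client.rbd.keyring
#   rbd_pool/rbd2 id=rbd,keyring=/etc/ceph/ceph.client.rbd.keyring
# Matching /etc/fstab entries should use noauto so boot does not block on them:
#   /dev/rbd/rbd_pool/rbd1 /mnt/rbd1 xfs noauto 0 0
#   /dev/rbd/rbd_pool/rbd2 /mnt/rbd2 xfs noauto 0 0
systemctl enable rbdmap.service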
II. Expanding an RBD device
Expand rbd1 to 2 GiB:
[root@centos1 ceph]# rbd resize rbd_pool/rbd1 --size 2G --id rbd
Resizing image: 100% complete...done.
Verify:
[root@centos1 ceph]# rbd info rbd_pool/rbd1 --id rbd
rbd image 'rbd1':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
id: 1e5f76b8b4567
block_name_prefix: rbd_data.1e5f76b8b4567
format: 2
features: layering, exclusive-lock
op_features:
flags:
create_timestamp: Sat Mar 30 19:48:27 2024
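For completeness, rbd resize can also shrink an image, but it refuses unless explicitly forced, and shrinking below the size of the filesystem on it destroys data; a sketch:
# Shrinking requires --allow-shrink and is destructive to the filesystem
rbd resize rbd_pool/rbd1 --size 1G --allow-shrink --id rbd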
Grow the filesystem on the client
Resizing the RBD device does not by itself take effect on the client; the filesystem on it must also be grown before the expansion is complete:
[root@centos2 ceph]# xfs_growfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=9, agsize=31744 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 262144 to 524288
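Note that xfs_growfs is documented to take the mount point of the mounted filesystem; the device path happens to be accepted here, but the portable form would be:
xfs_growfs /mnt/rbd1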
[root@centos2 ceph]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/centos-root 17G 5.4G 12G 32% /
devtmpfs 1.4G 0 1.4G 0% /dev
tmpfs 1.4G 36K 1.4G 1% /dev/shm
tmpfs 1.4G 8.8M 1.4G 1% /run
tmpfs 1.4G 0 1.4G 0% /sys/fs/cgroup
/dev/sda1 1014M 143M 872M 15% /boot
tmpfs 1.4G 28K 1.4G 1% /var/lib/ceph/osd/ceph-1
192.168.136.131:/srv/nfs_template_images 17G 4.7G 13G 28% /opt/template_images
tmpfs 284M 0 284M 0% /run/user/0
/dev/rbd0 2.0G 33M 2.0G 2% /mnt/rbd1
/dev/rbd1 2.0G 33M 2.0G 2% /mnt/rbd2
III. RBD snapshot management
In this exercise we create a snapshot snap1 of rbd1 and clone a new RBD device, named rbd3, from that snapshot.
1) On the client, create a test file /mnt/rbd1/test1.txt containing "this is a test file"
[root@centos2 ceph]# echo this is a test file > /mnt/rbd1/test1.txt
2) Create the snapshot
[root@centos2 ceph]# rbd snap create rbd_pool/rbd1@snap1 --id rbd
[root@centos2 ceph]# rbd snap ls rbd_pool/rbd1 --id rbd
SNAPID NAME SIZE TIMESTAMP
4 snap1 2 GiB Sat Mar 30 21:12:56 2024
3) Unmount and unmap the device, roll back to the snapshot, and check that the data is recovered
[root@centos2 ceph]# umount /mnt/rbd1
[root@centos2 ceph]# rbd unmap rbd_pool/rbd1
[root@centos2 ceph]# rbd snap rollback rbd_pool/rbd1@snap1 --id rbd
Rolling back to snapshot: 100% complete...done.
Remap the device and mount it manually
[root@centos2 ceph]# rbd map rbd_pool/rbd1 --id rbd
/dev/rbd0
[root@centos2 ceph]# mount /dev/rbd0 /mnt/rbd1
Check the file contents
[root@centos2 ceph]# cat /mnt/rbd1/test1.txt
this is a test file
4) Create a clone from the snapshot and mount it
First put the snapshot into protected mode:
[root@centos2 ~]# rbd snap protect rbd_pool/rbd1@snap1 --id rbd
A protected snapshot cannot be deleted:
[root@centos2 ~]# rbd snap rm rbd_pool/rbd1@snap1 --id rbd
Removing snap: 0% complete...failed.
2024-03-31 09:34:49.819 7f154efcd840 -1 librbd::Operations: snapshot is protected
rbd: snapshot 'snap1' is protected from removal.
Create the clone rbd3 from snap1:
[root@centos2 ~]# rbd clone rbd_pool/rbd1@snap1 rbd_pool/rbd3 --id rbd
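The parent/child relationship can be inspected from either side, e.g.:
# List the clones that still depend on the snapshot
rbd children rbd_pool/rbd1@snap1 --id rbd
# rbd info on the clone shows a "parent:" line until it is flattened
rbd info rbd_pool/rbd3 --id rbd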
Optionally, flatten the clone into a standalone RBD image:
[root@centos2 ~]# rbd flatten rbd_pool/rbd3
Image flatten: 100% complete...done.
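Once every clone of the snapshot has been flattened (or deleted), the snapshot no longer has dependents and can be unprotected and then removed; a sketch:
rbd snap unprotect rbd_pool/rbd1@snap1 --id rbd
rbd snap rm rbd_pool/rbd1@snap1 --id rbd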
Map the new rbd3:
[root@centos2 ~]# rbd map rbd_pool/rbd3 --id rbd
/dev/rbd2
Then unmount the clone's parent volume and mount the clone to the newly created directory /mnt/rbd3:
[root@centos2 ~]# umount /mnt/rbd1
[root@centos2 ~]# mkdir /mnt/rbd3
[root@centos2 ~]# mount /dev/rbd2 /mnt/rbd3
Verify:
[root@centos2 ~]# ls /mnt/rbd3
test1.txt
IV. Exporting and importing RBD images
Export an RBD image to a file:
[root@centos2 ~]# rbd export rbd_pool/rbd1 rbd1.img --id rbd
Exporting image: 100% complete...done.
Import the file as a new RBD image:
[root@centos2 ~]# rbd import rbd1.img rbd_pool/test1 --id rbd
Importing image: 100% complete...done.
[root@centos2 ~]# rbd ls -p rbd_pool --id rbd
rbd1
rbd2
rbd3
rbd4
test1
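For larger images, the full export/import shown above can be complemented by incremental transfers based on snapshots; a sketch using rbd export-diff / import-diff (the diff file name and target image rbd_copy are hypothetical):
# Export only the changes up to snapshot snap1
rbd export-diff rbd_pool/rbd1@snap1 rbd1-snap1.diff --id rbd
# Replay those changes onto another image (rbd_copy is a pre-created blank image)
rbd import-diff rbd1-snap1.diff rbd_pool/rbd_copy --id rbd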