## 查看 ceph 集群使用量
[root@ceph02 ~]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
400 GiB 392 GiB 8.1 GiB 2.01
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
images 1 4.6 MiB 0 186 GiB 14
images2 2 73 B 0 186 GiB 5
### 查看 pool 列表，并创建新的 pool（后面的 32 为 pg_num）
[root@ceph02 ~]# ceph osd pool ls
images
images2
[root@ceph02 ~]# ceph osd pool create images3 32
pool 'images3' created
## 查看 pool 的副本数（size）和 pg 数（pg_num）
[root@ceph02 ~]# ceph osd pool get images size
size: 2
[root@ceph02 ~]# ceph osd pool get images pg_num
pg_num: 32
###查看 pool 中的块设备
[root@ceph02 ~]# rbd ls images
test_disk
test_disk1
test_disk2
[root@ceph02 ~]#
##创建块设备
[root@ceph02 ~]# rbd create --size 50 images3/test_disk1
[root@ceph02 ~]# rbd info images3/test_disk1
rbd image 'test_disk1':
size 50 MiB in 13 objects
order 22 (4 MiB objects)
id: 13216b8b4567
block_name_prefix: rbd_data.13216b8b4567
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Fri Sep 27 10:59:26 2019
[root@ceph02 ~]#
### 禁用块设备的高级特性（内核 rbd 客户端通常只支持 layering，关闭其余特性后客户端才能 map——见下方客户端 features 只剩 layering 且 map 成功）
rbd feature disable images3/test_disk1 exclusive-lock object-map fast-diff deep-flatten
###ceph client 查看
[root@ceph01 ~]# rbd --image images/test_disk info
rbd image 'test_disk':
size 50 MiB in 13 objects
order 22 (4 MiB objects)
id: 11b56b8b4567
block_name_prefix: rbd_data.11b56b8b4567
format: 2
features: layering
op_features:
flags:
create_timestamp: Thu Sep 26 15:57:08 2019
### 客户端映射块设备
rbd map images/test_disk
/dev/rbd1
###fdisk -l
Disk /dev/rbd1: 52 MB, 52428800 bytes, 102400 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
## 格式化并挂载映射出的块设备
mkfs.ext4 /dev/rbd1
mount /dev/rbd1 /opt