1、ext4文件系统的rbd在线扩容:
blockdev --getsize64 /dev/rbd0 ---从命令行调用块设备控制程序,获取块设备的大小(单位:字节)
resize2fs /dev/rbd0 --调整ext4文件系统大小,使其扩展占满块设备的新容量
举例:
rados mkpool bboss
rados lspools
rbd create bboss/test --size 4096 -k /etc/ceph/ceph.client.admin.keyring
rbd map test --pool bboss --name client.admin -k /etc/ceph/ceph.client.admin.keyring
/dev/rbd0
rbd ls bboss
mkfs.ext4 -m0 /dev/rbd0
mkdir /mnt/ceph-block-device
mount /dev/rbd0 /mnt/ceph-block-device
[root@xhw299 ~]# rbd info bboss/test
rbd image 'test':
size 4096 MB in 1024 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.d47d2ae8944a
format: 2
features: layering
flags:
rbd resize --image test --size 8192
rbd resize -p bboss --size 8192 test
[root@xhw299 ~]# rbd info bboss/test
rbd image 'test':
size 8192 MB in 2048 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.d47d2ae8944a
format: 2
features: layering
flags:
创建文件,验证下文件是否丢失:
[root@xhw299 ~]# cd /mnt/ceph-block-device
[root@xhw299 ceph-block-device]# ls
lost+found
[root@xhw299 ceph-block-device]# echo test > bboss
[root@xhw299 ceph-block-device]# cat bboss
test
[root@xhw299 ceph-block-device]# ls
bboss lost+found
[root@xhw299 ceph-block-device]# cd
磁盘大小没有变:
[root@xhw299 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc_vg-root 100G 1.9G 99G 2% /
devtmpfs 126G 0 126G 0% /dev
tmpfs 126G 4.0K 126G 1% /dev/shm
tmpfs 126G 594M 126G 1% /run
tmpfs 126G 0 126G 0% /sys/fs/cgroup
/dev/sda1 497M 111M 387M 23% /boot
tmpfs 26G 0 26G 0% /run/user/0
/dev/rbd0 3.9G 16M 3.8G 1% /mnt/ceph-block-device
[root@xhw299 ~]# umount /mnt/ceph-block-device
[root@xhw299 ~]# resize2fs /dev/rbd0
[root@xhw299 ~]# mount /dev/rbd0 /mnt/ceph-block-device
执行resize2fs调整文件系统大小并重新挂载后,磁盘大小改变:
[root@xhw299 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc_vg-root 100G 1.9G 99G 2% /
devtmpfs 126G 0 126G 0% /dev
tmpfs 126G 4.0K 126G 1% /dev/shm
tmpfs 126G 594M 126G 1% /run
tmpfs 126G 0 126G 0% /sys/fs/cgroup
/dev/sda1 497M 111M 387M 23% /boot
tmpfs 26G 0 26G 0% /run/user/0
/dev/rbd0 7.8G 18M 7.8G 1% /mnt/ceph-block-device
文件没有丢失:
[root@xhw299 ~]# cat /mnt/ceph-block-device/bboss
test
2、xfs系统的rbd在线扩容
rbd:
1)修改Ceph配置文件/etc/ceph/ceph.conf,在global 下,增加
rbd_default_features = 1
2)创建设备块
[root@host15 /root>]
$ rbd create ceph-client1-rbd1 --size 1024
3)映射文件
[root@host15 /root>]
$ rbd map ceph-client1-rbd1 --pool rbd --name client.admin -k /etc/ceph/ceph.client.admin.keyring
4)查看映射的设备
[root@host15 /root>]
$ rbd showmapped
id pool image snap device
0 rbd ceph-client1-rbd1 - /dev/rbd0
5)查看 设备
[root@host15 /root>]
$ fdisk -l /dev/rbd0
Disk /dev/rbd0: 1073 MB, 1073741824 bytes, 2097152 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
6)格式化为xfs文件系统
[root@host15 /root>]
$ mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=256 agcount=9, agsize=31744 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0 finobt=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
7)挂载文件系统
[root@host15 /root>]
$ mkdir /mnt/ceph-voll
[root@host15 /root>]
$ mount /dev/rbd
rbd/ rbd0
[root@host15 /root>]
$ mount /dev/rbd0 /mnt/ceph-voll/
8)向设备上写入数据
[root@host15 /root>]
$ dd if=/dev/zero of=/mnt/ceph-voll/file1 count=100 bs=1M
100+0 records in
100+0 records out
104857600 bytes (105 MB) copied, 0.0666746 s, 1.6 GB/s
[root@host15 /root>]
$ cd /mnt/ceph-voll/
[root@host15 /mnt/ceph-voll>]
$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc-root 8.8G 5.3G 3.5G 61% /
devtmpfs 7.9G 0 7.9G 0% /dev
tmpfs 7.9G 4.0K 7.9G 1% /dev/shm
tmpfs 7.9G 249M 7.7G 4% /run
tmpfs 7.9G 0 7.9G 0% /sys/fs/cgroup
/dev/sda1 190M 104M 72M 60% /boot
tmpfs 1.6G 0 1.6G 0% /run/user/0
/dev/rbd0 1014M 133M 882M 14% /mnt/ceph-voll
[root@host15 /mnt/ceph-voll>]
$ ls -la
total 102400
drwxr-xr-x 2 root root 18 Oct 13 09:55 .
drwxr-xr-x. 4 root root 32 Oct 13 09:54 ..
-rw-r--r-- 1 root root 104857600 Oct 13 09:55 file1
9)rbd调整大小
[root@host15 /mnt/ceph-voll>]
$ rbd resize rbd/ceph-client1-rbd1 --size 2048
Resizing image: 100% complete...done.
[root@host15 /mnt/ceph-voll>]
$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc-root 8.8G 5.3G 3.5G 61% /
devtmpfs 7.9G 0 7.9G 0% /dev
tmpfs 7.9G 4.0K 7.9G 1% /dev/shm
tmpfs 7.9G 249M 7.7G 4% /run
tmpfs 7.9G 0 7.9G 0% /sys/fs/cgroup
/dev/sda1 190M 104M 72M 60% /boot
tmpfs 1.6G 0 1.6G 0% /run/user/0
/dev/rbd0 1014M 133M 882M 14% /mnt/ceph-voll
用xfs_growfs扩展文件系统,使新的容量被内核接受(此命令是核心命令)
[root@host15 /mnt/ceph-voll>]
$ xfs_growfs -d /mnt/ceph-voll/
meta-data=/dev/rbd0 isize=256 agcount=9, agsize=31744 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0 finobt=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 262144 to 524288
[root@host15 /mnt/ceph-voll>]
$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc-root 8.8G 5.3G 3.5G 61% /
devtmpfs 7.9G 0 7.9G 0% /dev
tmpfs 7.9G 4.0K 7.9G 1% /dev/shm
tmpfs 7.9G 249M 7.7G 4% /run
tmpfs 7.9G 0 7.9G 0% /sys/fs/cgroup
/dev/sda1 190M 104M 72M 60% /boot
tmpfs 1.6G 0 1.6G 0% /run/user/0
/dev/rbd0 2.0G 133M 1.9G 7% /mnt/ceph-voll
blockdev --getsize64 /dev/rbd0 ---从命令行调用块设备控制程序,获取块设备的大小(单位:字节)
resize2fs /dev/rbd0 --调整ext4文件系统大小,使其扩展占满块设备的新容量
举例:
rados mkpool bboss
rados lspools
rbd create bboss/test --size 4096 -k /etc/ceph/ceph.client.admin.keyring
rbd map test --pool bboss --name client.admin -k /etc/ceph/ceph.client.admin.keyring
/dev/rbd0
rbd ls bboss
mkfs.ext4 -m0 /dev/rbd0
mkdir /mnt/ceph-block-device
mount /dev/rbd0 /mnt/ceph-block-device
[root@xhw299 ~]# rbd info bboss/test
rbd image 'test':
size 4096 MB in 1024 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.d47d2ae8944a
format: 2
features: layering
flags:
rbd resize --image test --size 8192
rbd resize -p bboss --size 8192 test
[root@xhw299 ~]# rbd info bboss/test
rbd image 'test':
size 8192 MB in 2048 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.d47d2ae8944a
format: 2
features: layering
flags:
创建文件,验证下文件是否丢失:
[root@xhw299 ~]# cd /mnt/ceph-block-device
[root@xhw299 ceph-block-device]# ls
lost+found
[root@xhw299 ceph-block-device]# echo test > bboss
[root@xhw299 ceph-block-device]# cat bboss
test
[root@xhw299 ceph-block-device]# ls
bboss lost+found
[root@xhw299 ceph-block-device]# cd
磁盘大小没有变:
[root@xhw299 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc_vg-root 100G 1.9G 99G 2% /
devtmpfs 126G 0 126G 0% /dev
tmpfs 126G 4.0K 126G 1% /dev/shm
tmpfs 126G 594M 126G 1% /run
tmpfs 126G 0 126G 0% /sys/fs/cgroup
/dev/sda1 497M 111M 387M 23% /boot
tmpfs 26G 0 26G 0% /run/user/0
/dev/rbd0 3.9G 16M 3.8G 1% /mnt/ceph-block-device
[root@xhw299 ~]# umount /mnt/ceph-block-device
[root@xhw299 ~]# resize2fs /dev/rbd0
[root@xhw299 ~]# mount /dev/rbd0 /mnt/ceph-block-device
执行resize2fs调整文件系统大小并重新挂载后,磁盘大小改变:
[root@xhw299 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc_vg-root 100G 1.9G 99G 2% /
devtmpfs 126G 0 126G 0% /dev
tmpfs 126G 4.0K 126G 1% /dev/shm
tmpfs 126G 594M 126G 1% /run
tmpfs 126G 0 126G 0% /sys/fs/cgroup
/dev/sda1 497M 111M 387M 23% /boot
tmpfs 26G 0 26G 0% /run/user/0
/dev/rbd0 7.8G 18M 7.8G 1% /mnt/ceph-block-device
文件没有丢失:
[root@xhw299 ~]# cat /mnt/ceph-block-device/bboss
test
2、xfs系统的rbd在线扩容
rbd:
1)修改Ceph配置文件/etc/ceph/ceph.conf,在global 下,增加
rbd_default_features = 1
2)创建设备块
[root@host15 /root>]
$ rbd create ceph-client1-rbd1 --size 1024
3)映射文件
[root@host15 /root>]
$ rbd map ceph-client1-rbd1 --pool rbd --name client.admin -k /etc/ceph/ceph.client.admin.keyring
4)查看映射的设备
[root@host15 /root>]
$ rbd showmapped
id pool image snap device
0 rbd ceph-client1-rbd1 - /dev/rbd0
5)查看 设备
[root@host15 /root>]
$ fdisk -l /dev/rbd0
Disk /dev/rbd0: 1073 MB, 1073741824 bytes, 2097152 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
6)格式化为xfs文件系统
[root@host15 /root>]
$ mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=256 agcount=9, agsize=31744 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0 finobt=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
7)挂载文件系统
[root@host15 /root>]
$ mkdir /mnt/ceph-voll
[root@host15 /root>]
$ mount /dev/rbd
rbd/ rbd0
[root@host15 /root>]
$ mount /dev/rbd0 /mnt/ceph-voll/
8)向设备上写入数据
[root@host15 /root>]
$ dd if=/dev/zero of=/mnt/ceph-voll/file1 count=100 bs=1M
100+0 records in
100+0 records out
104857600 bytes (105 MB) copied, 0.0666746 s, 1.6 GB/s
[root@host15 /root>]
$ cd /mnt/ceph-voll/
[root@host15 /mnt/ceph-voll>]
$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc-root 8.8G 5.3G 3.5G 61% /
devtmpfs 7.9G 0 7.9G 0% /dev
tmpfs 7.9G 4.0K 7.9G 1% /dev/shm
tmpfs 7.9G 249M 7.7G 4% /run
tmpfs 7.9G 0 7.9G 0% /sys/fs/cgroup
/dev/sda1 190M 104M 72M 60% /boot
tmpfs 1.6G 0 1.6G 0% /run/user/0
/dev/rbd0 1014M 133M 882M 14% /mnt/ceph-voll
[root@host15 /mnt/ceph-voll>]
$ ls -la
total 102400
drwxr-xr-x 2 root root 18 Oct 13 09:55 .
drwxr-xr-x. 4 root root 32 Oct 13 09:54 ..
-rw-r--r-- 1 root root 104857600 Oct 13 09:55 file1
9)rbd调整大小
[root@host15 /mnt/ceph-voll>]
$ rbd resize rbd/ceph-client1-rbd1 --size 2048
Resizing image: 100% complete...done.
[root@host15 /mnt/ceph-voll>]
$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc-root 8.8G 5.3G 3.5G 61% /
devtmpfs 7.9G 0 7.9G 0% /dev
tmpfs 7.9G 4.0K 7.9G 1% /dev/shm
tmpfs 7.9G 249M 7.7G 4% /run
tmpfs 7.9G 0 7.9G 0% /sys/fs/cgroup
/dev/sda1 190M 104M 72M 60% /boot
tmpfs 1.6G 0 1.6G 0% /run/user/0
/dev/rbd0 1014M 133M 882M 14% /mnt/ceph-voll
用xfs_growfs扩展文件系统,使新的容量被内核接受(此命令是核心命令)
[root@host15 /mnt/ceph-voll>]
$ xfs_growfs -d /mnt/ceph-voll/
meta-data=/dev/rbd0 isize=256 agcount=9, agsize=31744 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0 finobt=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 262144 to 524288
[root@host15 /mnt/ceph-voll>]
$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/zc-root 8.8G 5.3G 3.5G 61% /
devtmpfs 7.9G 0 7.9G 0% /dev
tmpfs 7.9G 4.0K 7.9G 1% /dev/shm
tmpfs 7.9G 249M 7.7G 4% /run
tmpfs 7.9G 0 7.9G 0% /sys/fs/cgroup
/dev/sda1 190M 104M 72M 60% /boot
tmpfs 1.6G 0 1.6G 0% /run/user/0
/dev/rbd0 2.0G 133M 1.9G 7% /mnt/ceph-voll