Creating Ceph Block Storage

Use the block storage (RBD) feature of the Ceph cluster to accomplish the following:
Create a block storage image
Map the image on a client
Create image snapshots
Restore data from a snapshot
Clone an image from a snapshot
Delete snapshots and images

Step 1: Create an image

On node1:

1) List the storage pools (there is one pool named rbd by default)
# ceph osd lspools
0 rbd,

2) Create images and list them
# rbd create demo-image --image-feature layering --size 10G
# rbd create rbd/image --image-feature layering --size 10G #creates a disk image inside the rbd pool
# rbd ls
The pool now contains two images:
demo-image
image

To create a pool, use ceph osd pool create
To delete a pool, use ceph osd pool delete (see the example below)
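As a minimal sketch (the pool name test-pool and the placement-group count 128 are examples invented here, not part of this lab), creating and deleting a pool looks like the following; newer Ceph releases may also require mon_allow_pool_delete to be enabled before a pool can be removed:

# ceph osd pool create test-pool 128 #create a pool with 128 placement groups
# ceph osd lspools #verify the new pool appears
# ceph osd pool delete test-pool test-pool --yes-i-really-really-mean-it #the name is typed twice as confirmation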

# rbd info image
rbd image 'image':
size 10240 MB in 2560 objects #the image is 10 GB
order 22 (4096 kB objects)
block_name_prefix: rbd_data.103c238e1f29
format: 2
features: layering
flags:
--------------------------------------------------------------------------------------------
If something was set up wrong and the deployment has to be redone, unmount the mounted disks first; the df output below shows the OSD disks that are still mounted:

# df -h
Filesystem              Size  Used Avail Use% Mounted on
/dev/mapper/rhel-root 17G 12G 5.5G 69% /
devtmpfs 481M 0 481M 0% /dev
tmpfs 497M 0 497M 0% /dev/shm
tmpfs 497M 7.1M 490M 2% /run
tmpfs 497M 0 497M 0% /sys/fs/cgroup
/dev/vda1 1014M 161M 854M 16% /boot
tmpfs 100M 0 100M 0% /run/user/0
/dev/vdc1 20G 35M 20G 1% /var/lib/ceph/osd/ceph-0
/dev/vdd1 20G 35M 20G 1% /var/lib/ceph/osd/ceph-1
---------------------------------------------------------------------------
[root@node1 ceph-cluster]# ceph-deploy --help
... ...
uninstall Remove Ceph packages from remote hosts. #uninstall packages only
purge Remove Ceph packages from remote hosts and purge all #uninstall packages and wipe all data
data.

... ...
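If the deployment needs to be redone, a rough cleanup sketch (assuming a three-node lab named node1, node2 and node3; substitute your actual hostnames) looks like this:

# ceph-deploy purge node1 node2 node3 #remove Ceph packages and wipe all data on the listed hosts
# ceph-deploy purgedata node1 node2 node3 #wipe /var/lib/ceph and /etc/ceph only, keeping the packages
# ceph-deploy forgetkeys #discard the authentication keys cached in the local working directory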
-----------------------------------------------------------------------------
Step 2: Resize the image dynamically

# rbd resize --size 7G image --allow-shrink #shrink to 7 GB
Resizing image: 100% complete...done.

[root@node1 ceph-cluster]# rbd info image
rbd image 'image':
size 7168 MB in 1792 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.103c238e1f29
format: 2
features: layering
flags:

[root@node1 ceph-cluster]# rbd resize --size 15G image #grow to 15 GB
Resizing image: 100% complete...done.

[root@node1 ceph-cluster]# rbd info image
rbd image 'image':
size 15360 MB in 3840 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.103c238e1f29
format: 2
features: layering
flags:
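Resizing the RBD image does not resize a filesystem that already exists on it. As a minimal sketch, assuming the image has been mapped, formatted with XFS and mounted at /mnt on the client (as done in Step 3 below), the filesystem can then be grown to match:

[root@client ~]# xfs_growfs /mnt #expand the XFS filesystem to fill the enlarged image
[root@client ~]# df -h /mnt #confirm the new size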
----------------------------------------
On client:
# cd /etc/ceph/
[root@client ceph]# ls
rbdmap

On node1:
# scp /etc/ceph/ceph.client.admin.keyring /etc/ceph/ceph.conf client:/etc/ceph

On client (the configuration file and keyring have now been copied over):
# cd /etc/ceph/
[root@client ceph]# ls
ceph.client.admin.keyring  ceph.conf  rbdmap
-------------------------------------------------------------
Step 3: Access via KRBD
1) Inside the cluster, map the image as a local disk
[root@node1 ~]# rbd map demo-image
/dev/rbd0
[root@node1 ~]# lsblk
… …
rbd0 251:0 0 10G 0 disk
[root@node1 ~]# mkfs.xfs /dev/rbd0
[root@node1 ~]# mount /dev/rbd0 /mnt

2) Client access via KRBD
#the client needs the ceph-common package installed
#copy the configuration file (otherwise the client does not know where the cluster is)
#copy the keyring (otherwise the client has no permission to connect)
[root@client ~]# yum -y install ceph-common
[root@client ~]# scp 192.168.4.11:/etc/ceph/ceph.conf /etc/ceph/
[root@client ~]# scp 192.168.4.11:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
[root@client ~]# rbd map image
[root@client ~]# lsblk
[root@client ~]# rbd showmapped
id pool image snap device
0 rbd image - /dev/rbd0

3) Format and mount the device on the client
[root@client ~]# mkfs.xfs /dev/rbd0
[root@client ~]# mount /dev/rbd0 /mnt/
[root@client ~]# echo "test" > /mnt/test.txt
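The mapping above does not survive a reboot. One way to make it persistent (a sketch, assuming the rbdmap service and udev rules shipped with ceph-common, and the admin keyring used throughout this lab) is to register the image in /etc/ceph/rbdmap and mount it through the /dev/rbd/<pool>/<image> symlink:

[root@client ~]# echo "rbd/image id=admin,keyring=/etc/ceph/ceph.client.admin.keyring" >> /etc/ceph/rbdmap
[root@client ~]# echo "/dev/rbd/rbd/image /mnt xfs noauto 0 0" >> /etc/fstab #noauto: rbdmap mounts it after mapping
[root@client ~]# systemctl enable rbdmap --now #map (and mount) the listed images on boot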
--------------------------------------------------------------
Ceph storage recap
1. Block: from the cluster's total capacity (roughly 120 GB here), carve out e.g. a 5 GB image and share it ----> (client)

On the client:
yum -y install ceph-common
/etc/ceph/ceph.conf
/etc/ceph/ceph.client.admin.keyring
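A quick check that the client can actually reach the cluster with the copied configuration and keyring (a sketch; the exact output depends on your cluster state):

[root@client ~]# ceph -s #should report the cluster health and mon/osd status
[root@client ~]# rbd ls #should list demo-image and image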
-------------------------------------------------------------
# rbd map demo-image
/dev/rbd0
[root@client ceph]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 252:0 0 20G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 19G 0 part
├─rhel-root 253:0 0 17G 0 lvm /
└─rhel-swap 253:1 0 2G 0 lvm [SWAP]
rbd0 251:0 0 10G 0 disk
[root@client ceph]# rbd showmapped
id pool image snap device
0 rbd demo-image - /dev/rbd0
[root@client ceph]#
[root@client ceph]# mkfs.xfs /dev/rbd0
mkfs.xfs: /dev/rbd0 appears to contain an existing filesystem (xfs).
mkfs.xfs: Use the -f option to force overwrite.
[root@client ceph]# mount /dev/rbd0 /mnt/
[root@client ceph]# echo "test" > /mnt/test.txt
[root@client ceph]# ls /mnt/
test.txt
[root@client ceph]# cat /mnt/test.txt
test
[root@client ceph]#

# rbd unmap demo-image
rbd: sysfs write failed
rbd: unmap failed: (16) Device or resource busy #the unmap fails because /dev/rbd0 is still mounted on /mnt; unmount it first
[root@client ceph]# rbd map image
/dev/rbd2
[root@client ceph]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 252:0 0 20G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 19G 0 part
├─rhel-root 253:0 0 17G 0 lvm /
└─rhel-swap 253:1 0 2G 0 lvm [SWAP]
rbd0 251:0 0 10G 0 disk /mnt
rbd1 251:16 0 15G 0 disk
rbd2 251:32 0 15G 0 disk
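For reference, the correct order for releasing a mapped image is to unmount first and then unmap (the device path such as /dev/rbd0 can be used instead of the image name):

[root@client ~]# umount /mnt
[root@client ~]# rbd unmap demo-image #or: rbd unmap /dev/rbd0
[root@client ~]# rbd showmapped #confirm the mapping is gone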
---------------------------------------------------------------
# rbd ls
demo-image
image
---------------------------------------------------------------
Step 4: Create image snapshots

1) List the image's snapshots
[root@node1 ~]# rbd snap ls image

2) Create a snapshot of the image
[root@node1 ~]# rbd snap create image --snap image-snap1
[root@node1 ~]# rbd snap ls image
SNAPID NAME SIZE
4 image-snap1 15360 MB

3) Delete the test file written by the client
[root@client ~]# rm -rf /mnt/test.txt

4) Roll back to the snapshot
[root@node1 ~]# rbd snap rollback image --snap image-snap1
#remount on the client so the rolled-back data is visible (ideally unmount before the rollback so cached writes do not mask it)
[root@client ~]# umount /mnt
[root@client ~]# mount /dev/rbd0 /mnt/
[root@client ~]# ls /mnt
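The remaining objectives from the overview, cloning from a snapshot and deleting snapshots and images, are not shown in the transcript above. As a sketch using the same image and snapshot names (image-clone is a name invented here for illustration):

[root@node1 ~]# rbd snap protect image --snap image-snap1 #a snapshot must be protected before it can be cloned
[root@node1 ~]# rbd clone rbd/image --snap image-snap1 rbd/image-clone --image-feature layering
[root@node1 ~]# rbd flatten image-clone #optional: copy the parent's data so the clone stands alone
[root@node1 ~]# rbd snap unprotect image --snap image-snap1
[root@node1 ~]# rbd snap rm image --snap image-snap1 #delete the snapshot
[root@node1 ~]# rbd rm image-clone #delete the cloned image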

 

Reposted from: https://www.cnblogs.com/summer2/p/10788054.html
