Ceph Example

Environment planning
One client VM and three storage-cluster VMs
client 192.168.4.10/24
node1 192.168.4.11/24
node2 192.168.4.12/24
node3 192.168.4.13/24

On the physical host, extract rhceph-2.0-rhel-7-x86_64.tar.gz and build a yum repository from it for later use
[root@room9pc01 ~]# cat /etc/yum.repos.d/ceph.repo
[mon]
name=mon
baseurl=http://192.168.4.254/rhceph-2.0-rhel-7-x86_64/MON
gpgcheck=0
[osd]
name=osd
baseurl=http://192.168.4.254/rhceph-2.0-rhel-7-x86_64/OSD
gpgcheck=0
[tools]
name=tools
baseurl=http://192.168.4.254/rhceph-2.0-rhel-7-x86_64/Tools
gpgcheck=0
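
The repo file above assumes the tarball has already been extracted under the web server's document root on 192.168.4.254. A minimal sketch of that step, assuming the default httpd document root /var/www/html (adjust the path if your web root differs); the extracted directory should contain at least the MON, OSD and Tools subdirectories referenced by the baseurls:
[root@room9pc01 ~]# tar -xf rhceph-2.0-rhel-7-x86_64.tar.gz -C /var/www/html/
[root@room9pc01 ~]# ls /var/www/html/rhceph-2.0-rhel-7-x86_64/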

Push this yum repo file to all four machines
[root@room9pc01 ~]# for i in {10..13}
do
scp /etc/yum.repos.d/ceph.repo 192.168.4.$i:/etc/yum.repos.d/
done

Check the yum repositories on the VMs (expect 41, 28, and 33 packages respectively)
[root@client ~]# yum repolist
repo id       repo name   status
local_repo    CentOS      9,591
mon           mon         41
osd           osd         28
rhel          rhel        4,986
tools         tools       33
repolist: 14,679

Edit /etc/hosts and sync it to all hosts
[root@client ~]# cat /etc/hosts
192.168.4.10 client
192.168.4.11 node1
192.168.4.12 node2
192.168.4.13 node3
[root@client ~]# for i in {10..13}
do
scp /etc/hosts 192.168.4.$i:/etc/hosts
done

Stop SSH from prompting to confirm host keys (change this on all four machines)
vim /etc/ssh/ssh_config
35 StrictHostKeyChecking no
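
If you would rather not edit the file by hand on every machine, the same change can be scripted; a hedged sketch, assuming the stock config still contains the commented default "#   StrictHostKeyChecking ask":
sed -i 's/^#[[:space:]]*StrictHostKeyChecking ask/StrictHostKeyChecking no/' /etc/ssh/ssh_config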

Configure passwordless SSH
[root@node1 ~]# ssh-keygen
[root@node1 ~]# for i in {10..13}; do ssh-copy-id 192.168.4.$i; done

Verify passwordless SSH
[root@node1 ~]# ssh node3
[root@node1 ~]# ssh node2
[root@node1 ~]# ssh client

Set up an NTP server
[root@client ~]# vim /etc/chrony.conf
server 0.centos.pool.ntp.org iburst
allow 192.168.4.0/24
local stratum 10
[root@client ~]# systemctl restart chronyd

Have all other nodes synchronize time with the NTP server
[root@node1 ~]# vim /etc/chrony.conf
server 192.168.4.10 iburst
[root@node1 ~]# systemctl restart chronyd
[root@node1 ~]# for i in 2 3
do
scp /etc/chrony.conf node$i:/etc/chrony.conf
done

Restart the service on the other nodes as well
[root@node2 ~]# systemctl restart chronyd
[root@node3 ~]# systemctl restart chronyd
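
To confirm a node is actually tracking 192.168.4.10, chronyc can list its time sources; the entry marked with ^* is the currently selected source:
[root@node1 ~]# chronyc sources -v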

On the physical host, prepare three disks for each VM (add 10 GB disks through the GUI)
[root@room9pc01 images]# virt-manager
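
If you prefer the command line over the GUI, the same disks can be created and attached with qemu-img and virsh; a sketch under the assumption that the disk images live in /var/lib/libvirt/images and the libvirt domain names match the hostnames:
[root@room9pc01 images]# for vm in node1 node2 node3; do
  for disk in vdb vdc vdd; do
    qemu-img create -f qcow2 /var/lib/libvirt/images/${vm}-${disk}.qcow2 10G
    virsh attach-disk $vm /var/lib/libvirt/images/${vm}-${disk}.qcow2 $disk --persistent --subdriver qcow2
  done
done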

Check the disks on the three VMs
[root@node3 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 10G 0 disk
└─vda1 253:1 0 8G 0 part /
vdb 253:16 0 10G 0 disk
vdc 253:32 0 10G 0 disk
vdd 253:48 0 10G 0 disk
[root@node2 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 10G 0 disk
└─vda1 253:1 0 8G 0 part /
vdb 253:16 0 10G 0 disk
vdc 253:32 0 10G 0 disk
vdd 253:48 0 10G 0 disk
[root@node1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 10G 0 disk
└─vda1 253:1 0 8G 0 part /
vdb 253:16 0 10G 0 disk
vdc 253:32 0 10G 0 disk
vdd 253:48 0 10G 0 disk

Deploy the Ceph cluster

Install the deployment software
[root@node1 ~]# yum -y install ceph-deploy

Create a working directory
[root@node1 ~]# mkdir ceph-cluster
[root@node1 ~]# cd ceph-cluster

Create the Ceph cluster configuration
[root@node1 ceph-cluster]# ceph-deploy new node1 node2 node3
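
ceph-deploy new writes the initial cluster files into the current working directory; you should see roughly the following (the exact log file name may differ):
[root@node1 ceph-cluster]# ls
ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring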

Install the Ceph packages on all nodes
[root@node1 ceph-cluster]# ceph-deploy install node1 node2 node3

Initialize the mon service on all nodes (hostname resolution must be correct)
[root@node1 ceph-cluster]# ceph-deploy mon create-initial
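
A quick check that the three monitors have formed a quorum (the cluster will still report a health warning until OSDs are added, which is expected at this point):
[root@node1 ceph-cluster]# ceph mon stat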

Create OSDs

Prepare disk partitions (two partitions on each node, used as journal disks for the storage servers)
[root@node1 ceph-cluster]# parted /dev/vdb mklabel gpt
[root@node1 ceph-cluster]# parted /dev/vdb mkpart primary 1M 50%
[root@node1 ceph-cluster]# parted /dev/vdb mkpart primary 50% 100%
[root@node1 ceph-cluster]# chown ceph.ceph /dev/vdb1
[root@node1 ceph-cluster]# chown ceph.ceph /dev/vdb2

[root@node2 ~]# parted /dev/vdb mklabel gpt
[root@node2 ~]# parted /dev/vdb mkpart primary 1M 50%
[root@node2 ~]# parted /dev/vdb mkpart primary 50% 100%
[root@node2 ~]# chown ceph.ceph /dev/vdb1
[root@node2 ~]# chown ceph.ceph /dev/vdb2

[root@node3 ~]# parted /dev/vdb mklabel gpt
[root@node3 ~]# parted /dev/vdb mkpart primary 1M 50%
[root@node3 ~]# parted /dev/vdb mkpart primary 50% 100%
[root@node3 ~]# chown ceph.ceph /dev/vdb1
[root@node3 ~]# chown ceph.ceph /dev/vdb2
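
Note that chown on a device node does not survive a reboot, because /dev is rebuilt by udev at boot. If the journal partitions need to keep the ceph owner permanently, one option is a udev rule on each node; a minimal sketch (the file name and match pattern are only illustrative):
[root@node1 ~]# vim /etc/udev/rules.d/70-vdb.rules
ACTION=="add", KERNEL=="vdb[12]", OWNER="ceph", GROUP="ceph"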

Zap the disks to wipe any existing data
[root@node1 ceph-cluster]# ceph-deploy disk zap node1:vdc node1:vdd
[root@node1 ceph-cluster]# ceph-deploy disk zap node2:vdc node2:vdd
[root@node1 ceph-cluster]# ceph-deploy disk zap node3:vdc node3:vdd

Create the OSD storage
[root@node1 ceph-cluster]# ceph-deploy osd create node1:vdc:/dev/vdb1 node1:vdd:/dev/vdb2
[root@node1 ceph-cluster]# ceph-deploy osd create node2:vdc:/dev/vdb1 node2:vdd:/dev/vdb2
[root@node1 ceph-cluster]# ceph-deploy osd create node3:vdc:/dev/vdb1 node3:vdd:/dev/vdb2

Check the cluster status
[root@node1 ceph-cluster]# ceph -s
cluster da664a54-3e04-4994-908a-5a4ee0d98b88
health HEALTH_OK
monmap e1: 3 mons at {node1=192.168.4.11:6789/0,node2=192.168.4.12:6789/0,node3=192.168.4.13:6789/0}
election epoch 8, quorum 0,1,2 node1,node2,node3
osdmap e33: 6 osds: 6 up, 6 in
flags sortbitwise
pgmap v74: 64 pgs, 1 pools, 0 bytes data, 0 objects
202 MB used, 61170 MB / 61373 MB avail
64 active+clean
Note: the health should be HEALTH_OK. Common causes of problems are unsynchronized time and wrong owner/group on vdb1 and vdb2.
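
Besides ceph -s, the OSD-to-host layout can be verified with the following command; each node should show two OSDs, all up:
[root@node1 ceph-cluster]# ceph osd tree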

Create Ceph block storage

Create images

View the storage pools
[root@node1 ceph-cluster]# ceph osd lspools
0 rbd,

Create images, then view them
[root@node1 ceph-cluster]# rbd create demo-image --image-feature layering --size 10G
[root@node1 ceph-cluster]# rbd create rbd/image --image-feature layering --size 10G
[root@node1 ceph-cluster]# rbd list
demo-image
image
[root@node1 ceph-cluster]# rbd info demo-image
rbd image 'demo-image':
size 10240 MB in 2560 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.1034238e1f29
format: 2
features: layering
flags:
[root@node1 ceph-cluster]# rbd info image
rbd image 'image':
size 10240 MB in 2560 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.1037238e1f29
format: 2
features: layering
flags:

Resize dynamically

Shrink the image (not recommended; it can easily cause data loss)
[root@node1 ceph-cluster]# rbd resize --size 7G image --allow-shrink
Resizing image: 100% complete...done.
[root@node1 ceph-cluster]# rbd info image
rbd image 'image':
size 7168 MB in 1792 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.1037238e1f29
format: 2
features: layering
flags:

Grow the image
[root@node1 ceph-cluster]# rbd resize --size 15G image
Resizing image: 100% complete...done.
[root@node1 ceph-cluster]# rbd info image
rbd image 'image':
size 15360 MB in 3840 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.1037238e1f29
format: 2
features: layering
flags:
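
Growing the image only enlarges the block device; a filesystem already living on it keeps its old size and has to be grown separately on the client. For an XFS filesystem that would be roughly the following (only applicable once the image is mapped and mounted, as in the next steps; here the image is formatted afterwards anyway):
[root@client ~]# xfs_growfs /mnt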

Access via KRBD

Map a cluster image as a local disk
[root@node1 ceph-cluster]# rbd map demo-image
/dev/rbd0
[root@node1 ceph-cluster]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 252:0 0 20G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 19G 0 part
├─rhel-root 253:0 0 17G 0 lvm /
└─rhel-swap 253:1 0 2G 0 lvm [SWAP]
vdb 252:16 0 10G 0 disk
├─vdb1 252:17 0 5G 0 part
└─vdb2 252:18 0 5G 0 part
vdc 252:32 0 10G 0 disk
└─vdc1 252:33 0 10G 0 part /var/lib/ceph/osd/ceph-0
vdd 252:48 0 10G 0 disk
└─vdd1 252:49 0 10G 0 part /var/lib/ceph/osd/ceph-1
rbd0 251:0 0 10G 0 disk
[root@node1 ceph-cluster]# mkfs.xfs /dev/rbd0
[root@node1 ceph-cluster]# mount /dev/rbd0 /mnt

Client access via KRBD (install the ceph-common package; copy the config file, which tells the client where the cluster is; copy the keyring, which grants connection permissions)
[root@client ~]# yum -y install ceph-common
[root@client ~]# scp 192.168.4.11:/etc/ceph/ceph.conf /etc/ceph/
[root@client ~]# scp 192.168.4.11:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
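
With the config file and keyring in place, the client can already talk to the cluster; a quick sanity check before mapping the image:
[root@client ~]# ceph -s
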
[root@client ~]# rbd map image
/dev/rbd0
[root@client ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 252:0 0 20G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 19G 0 part
├─rhel-root 253:0 0 17G 0 lvm /
└─rhel-swap 253:1 0 2G 0 lvm [SWAP]
rbd0 251:0 0 15G 0 disk
[root@client ~]# rbd showmapped
id pool image snap device
0 rbd image - /dev/rbd0

Format and mount the device on the client
[root@client ~]# mkfs.xfs /dev/rbd0
[root@client ~]# mount /dev/rbd0 /mnt/
[root@client ~]# echo 123456 > /mnt/1.txt
[root@client ~]# ls /mnt/1.txt
/mnt/1.txt

Create image snapshots

List image snapshots (none exist yet)
[root@node1 ~]# rbd snap ls image

Create an image snapshot
[root@node1 ~]# rbd snap create image --snap image-snap1
[root@node1 ~]# rbd snap ls image
SNAPID NAME SIZE
4 image-snap1 15360 MB

On the client, delete the test file that was written earlier
[root@client ~]# ls /mnt
1.txt
[root@client ~]# rm -rf /mnt/1.txt
[root@client ~]# ls /mnt

Roll back to the snapshot
[root@node1 ~]# rbd snap rollback image --snap image-snap1
Rolling back to snapshot: 100% complete...done.

Remount the partition on the client and check the files again
[root@client ~]# umount /mnt
[root@client ~]# mount /dev/rbd0 /mnt/
[root@client ~]# ls /mnt

Create a clone from the snapshot

Clone the snapshot
[root@node1 ~]# rbd snap protect image --snap image-snap1
[root@node1 ~]# rbd snap rm image --snap image-snap1    (fails)
rbd: snapshot 'image-snap1' is protected from removal.
2018-09-08 16:21:21.441808 7f72046b3d80 -1 librbd::Operations: snapshot is protected
[root@node1 ~]# rbd clone image --snap image-snap1 image-clone --image-feature layering

View the relationship between the cloned image and the parent snapshot
[root@node1 ~]# rbd info image-clone
rbd image 'image-clone':
size 15360 MB in 3840 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.104d238e1f29
format: 2
features: layering
flags:
parent: rbd/image@image-snap1
overlap: 15360 MB
Note: most of a cloned image's data still comes from the parent snapshot via the clone chain.
If the clone should work independently, all of the parent snapshot's data must be copied into it, which takes some time:
[root@node1 ~]# rbd flatten image-clone
Image flatten: 100% complete...done.
[root@node1 ~]# rbd info image-clone
rbd image 'image-clone':
size 15360 MB in 3840 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.104d238e1f29
format: 2
features: layering
flags:
The parent field is gone; the clone no longer depends on the snapshot.

Remove the disk mapping on the client
[root@client ~]# umount /mnt
[root@client ~]# rbd showmapped
id pool image snap device
0 rbd image - /dev/rbd0
[root@client ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 252:0 0 20G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 19G 0 part
├─rhel-root 253:0 0 17G 0 lvm /
└─rhel-swap 253:1 0 2G 0 lvm [SWAP]
rbd0 251:0 0 15G 0 disk
[root@client ~]#
[root@client ~]# rbd unmap /dev/rbd/rbd/image
[root@client ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 252:0 0 20G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 19G 0 part
├─rhel-root 253:0 0 17G 0 lvm /
└─rhel-swap 253:1 0 2G 0 lvm [SWAP]

Delete the snapshot and the image (the snapshot is still protected, so unprotect it before removing it)
[root@node1 ~]# rbd snap unprotect image --snap image-snap1
[root@node1 ~]# rbd snap rm image --snap image-snap1
[root@node1 ~]# rbd list
[root@node1 ~]# rbd rm image
