Splitting the OSD nodes of a Ceph cluster into two independent resource groups with the CRUSH map for OpenStack

Requirement: divide the OSD hosts of a single Ceph cluster into two independent zones that serve different types of OpenStack compute nodes, providing isolation at the resource level.

Hostname | IP address   | Disks                             | Rack
ceph1    | 198.18.2.137 | osd.1, osd.2, osd.3, osd.4, osd.5 | rack1
ceph2    | 198.18.2.138 | osd.0                             | rack2

In a Ceph cluster, the nodes can be split into two groups by defining different CRUSH rules and pools; each group then backs its own set of storage pools, giving data isolation between the two environments.
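
Before making any changes, it can help to snapshot the current topology as a baseline to compare against later (a read-only check):

ceph osd tree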

1. Export the CRUSH map

First, view and export the current CRUSH map, then decompile it into an editable text file:

ceph osd getcrushmap -o crushmap
crushtool -d crushmap -o crushmap.txt
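
It is also worth keeping an untouched copy of the exported binary map so you can roll back if the edit goes wrong (file names here are just examples):

cp crushmap crushmap.orig
# to roll back later: ceph osd setcrushmap -i crushmap.orig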

2. Add two rack buckets

In the buckets section, define two new rack buckets and assign each host to its rack:

rack rack1 {
        id -6           # do not change unnecessarily
        # weight 1.364
        alg straw2
        hash 0  # rjenkins1
        item ceph1 weight 1.0
}

rack rack2 {
        id -7           # do not change unnecessarily
        # weight 1.364
        alg straw2
        hash 0  # rjenkins1
        item ceph2 weight 1.0
}

3. Add the racks under the root

Next, add the two racks to the root-level default bucket so that they become part of the cluster topology.


root default {
        id -1           # do not change unnecessarily
        # weight 2.998
        alg straw2
        hash 0  # rjenkins1
        item rack1 weight 1.634
        item rack2 weight 1.364
}
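
If you prefer not to edit the decompiled map by hand, the bucket changes in steps 2 and 3 can also be made online with the CRUSH CLI; a sketch of the equivalent commands:

ceph osd crush add-bucket rack1 rack
ceph osd crush add-bucket rack2 rack
ceph osd crush move rack1 root=default
ceph osd crush move rack2 root=default
ceph osd crush move ceph1 rack=rack1
ceph osd crush move ceph2 rack=rack2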

4. Define new CRUSH rules

Define a CRUSH rule for each environment (that is, each rack) so that data written under a rule is placed only on that rack.

In the rules section, add two new rules, one per rack:

rule rack1_rule {
        id 1
        type replicated
        min_size 1
        max_size 10
        step take rack1
        step chooseleaf firstn 0 type host
        step emit
}
rule rack2_rule {
        id 2
        type replicated
        min_size 1
        max_size 10
        step take rack2
        step chooseleaf firstn 0 type host
        step emit
}
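
Alternatively, these two rules can be created without touching the map file, using the rule-creation CLI (arguments are rule name, root bucket, failure domain):

ceph osd crush rule create-replicated rack1_rule rack1 host
ceph osd crush rule create-replicated rack2_rule rack2 host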

The complete CRUSH map:

# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable chooseleaf_stable 1
tunable straw_calc_version 1
tunable allowed_bucket_algs 54

# devices
device 0 osd.0 class hdd
device 1 osd.1 class hdd
device 2 osd.2 class hdd
device 3 osd.3 class hdd
device 4 osd.4 class hdd
device 5 osd.5 class hdd

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 zone
type 10 region
type 11 root

# buckets
host ceph1 {
	id -5		# do not change unnecessarily
	id -2 class hdd		# do not change unnecessarily
	# weight 1.365
	alg straw2
	hash 0	# rjenkins1
	item osd.1 weight 0.273
	item osd.2 weight 0.273
	item osd.3 weight 0.273
	item osd.4 weight 0.273
	item osd.5 weight 0.273
}
rack rack1 {
	id -6		# do not change unnecessarily
	id -4 class hdd		# do not change unnecessarily
	# weight 1.000
	alg straw2
	hash 0	# rjenkins1
	item ceph1 weight 1.000
}
host ceph2 {
	id -3		# do not change unnecessarily
	id -8 class hdd		# do not change unnecessarily
	# weight 1.634
	alg straw2
	hash 0	# rjenkins1
	item osd.0 weight 1.634
}
rack rack2 {
	id -7		# do not change unnecessarily
	id -9 class hdd		# do not change unnecessarily
	# weight 1.000
	alg straw2
	hash 0	# rjenkins1
	item ceph2 weight 1.000
}
root default {
	id -1		# do not change unnecessarily
	id -10 class hdd		# do not change unnecessarily
	# weight 2.998
	alg straw2
	hash 0	# rjenkins1
	item rack1 weight 1.634
	item rack2 weight 1.364
}

# rules
rule replicated_rule {
	id 0
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}
rule rack1_rule {
	id 1
	type replicated
	min_size 1
	max_size 10
	step take rack1
	step chooseleaf firstn 0 type host
	step emit
}
rule rack2_rule {
	id 2
	type replicated
	min_size 1
	max_size 10
	step take rack2
	step chooseleaf firstn 0 type host
	step emit
}

# end crush map

5. Apply the modified CRUSH map

After editing, compile crushmap.txt back into binary form and inject it into the Ceph cluster:

crushtool -c crushmap.txt -o newcrushmap
ceph osd setcrushmap -i newcrushmap
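
Optionally, the compiled map can be dry-run with crushtool to see how each rule places replicas. Note that in this layout each rack contains a single host, so a rule with failure domain host can satisfy only one replica per rack; bad mappings in the test output reflect that:

crushtool -i newcrushmap --test --rule 1 --num-rep 2 --show-statistics
crushtool -i newcrushmap --test --rule 2 --num-rep 2 --show-statistics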

The resulting tree after the change:

[root@ceph1 media]# ceph osd  tree
ID  CLASS  WEIGHT   TYPE NAME           STATUS  REWEIGHT  PRI-AFF
-1         2.99799  root default
-6         1.63399      rack rack1
-5         1.00000          host ceph1
1    hdd  0.27299              osd.1       up   1.00000  1.00000
2    hdd  0.27299              osd.2       up   1.00000  1.00000
3    hdd  0.27299              osd.3       up   1.00000  1.00000
4    hdd  0.27299              osd.4       up   1.00000  1.00000
5    hdd  0.27299              osd.5       up   1.00000  1.00000
-7         1.36400      rack rack2
-3         1.00000          host ceph2
0    hdd  1.63399              osd.0       up   1.00000  1.00000
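
You can also confirm that the new rules exist and reference the intended racks:

ceph osd crush rule ls
ceph osd crush rule dump rack1_rule
ceph osd crush rule dump rack2_rule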

6. Create separate pools and apply the CRUSH rules

ceph osd pool create vms  128 128 replicated rack1_rule
ceph osd pool create images  128 128 replicated rack1_rule
ceph osd pool create rack1-volumes  128 128 replicated rack1_rule
ceph osd pool create rack2-volumes  128 128 replicated rack2_rule
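
After creating the pools, it is common to tag them for RBD use and to double-check which CRUSH rule each pool was assigned (standard commands, pool names as above):

ceph osd pool application enable vms rbd
ceph osd pool application enable images rbd
ceph osd pool application enable rack1-volumes rbd
ceph osd pool application enable rack2-volumes rbd
ceph osd pool get rack1-volumes crush_rule
ceph osd pool get rack2-volumes crush_rule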

7. Create the cinder and glance users for the OpenStack integration

ceph auth add client.cinder mon 'profile rbd' osd 'profile rbd'
ceph auth add client.glance mon 'profile rbd' osd 'profile rbd'
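
The caps above grant RBD access to every pool. If you want the credentials limited to the pools from step 6, they can be narrowed per pool, for example:

ceph auth caps client.glance mon 'profile rbd' osd 'profile rbd pool=images'
ceph auth caps client.cinder mon 'profile rbd' osd 'profile rbd pool=vms, profile rbd pool=images, profile rbd pool=rack1-volumes, profile rbd pool=rack2-volumes'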

8. Export the keyring files

ceph auth get client.cinder > /etc/ceph/ceph.client.cinder.keyring
ceph auth get client.glance > /etc/ceph/ceph.client.glance.keyring

9. Configure the OpenStack integration with Ceph

# Integrate Ceph with the OpenStack Glance service
# Create the /etc/kolla/config/glance directory
mkdir -p /etc/kolla/config/glance
# Copy ceph.client.glance.keyring to /etc/kolla/config/glance/ for the Glance integration
cp /etc/ceph/ceph.client.glance.keyring /etc/kolla/config/glance/
# Copy the Ceph configuration file to /etc/kolla/config/glance/
cp /etc/ceph/ceph.conf /etc/kolla/config/glance/
# Create the cinder directory
mkdir -p /etc/kolla/config/cinder/cinder-volume
# Edit the cinder-volume configuration file
vim /etc/kolla/config/cinder/cinder-volume.conf

[DEFAULT]
enabled_backends=rack1,rack2
[rack1]
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
backend_host = rbd:rack1-volumes
rbd_pool = rack1-volumes
volume_backend_name = rack1
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_secret_uuid = 7dfa8419-51c9-4d55-b544-8de0aadf0a98


[rack2]
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
backend_host = rbd:rack2-volumes
rbd_pool = rack2-volumes
volume_backend_name = rack2
volume_driver = cinder.volume.drivers.rbd.RBDDriver
# use the cinder_rbd_secret_uuid value from /etc/kolla/passwords.yml
rbd_secret_uuid = 7dfa8419-51c9-4d55-b544-8de0aadf0a98
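
The rbd_secret_uuid must match the cinder_rbd_secret_uuid that kolla-ansible generated; a quick way to look it up:

grep cinder_rbd_secret_uuid /etc/kolla/passwords.yml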


# Integrate Ceph with the OpenStack Cinder service
cp /etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-volume
cp /etc/ceph/ceph.conf /etc/kolla/config/cinder/
# Integrate Ceph with the OpenStack Nova service
mkdir -p /etc/kolla/config/nova
cp /etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/nova/
cp /etc/ceph/ceph.conf /etc/kolla/config/nova/


# Enable the Ceph backend for Glance, Cinder, and Nova
cat >>  /opt/oslostack/oslostack.yml <<EOF
glance_backend_ceph: "yes"
cinder_backend_ceph: "yes"
nova_backend_ceph: "yes"
EOF
# Redeploy nova, cinder, and glance
kolla-ansible -i /opt/oslostack/inventory -e @/opt/oslostack/oslostack.yml deploy -t nova -t cinder -t glance
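
After the redeploy, verify that both cinder-volume backends registered and are up (each backend_host from cinder-volume.conf should appear as a separate volume service):

openstack volume service list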
 
# Create volume types mapped to the different backends
openstack volume type create --description "rack1 backed" rack1
openstack volume type set --property volume_backend_name=rack1 rack1
openstack volume type create --description "rack2 backed" rack2
openstack volume type set --property volume_backend_name=rack2 rack2

10. Test by creating virtual machines and volumes with the different backends and confirm that the volumes land in the corresponding pools.
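
A minimal volume-level check, assuming the volume types created above (volume names and sizes are arbitrary): create one volume per type and confirm each RBD image lands in the expected pool.

openstack volume create --type rack1 --size 1 test-rack1
openstack volume create --type rack2 --size 1 test-rack2
rbd ls rack1-volumes --id cinder
rbd ls rack2-volumes --id cinder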
