Set up a new placement target to provide users with high-speed SSD storage

Create pools that use the SSDs

Check the disks

[root@node1 ~]# docker exec mon ceph osd tree
ID  CLASS WEIGHT   TYPE NAME      STATUS REWEIGHT PRI-AFF 
 -1       27.95123 root default                           
 -3        9.31708     host node1                         
  0   hdd  7.27769         osd.0      up  1.00000 1.00000 
  1   hdd  1.82019         osd.1      up  1.00000 1.00000 
  2   ssd  0.21919         osd.2      up  1.00000 1.00000 
 -7        9.31708     host node2                         
  3   hdd  7.27769         osd.3      up  1.00000 1.00000 
  4   hdd  1.82019         osd.4      up  1.00000 1.00000 
  5   ssd  0.21919         osd.5      up  1.00000 1.00000 
-10        9.31708     host node3                         
  6   hdd  7.27769         osd.6      up  1.00000 1.00000 
  7   hdd  1.82019         osd.7      up  1.00000 1.00000 
  8   ssd  0.21919         osd.8      up  1.00000 1.00000 
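
The CLASS column shows one SSD-backed OSD per host (osd.2, osd.5, osd.8). As an optional cross-check, the device classes known to CRUSH can be listed directly:

docker exec mon ceph osd crush class ls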

List the cluster's CRUSH rules

[root@node1 ~]# docker exec mon ceph osd crush rule ls
replicated_rule

Create a new CRUSH rule. ssd_rule selects only OSDs of device class ssd under the default root, with host as the failure domain

docker exec mon ceph osd crush rule create-replicated ssd_rule default host ssd 

List the CRUSH rules again

[root@node1 ~]# docker exec mon ceph osd crush rule ls
replicated_rule
ssd_rule
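
For more detail than the name listing, the new rule can also be dumped; its take step should reference the ssd shadow root (default~ssd):

docker exec mon ceph osd crush rule dump ssd_rule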

Export and decompile the CRUSH map

docker exec mon ceph osd getcrushmap -o /etc/ceph/crushmap.o
docker exec mon crushtool -d /etc/ceph/crushmap.o -o /etc/ceph/crushmap.txt

Modify the decompiled CRUSH map so that the existing pools no longer place data on the SSDs: add class hdd to the take step of replicated_rule, so the rule reads

rule replicated_rule {
        id 0
        type replicated
        min_size 1
        max_size 10
        step take default class hdd
        step chooseleaf firstn 0 type host
        step emit
}

Recompile the CRUSH map and inject it back into the cluster

docker exec mon crushtool -c /etc/ceph/crushmap.txt -o /etc/ceph/crushmap.o
docker exec mon ceph osd setcrushmap -i /etc/ceph/crushmap.o
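
Applying the edited map restricts replicated_rule to HDDs, so the existing default.* pools will backfill their data off the SSD OSDs. The migration can be watched with, for example:

docker exec mon ceph -s
docker exec mon ceph osd df tree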

Create pools backed exclusively by the SSDs

docker exec mon ceph osd pool create ssd.rgw.buckets.index 32 32 replicated ssd_rule
docker exec mon ceph osd pool create ssd.rgw.buckets.data 64 64 replicated ssd_rule
docker exec mon ceph osd pool create ssd.rgw.buckets.non-ec 32 32 replicated ssd_rule
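
The trailing arguments are pg_num, pgp_num, the pool type (replicated) and the CRUSH rule the pool should use. To confirm that a pool is bound to ssd_rule:

docker exec mon ceph osd pool get ssd.rgw.buckets.data crush_rule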

Enable the rgw application on the newly created pools

docker exec mon ceph osd pool application enable ssd.rgw.buckets.index rgw
docker exec mon ceph osd pool application enable ssd.rgw.buckets.data rgw
docker exec mon ceph osd pool application enable ssd.rgw.buckets.non-ec rgw

Check the pools in the cluster. The three new ssd.* pools use crush_rule 1 (ssd_rule), while the existing pools remain on crush_rule 0

[root@node1 ~]# docker exec mon ceph osd pool ls detail
pool 1 '.rgw.root' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 62 lfor 0/60 flags hashpspool stripe_width 0 application rgw
pool 2 'default.rgw.control' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 66 lfor 0/64 flags hashpspool stripe_width 0 application rgw
pool 3 'default.rgw.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 70 lfor 0/68 flags hashpspool stripe_width 0 application rgw
pool 4 'default.rgw.log' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 74 lfor 0/72 flags hashpspool stripe_width 0 application rgw
pool 7 'default.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 92 flags hashpspool stripe_width 0 application rgw
pool 8 'default.rgw.buckets.data' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 256 pgp_num 256 last_change 93 flags hashpspool stripe_width 0 application rgw
pool 9 'default.rgw.buckets.non-ec' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 94 flags hashpspool stripe_width 0 application rgw
pool 10 'ssd.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 1 object_hash rjenkins pg_num 32 pgp_num 32 last_change 116 flags hashpspool stripe_width 0 application rgw
pool 11 'ssd.rgw.buckets.data' replicated size 3 min_size 2 crush_rule 1 object_hash rjenkins pg_num 64 pgp_num 64 last_change 117 flags hashpspool stripe_width 0 application rgw
pool 12 'ssd.rgw.buckets.non-ec' replicated size 3 min_size 2 crush_rule 1 object_hash rjenkins pg_num 32 pgp_num 32 last_change 118 flags hashpspool stripe_width 0 application rgw

Set up the new placement target

Add the placement target to the zone

docker exec rgw radosgw-admin zone placement add --rgw-zone=default --placement-id=ssd-placement --index_pool=ssd.rgw.buckets.index --data_pool=ssd.rgw.buckets.data --data_extra_pool=ssd.rgw.buckets.non-ec --placement-index-type=normal

Check the zone's placement targets

[root@node1 ~]# docker exec rgw radosgw-admin zone placement list --rgw-zone=default
[
    {
        "key": "default-placement",
        "val": {
            "index_pool": "default.rgw.buckets.index",
            "data_pool": "default.rgw.buckets.data",
            "data_extra_pool": "default.rgw.buckets.non-ec",
            "index_type": 0,
            "compression": ""
        }
    },
    {
        "key": "ssd-placement",
        "val": {
            "index_pool": "ssd.rgw.buckets.index",
            "data_pool": "ssd.rgw.buckets.data",
            "data_extra_pool": "ssd.rgw.buckets.non-ec",
            "index_type": 0,
            "compression": ""
        }
    }
]

Add the placement target to the zonegroup

docker exec rgw radosgw-admin zonegroup placement add --rgw-zonegroup=default --placement-id=ssd-placement
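
The zonegroup side can be verified the same way as the zone. The second command below is only needed when the RGW deployment uses a realm (multisite), where zone and zonegroup changes have to be committed to the period; in a plain default setup it can be skipped:

docker exec rgw radosgw-admin zonegroup placement list --rgw-zonegroup=default
docker exec rgw radosgw-admin period update --commit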

Dump the user's metadata

docker exec rgw radosgw-admin metadata get user:ssd_admin > /etc/ceph/ssd_admin.info

Edit the /etc/ceph/ssd_admin.info file so the user defaults to the new placement target; inside the data object, set

"default_placement": "ssd-placement",
"placement_tags": [
                "ssd-placement"
        ],
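
Alternatively, if jq is installed on the host, the same two fields can be set without a manual edit (a sketch; the fields live under the top-level data object of the metadata dump):

docker exec rgw radosgw-admin metadata get user:ssd_admin \
  | jq '.data.default_placement = "ssd-placement" | .data.placement_tags = ["ssd-placement"]' \
  > /etc/ceph/ssd_admin.info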

Write the modified user metadata back

docker exec -i rgw radosgw-admin metadata put user:ssd_admin < /etc/ceph/ssd_admin.info
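
Before restarting, the change can be verified; default_placement should now read ssd-placement:

docker exec rgw radosgw-admin user info --uid=ssd_admin | grep placement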

Restart the RGW service

docker restart rgw
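
To confirm the end-to-end behaviour, create a test bucket with any S3 client configured for this user's keys (s3cmd and the bucket name below are just examples) and check which placement it landed on; placement_rule should reference ssd-placement:

s3cmd mb s3://ssd-test-bucket
docker exec rgw radosgw-admin bucket stats --bucket=ssd-test-bucket | grep placement_rule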