
Managing Ceph Pools

# List Ceph pools

ceph osd lspools

# Create a pool

ceph osd pool create {pool-name} {pg-num} [{pgp-num}] [replicated] [crush-rule-name] [expected-num-objects]

ceph osd pool create {pool-name} {pg-num} {pgp-num} erasure [erasure-code-profile] [crush-rule-name] [expected-num-objects]

[root@node1 ~]# ceph osd pool create pool_demo 16 16

pool 'pool_demo' created

[root@node1 ~]# ceph osd lspools

1 ceph-demo

2 .rgw.root

3 default.rgw.control

4 default.rgw.meta

5 default.rgw.log

6 cephfs_data

7 cephfs_metadata

8 pool_demo
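
The same command creates an erasure-coded pool when the pool type is named explicitly. A minimal sketch (ec_demo is a hypothetical pool name; omitting the profile falls back to the stock default erasure-code profile):

ceph osd pool create ec_demo 16 16 erasure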

# Modify pool properties

[root@node1 ~]# ceph osd pool get pool_demo size

size: 3

[root@node1 ~]# ceph osd pool get pool_demo pg_num

pg_num: 16

[root@node1 ~]# ceph osd pool get pool_demo pgp_num

pgp_num: 16

[root@node1 ~]# ceph osd pool set pool_demo pg_num 32

set pool 8 pg_num to 32

[root@node1 ~]# ceph osd pool set pool_demo pgp_num 32

set pool 8 pgp_num to 32
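
Keep pgp_num equal to pg_num: data only rebalances across the new placement groups once pgp_num is raised to match. On Nautilus and later you can also hand placement-group sizing to the autoscaler instead of tuning it by hand; a sketch, assuming the pg_autoscaler mgr module is available on your release:

ceph mgr module enable pg_autoscaler

ceph osd pool set pool_demo pg_autoscale_mode on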

# Associate a pool with an application

ceph osd pool application enable {pool-name} {application-name}

# application-name=rbd|cephfs|rgw

[root@node1 ~]# ceph osd pool application enable pool_demo rbd

enabled application 'rbd' on pool 'pool_demo'

[root@node1 ~]# ceph osd pool application get pool_demo

{

"rbd": {}

}
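
For RBD pools specifically, the rbd CLI offers a one-step initializer that tags the application and prepares the pool in a single command; a sketch:

rbd pool init pool_demo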

# Set pool quotas: cap the maximum number of bytes and/or the maximum number of objects per pool

[root@node1 ~]# ceph osd pool get-quota pool_demo

quotas for pool 'pool_demo':

max objects: N/A

max bytes : N/A

[root@node1 ~]# ceph osd pool set-quota pool_demo max objects 100

Invalid command: max not in max_objects|max_bytes

osd pool set-quota <poolname> max_objects|max_bytes <val> : set object or byte limit on pool

Error EINVAL: invalid command

[root@node1 ~]# ceph osd pool set-quota pool_demo max_objects 100

set-quota max_objects = 100 for pool pool_demo

[root@node1 ~]# ceph osd pool get-quota pool_demo

quotas for pool 'pool_demo':

max objects: 100 objects

max bytes : N/A
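
Setting a quota value back to 0 disables that quota again; for example:

ceph osd pool set-quota pool_demo max_objects 0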

# Delete a pool

ceph osd pool delete {pool-name} [{pool-name} --yes-i-really-really-mean-it]

To delete a pool, the mon_allow_pool_delete flag must be set to true in the monitors' configuration; otherwise the monitors will refuse to remove the pool.

If you created your own CRUSH rules for a pool, consider removing them once the pool is no longer needed:

ceph osd pool get {pool-name} crush_rule

For example, if the rule is "123", check whether any other pool still uses it:

ceph osd dump | grep "^pool" | grep "crush_rule 123"

If no other pool uses that custom rule, it is safe to delete it from the cluster, as shown below.
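
If nothing else references it, remove the rule with:

ceph osd crush rule rm {rule-name}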

Likewise, if you created users whose permissions were scoped strictly to a pool that no longer exists, consider deleting those users:

ceph auth ls | grep -C 5 {pool-name}

ceph auth del {user}
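
For example, assuming a hypothetical user client.pool_demo_user whose caps were scoped to this pool:

ceph auth ls | grep -C 5 pool_demo

ceph auth del client.pool_demo_user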

Example: deleting pool_demo

[root@node1 ~]# ceph osd pool delete pool_demo pool_demo --yes-i-really-really-mean-it-not-faking

Error EPERM: pool deletion is disabled; you must first set the mon_allow_pool_delete config option to true before you can destroy a pool

Temporarily changing mon_allow_pool_delete

[root@node1 ~]# ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok config show | grep mon_allow_pool_delete

"mon_allow_pool_delete": "false",

[root@node1 ~]# ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok config set mon_allow_pool_delete true

{

"success": "mon_allow_pool_delete = 'true' "

}

[root@node1 ~]# ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok config show | grep mon_allow_pool_delete

"mon_allow_pool_delete": "true",

[root@node1 ~]# ceph osd pool delete pool_demo pool_demo --yes-i-really-really-mean-it-not-faking

pool 'pool_demo' removed
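
On Mimic and later you can also flip the flag cluster-wide through the centralized configuration database, instead of talking to each monitor's admin socket; a sketch:

ceph config set mon mon_allow_pool_delete true

ceph config get mon mon_allow_pool_delete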

Permanently changing mon_allow_pool_delete

[root@node1 my-cluster]# vim ceph.conf

[root@node1 my-cluster]# cat ceph.conf

[global]

fsid = 3f5560c6-3af3-4983-89ec-924e8eaa9e06

public_network = 192.168.6.0/24

cluster_network = 172.16.79.0/16

mon_initial_members = node1

mon_host = 192.168.6.160

auth_cluster_required = cephx

auth_service_required = cephx

auth_client_required = cephx

mon_allow_pool_delete = true # add this line, then push the config to every node

[client.rgw.node1]

rgw_frontends = "civetweb port=80"

Push the updated ceph.conf to every node

[root@node1 my-cluster]# ceph-deploy --overwrite-conf config push node1 node2 node3

Restart the ceph-mon service on each monitor node

[root@node1 my-cluster]# ssh node1 systemctl restart ceph-mon.target

[root@node1 my-cluster]# ssh node2 systemctl restart ceph-mon.target

[root@node1 my-cluster]# ssh node3 systemctl restart ceph-mon.target

Verify that the option is now true and the change has taken effect

[root@node2 ~]# ceph --admin-daemon /var/run/ceph/ceph-mon.node2.asok config show | grep mon_allow_pool_delete

"mon_allow_pool_delete": "true",

# Rename a pool

ceph osd pool rename {current-pool-name} {new-pool-name}

If you rename a pool and an authenticated user has per-pool capabilities on it, you must update that user's capabilities (i.e., caps) to reference the new pool name, as sketched below.
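
A sketch, assuming a hypothetical user client.demo whose caps referenced the old pool name:

ceph osd pool rename pool_demo pool_demo_new

ceph auth caps client.demo mon 'allow r' osd 'allow rwx pool=pool_demo_new'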

# Show pool statistics

rados df

ceph osd pool stats [{pool-name}]

[root@node1 ~]# rados df

POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS RD WR_OPS WR USED COMPR UNDER COMPR

.rgw.root 768 KiB 4 0 12 0 0 0 0 0 B 4 4 KiB 0 B 0 B

ceph-demo 653 MiB 79 0 237 0 0 0 1684 5.6 MiB 285 225 MiB 0 B 0 B

cephfs_data 0 B 0 0 0 0 0 0 0 0 B 0 0 B 0 B 0 B

cephfs_metadata 1.5 MiB 22 0 66 0 0 0 0 0 B 49 17 KiB 0 B 0 B

default.rgw.control 0 B 8 0 24 0 0 0 0 0 B 0 0 B 0 B 0 B

default.rgw.log 0 B 175 0 525 0 0 0 63063 61 MiB 42053 0 B 0 B 0 B

default.rgw.meta 0 B 0 0 0 0 0 0 0 0 B 0 0 B 0 B 0 B

pool_demo 0 B 0 0 0 0 0 0 0 0 B 0 0 B 0 B 0 B

total_objects 288

total_used 6.7 GiB

total_avail 143 GiB

total_space 150 GiB

[root@node1 ~]# ceph osd pool stats pool_demo

pool pool_demo id 8

nothing is going on
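
For a cluster-wide view to complement the per-pool numbers above, ceph df also works:

ceph df detail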
