1. Create a replicated crush rule based on the hdd device class
ceph osd crush rule create-replicated hdd_rule default host hdd
2. Create a replicated crush rule based on the ssd device class
ceph osd crush rule create-replicated ssd_rule default host ssd
# List existing crush rules
ceph osd crush rule ls
# Show the details of a specific rule
ceph osd crush rule dump replicated_rule
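Optionally, before building class-based rules, confirm that the OSDs carry the expected device classes; these are standard Ceph commands, added here only as a sanity check and not part of the original steps:
# List the device classes known to the cluster
ceph osd crush class ls
# Show the per-class shadow hierarchies that class-based rules draw from
ceph osd crush tree --show-shadow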
3. Recompile the crushmap to modify the default replicated crush rule (replicated_rule)
- Goal: existing pools no longer place data on ssd OSDs
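As an alternative to recompiling the whole crushmap, an existing pool can simply be pointed at the new hdd-only rule; this is a standard command, shown with a placeholder pool name as an optional shortcut:
# Switch an existing pool to the hdd-only rule
ceph osd pool set <pool-name> crush_rule hdd_rule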
# Copy the admin keyring into the mon container
docker cp /etc/ceph/ceph.client.admin.keyring ab80b0631d16:/etc/ceph/
# Export the current crushmap
docker exec ab80b0631d16 ceph osd getcrushmap -o /etc/ceph/crushmap.o
# Decompile the crushmap
docker exec ab80b0631d16 crushtool -d /etc/ceph/crushmap.o -o /etc/ceph/crushmap.txt
# Edit the crushmap
vi /etc/ceph/crushmap.txt
# The only change from the default rule: "step take default class hdd" restricts placement to hdd OSDs
rule replicated_rule {
    id 0
    type replicated
    min_size 1
    max_size 10
    step take default class hdd
    step chooseleaf firstn 0 type host
    step emit
}
# Recompile the modified crushmap and set it as the cluster's crushmap
docker exec fe597c666e5b crushtool -c /etc/ceph/crushmap.txt -o /etc/ceph/crushmap.o
docker exec fe597c666e5b ceph osd setcrushmap -i /etc/ceph/crushmap.o
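Optionally, the edited map can be sanity-checked with crushtool's test mode before (or after) injecting it; this uses standard crushtool flags and is not part of the original workflow:
# Simulate placements for rule 0 with 3 replicas; every chosen OSD should be an hdd
docker exec fe597c666e5b crushtool -i /etc/ceph/crushmap.o --test --show-mappings --rule 0 --num-rep 3 --min-x 0 --max-x 9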
4. Create the base (cold) tier pool
ceph osd pool create cold-storage 32 32 replicated hdd_rule
5. Create the cache (hot) tier pool
ceph osd pool create hot-storage 32 32 replicated ssd_rule
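To confirm that each pool picked up the intended rule (a quick optional check, not in the original steps):
# Should report hdd_rule and ssd_rule respectively
ceph osd pool get cold-storage crush_rule
ceph osd pool get hot-storage crush_rule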
5.1 If pg_num does not match the intended value after creation, adjust it with the following commands
ceph osd pool set <pool-name> pg_num 64
ceph osd pool set <pool-name> pgp_num 64
ceph osd pool set <pool-name> pg_num_min 64
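Note: on releases where the pg autoscaler is enabled by default, it may change pg_num back on its own; if the manual value should stick, the autoscaler can be disabled per pool (standard option, optional here):
ceph osd pool set <pool-name> pg_autoscale_mode off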
6. Set up the cache tier
# Writeback (WRITEBACK) cache tier configuration
# Place hot-storage in front of cold-storage
ceph osd tier add cold-storage hot-storage
# Set the cache mode to writeback
ceph osd tier cache-mode hot-storage writeback
# Direct all client requests from the base pool to the cache pool
ceph osd tier set-overlay cold-storage hot-storage
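The tier relationship and cache mode can be verified after these three commands; this is just a read-only check, not part of the original steps:
# cold-storage should show read_tier/write_tier pointing at hot-storage,
# and hot-storage should show tier_of cold-storage with cache_mode writeback
ceph osd dump | grep -E 'cold-storage|hot-storage'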
7. Configure the cache tier's basic parameters
ceph osd pool set hot-storage hit_set_type bloom
ceph osd pool set hot-storage hit_set_count 12
ceph osd pool set hot-storage hit_set_period 14400
ceph osd pool set hot-storage min_read_recency_for_promote 2
ceph osd pool set hot-storage min_write_recency_for_promote 2
# Flush or evict when the cache pool reaches 100 MB (104857600 bytes); for 1 TB use 1099511627776
ceph osd pool set hot-storage target_max_bytes 104857600
# Flush or evict when the cache holds 1M objects
ceph osd pool set hot-storage target_max_objects 1000000
# Begin flushing modified (dirty) objects when they reach 20% of the cache pool's capacity
ceph osd pool set hot-storage cache_target_dirty_ratio 0.2
# Begin aggressively flushing dirty objects when they reach 40% of the cache pool's capacity
ceph osd pool set hot-storage cache_target_dirty_high_ratio 0.4
# Begin evicting unmodified (clean) objects when the cache pool reaches 60% of its capacity
ceph osd pool set hot-storage cache_target_full_ratio 0.6
# Flush modified (dirty) objects to the backing pool only after they have been in the cache for at least 10 minutes
ceph osd pool set hot-storage cache_min_flush_age 600
# Evict objects from the cache tier only after they are at least 30 minutes old
ceph osd pool set hot-storage cache_min_evict_age 1800
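All of the values set above can be read back for review (optional check, not part of the original steps):
# Dump every gettable parameter for the cache pool, including the hit_set and target/ratio settings
ceph osd pool get hot-storage all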
8. Associate the pool with an application
ceph osd pool application enable cold-storage rgw
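The association can be confirmed with the following command (optional; note that rbd or cephfs would be used instead of rgw if the pools back those clients):
ceph osd pool application get cold-storage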
9. Remove the writeback cache tier
# Change the cache mode to readproxy so that new and modified objects are flushed to the backing storage pool
[root@ceph-node1 ~]# ceph osd tier cache-mode hot-storage readproxy
set cache-mode for pool 'hot-storage' to readproxy
# Make sure the cache pool has been flushed (this should eventually list no objects)
[root@ceph-node1 ~]# rados -p hot-storage ls
# Manually flush and evict the remaining objects in the cache tier
[root@ceph-node1 ~]# rados -p hot-storage cache-flush-evict-all
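If cache-flush-evict-all reports objects that are busy or locked, stop the clients still using the pool and re-run it; then confirm the cache is empty before removing the overlay (same check as above, counted instead of listed):
rados -p hot-storage ls | wc -l   # should print 0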
# Remove the overlay so that clients stop directing traffic to the cache
[root@ceph-node1 ~]# ceph osd tier remove-overlay cold-storage
there is now (or already was) no overlay for 'cold-storage'
# Detach the cache tier pool from the backing storage pool
[root@ceph-node1 ~]# ceph osd tier remove cold-storage hot-storage
pool 'hot-storage' is now (or already was) not a tier of 'cold-storage'
# Delete hot-storage
[root@ceph-node1 ~]# ceph osd pool rm hot-storage hot-storage --yes-i-really-really-mean-it
# Delete cold-storage
[root@ceph-node1 ~]# ceph osd pool rm cold-storage cold-storage --yes-i-really-really-mean-it
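Note: pool deletion is refused unless the monitors allow it; if the rm commands above fail, the flag can be enabled first (standard mon option, not part of the original steps):
ceph config set mon mon_allow_pool_delete true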