Ceph - howto, rbd, cluster

Add a pool

# Create a pool (512 placement groups)

ceph osd pool create mypool 512

# Set the pool replica count

ceph osd pool set mypool size 3            # number of replicas (size)

ceph osd pool set mypool min_size 2        # minimum replicas required to serve I/O
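
A quick way to confirm the settings took effect (pool name as above):

## Check the pool's replica settings
ceph osd pool get mypool size
ceph osd pool get mypool min_size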

Delete a pool

ceph osd pool delete mypool mypool --yes-i-really-really-mean-it
pool 'mypool' removed

Add an OSD

Create the OSD journal partition

# Create a 4 GB journal partition
sgdisk --new="3:0:+4G" -c "3:osd.8 journal" /dev/sde

# List the partitions
sgdisk -p /dev/sde

Add osd.8

## Format the data disk
mkfs.xfs -f /dev/sdd

## Allocate a new osd id; the command prints the new osd number (8 in this example)
ceph osd create

## Create the osd data directory
mkdir -p /var/lib/ceph/osd/osd.8 

## Mount the osd data directory
mount /dev/sdd /var/lib/ceph/osd/osd.8

## Initialize the osd data directory, pointing at the journal partition created above
ceph-osd -i 8 --osd-data=/var/lib/ceph/osd/osd.8 --osd-journal=/dev/sde3 --mkfs --mkkey --mkjournal

## Add the OSD authentication key
ceph auth add osd.8 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/osd.8/keyring

## Add osd.8 to the crushmap (weight 2, under host ceph03)
ceph osd crush add osd.8 2 host=ceph03 

## Add osd.8 to ceph.conf (see the sketch below), copy ceph.conf to the other nodes, then start osd.8
/etc/rc.d/init.d/ceph start osd.8
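
The ceph.conf entry could look roughly like the following minimal sketch (old-style per-daemon section; the osd journal line assumes the raw partition created above):

[osd.8]
    host        = ceph03
    osd journal = /dev/sde3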

Delete an OSD

View the OSD tree

ceph osd tree

Remove osd.8

## Mark osd.8 out
ceph osd out 8
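
Before stopping the daemon it is usually worth waiting for the resulting rebalance to finish:

## Watch until the PGs are back to active+clean
ceph -w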

## Stop the osd.8 daemon
/etc/rc.d/init.d/ceph stop osd.8

## Remove osd.8 from the crushmap
ceph osd crush remove osd.8 

## Delete osd.8's authentication key
ceph auth del osd.8 

## Remove the osd entry from the cluster
ceph osd rm 8 

## Unmount the osd.8 data directory
umount /var/lib/ceph/osd/osd.8 

## Delete the osd.8 section from ceph.conf and copy ceph.conf to the other nodes
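
To confirm osd.8 is fully removed:

## osd.8 should no longer appear in the output
ceph osd tree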

Add a MON

Create the mon data directory, fetch the keys, and configure ceph.conf

## Create the mon data directory
mkdir -p /var/lib/ceph/mon/mon.ceph03 

## Get the mon keyring
ceph auth get mon. -o /tmp/monkeyring

## Get the mon map
ceph mon getmap -o /tmp/monmap

## Initialize the mon data directory
ceph-mon -i ceph03 --mkfs --monmap /tmp/monmap --keyring /tmp/monkeyring

## Add the new mon to the monmap
ceph mon add ceph03 192.168.15.62:6789

## Start the mon and join the cluster
ceph-mon -i ceph03 --public-addr 192.168.15.62:6789

Add the new MON to ceph.conf and copy it to the other nodes

[mon] 
… 
[mon.ceph03]    
    host                       = ceph03    
    mon addr                   = 192.168.15.62:6789 
…
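
To confirm the new monitor has joined the quorum:

## ceph03 should be listed among the quorum members
ceph mon stat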

Delete a MON

Stop the MON, remove it from the cluster, and update ceph.conf

## Stop the mon
/etc/rc.d/init.d/ceph stop mon 

## Remove the mon from the cluster
ceph mon remove ceph03 

## Delete the mon.ceph03 section from ceph.conf and copy ceph.conf to the other nodes

Map RBD volumes

## Create an rbd image in the pool named mypool
rbd create mypool/disk01 --size 1000 

## Show the rbd image info
rbd info mypool/disk01 

## Map the rbd image; once mapped it behaves like a local disk
rbd map mypool/disk01 

## List mapped rbd images
rbd showmapped
1  mypool disk01 -    /dev/rbd1

## Format /dev/rbd1 and mount it
mkfs.xfs /dev/rbd1 
mkdir /mnt/disk01 
mount /dev/rbd1 /mnt/disk01 

## Grow the rbd image, then grow the filesystem on the mount point
rbd resize --size 15000 mypool/disk01
xfs_growfs /mnt/disk01

Unmap RBD volumes

umount /dev/rbd1
rbd unmap /dev/rbd1
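
A quick check that the unmap succeeded:

## /dev/rbd1 should no longer be listed
rbd showmapped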

Cache pool

Export the crushmap

## Get a crushmap
ceph osd getcrushmap -o /tmp/crushmap

## decompile a crushmap
crushtool -d /tmp/crushmap -o /tmp/crushmap.txt

Edit the crushmap

… 
# buckets
host ceph01 {
        id -2           # do not change unnecessarily
        # weight 6.000
        alg straw
        hash 0  # rjenkins1
        item osd.0 weight 2.000
        item osd.1 weight 2.000
        item osd.2 weight 2.000
 }
host ceph02 {
        id -3           # do not change unnecessarily
        # weight 6.000
        alg straw
        hash 0  # rjenkins1
        item osd.3 weight 2.000
        item osd.4 weight 2.000
        item osd.5 weight 2.000
 }
host ceph03 {
        id -4           # do not change unnecessarily
        # weight 6.000
        alg straw
        hash 0  # rjenkins1
        item osd.6 weight 2.000
        item osd.7 weight 2.000
        item osd.8 weight 2.000
 }
root platter {
        id -1           # do not change unnecessarily
        # weight 12.000
        alg straw
        hash 0  # rjenkins1
        item ceph02 weight 6.000
        item ceph03 weight 6.000
 }
root ssd {
        id -5           # do not change unnecessarily
        # weight 6.000
        alg straw
        hash 0  # rjenkins1
        item ceph01 weight 6.000
 }

# rules
rule replicated_ruleset {
        ruleset 0
        type replicated
        min_size 1
        max_size 10
        step take platter
        step chooseleaf firstn 0 type host
        step emit
 }

rule metadata {
      ruleset 1
      type replicated
      min_size 0
      max_size 10
      step take platter
      step chooseleaf firstn 0 type host
      step emit
}
rule rbd {
      ruleset 2
      type replicated
      min_size 0
      max_size 10
      step take platter
      step chooseleaf firstn 0 type host
      step emit
}

rule platter {
      ruleset 3
      type replicated
      min_size 0
      max_size 10
      step take platter
      step chooseleaf firstn 0 type host
      step emit
}

rule ssd {
      ruleset 4
      type replicated
      min_size 0
      max_size 4
      step take ssd
      step chooseleaf firstn 0 type host
      step emit
}
… 

Import the crushmap

## Compile the crushmap
crushtool -c /tmp/crushmap.txt -o /tmp/crushmap.new

## set a crushmap
ceph osd setcrushmap -i /tmp/crushmap.new
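
One way to confirm the new rules were loaded (rule names as in the crushmap above):

## List the crush rules known to the cluster
ceph osd crush rule ls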

Create a cache pool

## Create the base pool
ceph osd pool create foo 256

## Create the cache pool and assign it the ssd ruleset (ruleset 4 above)
ceph osd pool create foo-hot 256
ceph osd pool set foo-hot crush_ruleset 4

## Set the cache pool replica count
ceph osd pool set foo-hot size 2
ceph osd pool set foo-hot min_size 1

## Make foo-hot a cache tier for foo
ceph osd tier add foo foo-hot
ceph osd tier cache-mode foo-hot writeback

## Route client I/O for foo through foo-hot
ceph osd tier set-overlay foo foo-hot

## Tune the foo-hot cache parameters
ceph osd pool set foo-hot hit_set_type bloom
ceph osd pool set foo-hot hit_set_count 1
ceph osd pool set foo-hot hit_set_period 3600  # 1 hour
ceph osd pool set foo-hot target_max_bytes 1000000000000   #  1TB
ceph osd pool set foo-hot cache_target_dirty_ratio .4
ceph osd pool set foo-hot cache_target_full_ratio .8
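
The tier relationship and cache settings can be checked in the osd map (pool names as above):

## foo should list foo-hot as its read/write tier
ceph osd dump | grep foo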

Remove a cache pool

## Switch the cache to forward mode and flush/evict everything from foo-hot
ceph osd tier cache-mode foo-hot forward
rados -p foo-hot cache-flush-evict-all

## Remove the cache tier
ceph osd tier remove-overlay foo
ceph osd tier remove foo foo-hot

Speed up cluster recovery

ceph tell osd.* injectargs '--osd_max_backfills 20'
ceph tell osd.* injectargs '--osd_recovery_sleep_hdd 0'

Restore the default recovery settings

ceph tell osd.* injectargs "--osd_max_backfills 1"
ceph tell osd.* injectargs "--osd_recovery_sleep_hdd 0.1"

 
