### View the crushmap

```bash
[root@ceph01 ~]# ceph osd getcrushmap -o crushmap.txt
97
[root@ceph01 ~]# crushtool -d crushmap.txt -o crushmap-decompile
[root@ceph01 ~]# ls
anaconda-ks.cfg  crushmap-decompile  crushmap.txt
```

Despite the `.txt` name, `getcrushmap` writes the compiled (binary) map; `crushtool -d` decompiles it into the readable `crushmap-decompile`.
The decompiled map defines 11 bucket types:

```
# types: there are 11 bucket types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root
```
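If you edit the decompiled map, it can be recompiled and injected back into the cluster. A minimal sketch, reusing the file names from above (`crushmap-new` is an arbitrary name):

```bash
# recompile the edited text map into binary form
crushtool -c crushmap-decompile -o crushmap-new
# inject it into the cluster; this takes effect immediately,
# so data may start rebalancing
ceph osd setcrushmap -i crushmap-new
```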
### View the OSD device classes

```bash
[root@ceph01 ~]# ceph osd crush class ls-osd hdd
0
1
2
3
4
5
6
7
```
### Remove the device class from some OSDs

An OSD's existing class has to be removed before a new one can be set, so strip the `hdd` class from the OSDs that will become `ssd`:

```bash
[root@ceph01 ~]# ceph osd crush rm-device-class 0
done removing class of osd(s): 0
[root@ceph01 ~]# ceph osd crush rm-device-class 2
done removing class of osd(s): 2
[root@ceph01 ~]# ceph osd crush rm-device-class 4
done removing class of osd(s): 4
[root@ceph01 ~]# ceph osd crush rm-device-class 6
done removing class of osd(s): 6
```
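`rm-device-class` should also accept several OSD ids in one call; a sketch equivalent to the four commands above, assuming a variadic id list:

```bash
# same effect as the four separate calls
ceph osd crush rm-device-class 0 2 4 6
```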
### Set a new device class on the OSDs

```bash
[root@ceph01 ~]# ceph osd crush set-device-class ssd 0
set osd(s) 0 to class 'ssd'
[root@ceph01 ~]# ceph osd crush set-device-class ssd 2
set osd(s) 2 to class 'ssd'
[root@ceph01 ~]# ceph osd crush set-device-class ssd 4
set osd(s) 4 to class 'ssd'
[root@ceph01 ~]# ceph osd crush set-device-class ssd 6
set osd(s) 6 to class 'ssd'
```
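Like `rm-device-class`, `set-device-class` should take a list of ids, so the four calls can likely be collapsed into one:

```bash
# set all four OSDs to class ssd in one call
ceph osd crush set-device-class ssd 0 2 4 6
```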
### Verify that the OSD device classes changed

```bash
[root@ceph01 ~]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
-1       0.39038 root default
-3       0.09760     host ceph01
 1   hdd 0.04880         osd.1       up  1.00000 1.00000
 0   ssd 0.04880         osd.0       up  1.00000 1.00000
-5       0.09760     host ceph02
 3   hdd 0.04880         osd.3       up  1.00000 1.00000
 2   ssd 0.04880         osd.2       up  1.00000 1.00000
-7       0.09760     host ceph03
 5   hdd 0.04880         osd.5       up  1.00000 1.00000
 4   ssd 0.04880         osd.4       up  1.00000 1.00000
-9       0.09760     host ceph04
 7   hdd 0.04880         osd.7       up  1.00000 1.00000
 6   ssd 0.04880         osd.6       up  1.00000 1.00000
[root@ceph01 ~]#
```
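Device classes work through per-class "shadow" hierarchies that CRUSH maintains alongside the visible tree. On Luminous and later they can be inspected directly; a quick check:

```bash
# shadow buckets such as default~hdd and default~ssd should show up here
ceph osd crush tree --show-shadow
```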
### The OSD classes have grown from the original hdd to two classes

```bash
[root@ceph01 ~]# ceph osd crush class ls
[
    "hdd",
    "ssd"
]
```
### Create a bucket of type root

```bash
[root@ceph01 ~]# ceph osd crush add-bucket ceph-ssd root
added bucket ceph-ssd type root to crush map
```
#### Create a rule

`create-simple` builds a replicated rule named `ssd` that selects from the `ceph-ssd` root with `host` as the failure domain:

```bash
[root@ceph01 ~]# ceph osd crush rule create-simple ssd ceph-ssd host firstn
```
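On Luminous and later, a replicated rule can also target a device class directly, which avoids hand-building the separate root and host buckets that follow; a minimal sketch (the rule name `ssd-rule` is arbitrary):

```bash
# pick ssd-class OSDs under the existing default root,
# with host as the failure domain
ceph osd crush rule create-replicated ssd-rule default host ssd
```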
## Create four buckets of type host

```bash
[root@ceph01 ~]# ceph osd crush add-bucket ceph01-ssd host
added bucket ceph01-ssd type host to crush map
[root@ceph01 ~]# ceph osd crush add-bucket ceph02-ssd host
added bucket ceph02-ssd type host to crush map
[root@ceph01 ~]# ceph osd crush add-bucket ceph03-ssd host
added bucket ceph03-ssd type host to crush map
[root@ceph01 ~]# ceph osd crush add-bucket ceph04-ssd host
added bucket ceph04-ssd type host to crush map
```
## Move the four host buckets into the root bucket

```bash
# ceph osd crush move {bucket-name} {bucket-type}={bucket-name} [...]
[root@ceph01 ~]# ceph osd crush move ceph04-ssd root=ceph-ssd
moved item id -35 name 'ceph04-ssd' to location {root=ceph-ssd} in crush map
[root@ceph01 ~]# ceph osd crush move ceph03-ssd root=ceph-ssd
moved item id -34 name 'ceph03-ssd' to location {root=ceph-ssd} in crush map
[root@ceph01 ~]# ceph osd crush move ceph02-ssd root=ceph-ssd
moved item id -33 name 'ceph02-ssd' to location {root=ceph-ssd} in crush map
[root@ceph01 ~]# ceph osd crush move ceph01-ssd root=ceph-ssd
moved item id -32 name 'ceph01-ssd' to location {root=ceph-ssd} in crush map
[root@ceph01 ~]#
```
#### Move all the ssd-class OSDs under their corresponding ceph0X-ssd host

```bash
# ceph osd crush move osd.x {bucket-type}={bucket-name} [...]
[root@ceph01 ~]# ceph osd crush move osd.0 host=ceph01-ssd
moved item id 0 name 'osd.0' to location {host=ceph01-ssd} in crush map
[root@ceph01 ~]# ceph osd crush move osd.2 host=ceph02-ssd
moved item id 2 name 'osd.2' to location {host=ceph02-ssd} in crush map
[root@ceph01 ~]# ceph osd crush move osd.4 host=ceph03-ssd
moved item id 4 name 'osd.4' to location {host=ceph03-ssd} in crush map
[root@ceph01 ~]# ceph osd crush move osd.6 host=ceph04-ssd
moved item id 6 name 'osd.6' to location {host=ceph04-ssd} in crush map
[root@ceph01 ~]#
```
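Moving OSDs in the CRUSH map changes placement immediately, so PGs mapped to them may start backfilling. Two standard commands for watching the fallout:

```bash
# overall cluster state, including recovery/backfill activity
ceph -s
# per-OSD utilization laid out along the CRUSH tree
ceph osd df tree
```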
#### View the OSD distribution

```bash
[root@ceph01 ~]# ceph osd tree
ID  CLASS WEIGHT  TYPE NAME           STATUS REWEIGHT PRI-AFF
-31       0.19519 root ceph-ssd
-32       0.04880     host ceph01-ssd
  0   ssd 0.04880         osd.0           up  1.00000 1.00000
-33       0.04880     host ceph02-ssd
  2   ssd 0.04880         osd.2           up  1.00000 1.00000
-34       0.04880     host ceph03-ssd
  4   ssd 0.04880         osd.4           up  1.00000 1.00000
-35       0.04880     host ceph04-ssd
  6   ssd 0.04880         osd.6           up  1.00000 1.00000
 -1       0.19519 root default
 -3       0.04880     host ceph01
  1   hdd 0.04880         osd.1           up  1.00000 1.00000
 -5       0.04880     host ceph02
  3   hdd 0.04880         osd.3           up  1.00000 1.00000
 -7       0.04880     host ceph03
  5   hdd 0.04880         osd.5           up  1.00000 1.00000
 -9       0.04880     host ceph04
  7   hdd 0.04880         osd.7           up  1.00000 1.00000
[root@ceph01 ~]#
```
#### Create a pool backed by the ssd OSDs

The trailing `ssd` is the CRUSH rule created above, so the pool's data lands only on the ssd-class OSDs:

```bash
[root@ceph01 ~]# ceph osd pool create pool_ssd 32 replicated ssd
pool 'pool_ssd' created
```
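To confirm the pool really picked up the rule, query it back (on Luminous and later the key is `crush_rule`):

```bash
# should report the rule name assigned to the pool
ceph osd pool get pool_ssd crush_rule
```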
### View the pools

```bash
[root@ceph01 ~]# ceph osd pool ls
images
images2
images3
.rgw.root
default.rgw.control
default.rgw.meta
default.rgw.log
cephfs_data
cephfs_metadata
pool_ssd
[root@ceph01 ~]#
```
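As a final sanity check, the PGs of the new pool can be listed to verify their acting sets contain only the ssd OSDs (0, 2, 4 and 6); `ceph pg ls-by-pool` is available on Luminous and later:

```bash
# every PG's acting set should consist solely of OSDs 0, 2, 4 and 6
ceph pg ls-by-pool pool_ssd
```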
Reference: https://docs.ceph.com/docs/master/rados/operations/crush-map/#crush-map-bucket-types