（文章感谢 xingdian）
文章目录
kubernetes集群中ceph集群使用
一:CephFS 创建和使用
CephFS 允许用户挂载一个兼容 POSIX 的共享目录到多个主机，该存储和 NFS 共享存储以及 CIFS 共享目录相似。
1.filesystem 配置
filesystem.yaml: 3份副本的生产环境配置,需要至少3个节点
filesystem-ec.yaml: 纠错码的生产环境配置,需要至少3个节点
filesystem-test.yaml: 1份副本的测试环境,只需要一个节点
[root@master ~]# cd /tmp/rook/cluster/examples/kubernetes/ceph
[root@master ceph]# sed -i 's/failureDomain: host/failureDomain: osd/g' filesystem.yaml
[root@master ceph]# kubectl apply -f filesystem.yaml
cephfilesystem.ceph.rook.io/myfs created
[root@master cephfs]# kubectl -n rook-ceph get pod -l app=rook-ceph-mds
NAME READY STATUS RESTARTS AGE
rook-ceph-mds-myfs-a-5bd6895d9-mbbm6 1/1 Running 0 7m21s
rook-ceph-mds-myfs-b-7d7b55684b-j5f5x 1/1 Running 0 7m4s
2.查看资源配置
[root@master ceph]# NAME=$(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}')
[root@master ceph]# kubectl -n rook-ceph exec -it ${NAME} sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
sh-4.2# ceph status
cluster:
id: fb3cdbc2-8fea-4346-b752-131fd1eb2baf
health: HEALTH_ERR
1 filesystem is offline
1 filesystem is online with fewer MDS than max_mds
1/3 mons down, quorum a,b
services:
mon: 3 daemons, quorum a,b (age 2h), out of quorum: c
mgr: a(active, since 136y)
mds: myfs:0
osd: 4 osds: 3 up (since 136y), 3 in (since 136y)
data:
pools: 2 pools, 64 pgs
objects: 0 objects, 0 B
usage: 3.0 GiB used, 21 GiB / 24 GiB avail
pgs: 64 active+clean
sh-4.2# ceph osd lspools
1 myfs-metadata
2 myfs-data0
sh-4.2# ceph mds stat
myfs:1 {
0=