Mounting a manually created Ceph RBD in k8s:
Using Ceph with k8s:
This article describes how k8s can use Ceph RBD storage.
1. First you need a Ceph cluster; setting one up is not covered here.
2. Make sure the cluster status is not in error (a quick check is sketched below).
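A minimal health check, assuming it is run on a node with admin credentials; output varies per cluster:
ceph -s              # overall status; look for HEALTH_OK (HEALTH_WARN is usually tolerable)
ceph health detail   # lists the concrete problems if the status is not OK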
Using RBD storage directly
How to use a Ceph RBD directly in a pod:
- Create an RBD pool:
root@ubuntu2-15:~# ceph osd pool create k8s 16 16
pool 'k8s' created
root@ubuntu2-15:~# ceph osd dump | grep k8s
pool 15 'k8s' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 16 pgp_num 16 autoscale_mode on last_change 1181 flags hashpspool stripe_width 0
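On Luminous and newer releases a pool should also be tagged with the application that will use it, otherwise ceph -s warns about it; a sketch:
ceph osd pool application enable k8s rbd
# or: rbd pool init k8s   (initializes the pool for rbd and tags it in one step)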
- Create an RBD image:
root@ubuntu2-15:~# rbd create -p k8s static-rbd-1Gi --size 1G
root@ubuntu2-15:~# rbd -p k8s ls
static-rbd-1Gi
root@ubuntu2-15:~# rbd info k8s/static-rbd-1Gi
rbd image 'static-rbd-1Gi':
size 1GiB in 256 objects
order 22 (4MiB objects)
block_name_prefix: rbd_data.1328306b8b4567
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
flags:
create_timestamp: Mon May 23 14:28:13 2022
- Disable the RBD features the kernel client cannot handle (otherwise mapping the image may fail):
root@ubuntu2-15:~# rbd feature disable k8s/static-rbd-1Gi object-map fast-diff deep-flatten
root@ubuntu2-15:~#
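To confirm that only kernel-supported features remain, re-check the image; expected output sketched below:
rbd info k8s/static-rbd-1Gi | grep features
# features: layering, exclusive-lock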
- Map the RBD image, create an XFS filesystem on it, then unmap it. If you skip the unmap, the mapping stays held on this host:
root@ubuntu2-15:~# rbd map k8s/static-rbd-1Gi
/dev/rbd0
root@ubuntu2-15:~# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 1.8T 0 disk
├─sda1 8:1 0 512M 0 part /boot/efi
└─sda2 8:2 0 1.8T 0 part
├─ubuntu2--15--vg-root 253:0 0 1.8T 0 lvm /
└─ubuntu2--15--vg-swap_1 253:1 0 976M 0 lvm
sdb 8:16 0 1.8T 0 disk
└─ceph--cd27889b--e94d--4eb1--a0cb--a046d9f426ab-osd--block--bdff1d1b--eaf4--4d6a--9655--8602145225cf 253:2 0 1.8T 0 lvm
rbd0 252:0 0 1G 0 disk
nvme0n1 259:0 0 119.2G 0 disk
└─ceph--92daff84--dcc0--483e--a6ba--4cb3972c2e3c-osd--db--97b38bc8--c232--41bf--8d06--71eebd5e7ef6 253:3 0 1G 0 lvm
root@ubuntu2-15:~# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=9, agsize=31744 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=0, rmapbt=0, reflink=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
root@ubuntu2-15:~# rbd unmap k8s/static-rbd-1Gi
root@ubuntu2-15:~# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 1.8T 0 disk
├─sda1 8:1 0 512M 0 part /boot/efi
└─sda2 8:2 0 1.8T 0 part
├─ubuntu2--15--vg-root 253:0 0 1.8T 0 lvm /
└─ubuntu2--15--vg-swap_1 253:1 0 976M 0 lvm
sdb 8:16 0 1.8T 0 disk
└─ceph--cd27889b--e94d--4eb1--a0cb--a046d9f426ab-osd--block--bdff1d1b--eaf4--4d6a--9655--8602145225cf 253:2 0 1.8T 0 lvm
nvme0n1 259:0 0 119.2G 0 disk
└─ceph--92daff84--dcc0--483e--a6ba--4cb3972c2e3c-osd--db--97b38bc8--c232--41bf--8d06--71eebd5e7ef6 253:3 0 1G 0 lvm
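Before handing the image over to k8s you can double-check that nothing still holds it; a sketch:
rbd status k8s/static-rbd-1Gi
# Watchers: none   <- expected after a clean unmap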
- Create a Ceph user for the k8s cluster and extract its key. In this test environment it is simply granted all permissions:
root@ubuntu2-15:~# ceph auth get-or-create client.k8s mon 'allow *' mds 'allow *' osd 'allow *'
[client.k8s]
key = AQDNNYtijoLQKRAALNLmu7YJ3LW1JDyYcmK3uQ==
root@ubuntu2-15:~# echo AQDNNYtijoLQKRAALNLmu7YJ3LW1JDyYcmK3uQ== | base64 -d
root@ubuntu2-15:~# echo "AQDNNYtijoLQKRAALNLmu7YJ3LW1JDyYcmK3uQ==" | base64
QVFETk5ZdGlqb0xRS1JBQUxOTG11N1lKM0xXMUpEeVljbUszdVE9PQo=
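The key and capabilities can be read back at any time:
ceph auth get client.k8s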
- Create the secret in the k8s environment. Since stringData is used, the raw key goes in as-is; the base64-encoded form above would only be needed under a data field:
root@k8s-ceph:~/lim/yanshi# cat csi-rbd-secret-k8s.yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret-k8s
  namespace: default
type: "kubernetes.io/rbd"
stringData:
  userID: k8s
  userKey: AQDNNYtijoLQKRAALNLmu7YJ3LW1JDyYcmK3uQ==
root@k8s-ceph:~/lim/yanshi# kubectl get secrets
NAME TYPE DATA AGE
csi-rbd-secret-k8s kubernetes.io/rbd 2 63m
default-token-mxkxl kubernetes.io/service-account-token 3 65m
rbd-secret kubernetes.io/rbd 1 3d1h
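An equivalent way to create the same secret imperatively, a sketch:
kubectl create secret generic csi-rbd-secret-k8s \
  --type="kubernetes.io/rbd" \
  --from-literal=userID=k8s \
  --from-literal=userKey=AQDNNYtijoLQKRAALNLmu7YJ3LW1JDyYcmK3uQ==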
- Create the PV and PVC. The image field must match an image name listed by rbd -p k8s ls:
root@k8s-ceph:~/lim/yanshi# cat pv-pvc-k8s.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: static-rbd-k8s-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
    - ReadOnlyMany
  rbd:
    monitors:
      - 192.168.2.15:6789
      - 192.168.2.16:6789
      - 192.168.2.17:6789
    pool: k8s
    image: static-rbd-1Gi
    user: k8s
    secretRef:
      name: csi-rbd-secret-k8s
    fsType: xfs
  persistentVolumeReclaimPolicy: Retain
---
# cat static_pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: static-rbd-k8s-claim
spec:
  accessModes:
    - ReadWriteOnce
    - ReadOnlyMany
  resources:
    requests:
      storage: 1Gi
root@k8s-ceph:~/lim/yanshi# kubectl apply -f pv-pvc-k8s.yaml
persistentvolume/static-rbd-k8s-pv created
persistentvolumeclaim/static-rbd-k8s-claim unchanged
root@k8s-ceph:~/lim/yanshi# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
aiplatform-ailab-data-pvc Bound default-aiplatform-ailab-data-pv 300Mi RWX 3d
aiplatform-app-data-pvc Bound default-aiplatform-app-data-pv 300Mi RWX 3d
aiplatform-dataset-data-pvc Bound default-aiplatform-dataset-data-pv 300Mi RWX 3d
aiplatform-gitea-data-pvc Bound default-aiplatform-gitea-data-pv 300Mi RWX 3d
aiplatform-model-data-pvc Bound default-aiplatform-model-data-pv 300Mi RWX 3d
raw-block-pvc Bound pvc-0a136566-7d42-4b82-b8da-ae15ffc5dc16 1Gi RWO csi-rbd-sc 3h41m
static-rbd-claim Bound static-rbd-pv 2Gi RWO,ROX 3d
static-rbd-k8s-claim Bound static-rbd-k8s-pv 1Gi RWO,ROX 47s
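The new PV should show as Bound as well; a quick check:
kubectl get pv static-rbd-k8s-pv
# STATUS should be Bound, CLAIM default/static-rbd-k8s-claim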
- Install the ceph-common tools on the k8s nodes and put the Ceph key under /etc/ceph/; copying it straight from the Ceph cluster is fine:
apt update
apt install ceph-common -y
It must be installed, and ceph -s must show the corresponding cluster output.
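A sketch of the copy step, assuming 192.168.2.15 is a mon host and the default admin keyring is used; adjust paths and users to your setup:
scp root@192.168.2.15:/etc/ceph/ceph.conf /etc/ceph/
scp root@192.168.2.15:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
ceph -s   # should now print the cluster status from the k8s node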
- Create a pod that mounts the corresponding PVC:
root@k8s-ceph:~/lim/yanshi# cat busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox-lim
  namespace: default
spec:
  containers:
    - image: busybox
      command:
        - sleep
        - "3600"
      imagePullPolicy: IfNotPresent
      name: busybox
      volumeMounts:
        - mountPath: "/data"
          name: data
  restartPolicy: Always
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: static-rbd-k8s-claim
root@k8s-ceph:~/lim/yanshi# kubectl apply -f busybox.yaml
pod/busybox-lim configured
root@k8s-ceph:~/lim/yanshi# kubectl get pod
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 74 3d1h
busybox-lim 1/1 Running 0 3m21s
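To verify the RBD is actually mounted and writable inside the pod, a sketch:
kubectl exec busybox-lim -- df -h /data   # should show a ~1G rbd-backed filesystem
kubectl exec busybox-lim -- sh -c 'echo hello > /data/test && cat /data/test'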
This completes the static RBD mount.
The next post covers dynamic provisioning of Ceph RBD.
https://blog.csdn.net/a755142155/article/details/124931544