Mounting Ceph RBD in k8s
Environment
192.168.126.101 ceph01
192.168.126.102 ceph02
192.168.126.103 ceph03
192.168.126.104 ceph04
192.168.126.105 ceph-admin
192.168.48.11 ceph01
192.168.48.12 ceph02
192.168.48.13 ceph03
192.168.48.14 ceph04
192.168.48.15 ceph-admin
192.168.48.101 master01
192.168.48.201 node01
192.168.48.202 node02
Note: all nodes must run kernel 4.10 or newer.
uname -r
5.2.2-1.el7.elrepo.x86_64
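If a node is still on the stock CentOS 7 3.10 kernel, one way to get past 4.10 is the ELRepo mainline kernel (the 5.2.x elrepo kernel shown above comes from there). A rough sketch, not part of the original steps; the package URL may have changed since:
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum -y --enablerepo=elrepo-kernel install kernel-ml
grub2-set-default 0      # boot the newly installed kernel by default
reboot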
[root@ceph-admin ~]# ceph -s
  cluster:
    id:     231d5528-bab4-49fa-9d68-d5382d2e9f6c
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 2h)
    mgr: ceph04(active, since 2h), standbys: ceph03
    mds: cephfs:2 {0=ceph02=up:active,1=ceph01=up:active} 1 up:standby
    osd: 8 osds: 8 up (since 2h), 8 in (since 19h)
    rgw: 1 daemon active (ceph01)

  data:
    pools:   9 pools, 352 pgs
    objects: 251 objects, 14 MiB
    usage:   8.1 GiB used, 64 GiB / 72 GiB avail
    pgs:     352 active+clean
Mounting Ceph RBD in k8s
Create the pool
[cephadm@ceph-admin ceph-cluster]$ ceph osd pool create k8s 64 64
pool 'k8s' created
Enable the RBD application on the pool
[cephadm@ceph-admin ceph-cluster]$ ceph osd pool application enable k8s rbd
enabled application 'rbd' on pool 'k8s'
[cephadm@ceph-admin ceph-cluster]$ rbd pool init k8s
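The 64 PGs used here is just a starting point for this small cluster. A common rule of thumb (not from the original article) is roughly (number of OSDs x 100) / replica size across all pools, rounded to a power of two; with 8 OSDs and 3 replicas that is about 266 PGs cluster-wide, so 64 for one pool is reasonable. The pool's current values can be checked with:
ceph osd pool get k8s pg_num     # placement groups in the pool
ceph osd pool get k8s size       # replica count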
Create an RBD image
[cephadm@ceph-admin ceph-cluster]$ rbd create --pool k8s --image k8svol01 --size 1G
[cephadm@ceph-admin ceph-cluster]$ rbd ls --pool k8s
k8svol01
Disable unsupported image features
[cephadm@ceph-admin ceph-cluster]$ rbd info k8s/k8svol01
rbd image 'k8svol01':
        size 1 GiB in 256 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 37a0264798e8
        block_name_prefix: rbd_data.37a0264798e8
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Sun Jul 28 12:14:22 2019
        access_timestamp: Sun Jul 28 12:14:22 2019
        modify_timestamp: Sun Jul 28 12:14:22 2019
[cephadm@ceph-admin ceph-cluster]$ rbd feature disable k8s/k8svol01 object-map, fast-diff, deep-flatten
[cephadm@ceph-admin ceph-cluster]$ rbd info k8s/k8svol01
rbd image 'k8svol01':
        size 1 GiB in 256 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 37a0264798e8
        block_name_prefix: rbd_data.37a0264798e8
        format: 2
        features: layering, exclusive-lock
        op_features:
        flags:
        create_timestamp: Sun Jul 28 12:14:22 2019
        access_timestamp: Sun Jul 28 12:14:22 2019
        modify_timestamp: Sun Jul 28 12:14:22 2019
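Disabling features after the fact works, but an image can also be created with only the layering feature from the start, which skips the extra step. A sketch; the image name k8svol02 is hypothetical:
rbd create --pool k8s --image k8svol02 --size 1G --image-feature layering
rbd info k8s/k8svol02            # should list only the layering feature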
Create a client keyring
[cephadm@ceph-admin ceph-cluster]$ ceph auth get-or-create client.k8s mon 'allow rw' osd 'allow * pool=k8s'
[client.k8s]
key = AQB9IT1d0XppLRAAdKIBK4m7vrjUK4j1l/hsiA==
[cephadm@ceph-admin ceph-cluster]$ ceph auth get client.k8s
exported keyring for client.k8s
[client.k8s]
key = AQB9IT1d0XppLRAAdKIBK4m7vrjUK4j1l/hsiA==
caps mon = "allow rw"
caps osd = "allow * pool=k8s"
[cephadm@ceph-admin ceph-cluster]$ ceph auth get client.k8s -o ceph.client.k8s.keyring
exported keyring for client.k8s
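The caps above (mon 'allow rw', osd 'allow * pool=k8s') are broader than a client strictly needs. Since Luminous, Ceph also ships RBD cap profiles, so the same user could presumably be tightened as follows (optional; the keyring created above already works for this setup):
ceph auth caps client.k8s mon 'profile rbd' osd 'profile rbd pool=k8s'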
Install ceph-common on every k8s node
yum -y install ceph-common
Distribute the client keyring and the ceph configuration file
[cephadm@ceph-admin ceph-cluster]$ for host in master01 node01 node02;do scp ceph.client.k8s.keyring root@$host:/etc/ceph/;done
[cephadm@ceph-admin ceph-cluster]$ for host in master01 node01 node02;do scp ceph.conf root@$host:/etc/ceph/;done
[root@node01 ceph]# ceph --user k8s -s
  cluster:
    id:     231d5528-bab4-49fa-9d68-d5382d2e9f6c
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 3h)
    mgr: ceph04(active, since 3h), standbys: ceph03
    mds: cephfs:2 {0=ceph02=up:active,1=ceph01=up:active} 1 up:standby
    osd: 8 osds: 8 up (since 3h), 8 in (since 20h)
    rgw: 1 daemon active (ceph01)

  data:
    pools:   10 pools, 416 pgs
    objects: 256 objects, 14 MiB
    usage:   8.1 GiB used, 64 GiB / 72 GiB avail
    pgs:     416 active+clean
Mounting with a keyring
Write the k8s manifest
[root@master01 ~]# vim pod-rbd-vol.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-rbd-vol
spec:
  containers:
  - image: busybox
    name: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh","-c","sleep 86400"]
    volumeMounts:
    - name: rbdpod
      mountPath: /data
  volumes:
  - name: rbdpod
    rbd:
      monitors:
      - '192.168.48.11:6789'
      - '192.168.48.12:6789'
      - '192.168.48.13:6789'
      pool: k8s
      image: k8svol01
      fsType: xfs
      readOnly: false
      user: k8s
      keyring: /etc/ceph/ceph.client.k8s.keyring
[root@master01 ~]# kubectl apply -f pod-rbd-vol.yaml
[root@master01 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-rbd-vol 1/1 Running 0 22s 10.244.1.5 node01 <none> <none>
[root@master01 ~]# kubectl exec pod-rbd-vol -it -- /bin/sh
/ # cd /data/
/data # mount | grep rbd
/dev/rbd0 on /data type xfs (rw,relatime,attr2,inode64,logbsize=64k,sunit=128,swidth=128,noquota)
[root@node01 ~]# rbd showmapped
id pool namespace image snap device
0 k8s k8svol01 - /dev/rbd0
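The in-tree rbd volume plugin relies on the node's own rbd client to map the image, so it can be worth testing the mapping by hand from a node before scheduling pods there. A sketch, assuming ceph.conf and the keyring are already under /etc/ceph and the image is not currently in use by a pod:
rbd --id k8s map k8s/k8svol01      # should print a /dev/rbdX device
rbd --id k8s unmap k8s/k8svol01    # clean up so kubelet can manage the mapping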
Mounting with a Secret
First get the key
[cephadm@ceph-admin ceph-cluster]$ ceph auth print-key client.k8s
AQB9IT1d0XppLRAAdKIBK4m7vrjUK4j1l/hsiA==
Base64-encode it
[cephadm@ceph-admin ceph-cluster]$ ceph auth print-key client.k8s | base64
QVFCOUlUMWQwWHBwTFJBQWRLSUJLNG03dnJqVUs0ajFsL2hzaUE9PQ==
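kubectl base64-encodes Secret data by itself, so instead of pasting the encoded key into YAML, the Secret could also be created directly from the plain key (an alternative sketch, run wherever both ceph and kubectl are available; the manifest below is kept as the reference version):
kubectl create secret generic ceph-k8s-secret --type="kubernetes.io/rbd" \
  --from-literal=key="$(ceph auth print-key client.k8s)"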
Write the k8s manifest
[root@master01 ~]# vim pod-rbd-vol-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-k8s-secret
type: "kubernetes.io/rbd"
data:
  key: QVFCOUlUMWQwWHBwTFJBQWRLSUJLNG03dnJqVUs0ajFsL2hzaUE9PQ==
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-rbd-vol-secret
spec:
  containers:
  - image: busybox
    name: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh","-c","sleep 86400"]
    volumeMounts:
    - name: rbdpod
      mountPath: /data
  volumes:
  - name: rbdpod
    rbd:
      monitors:
      - '192.168.48.11:6789'
      - '192.168.48.12:6789'
      - '192.168.48.13:6789'
      pool: k8s
      image: k8svol01
      fsType: xfs
      readOnly: false
      user: k8s
      secretRef:
        name: ceph-k8s-secret
[root@master01 ~]# kubectl delete -f pod-rbd-vol.yaml
pod "pod-rbd-vol" deleted
[root@master01 ~]# kubectl apply -f pod-rbd-vol-secret.yaml
secret/ceph-k8s-secret created
pod/pod-rbd-vol-secret created
[root@master01 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-rbd-vol-secret 1/1 Running 0 22s 10.244.2.2 node02 <none> <none>
[root@master01 ~]# kubectl exec pod-rbd-vol-secret -it -- /bin/sh
/ # mount |grep rbd
/dev/rbd0 on /data type xfs (rw,relatime,attr2,inode64,logbsize=64k,sunit=128,swidth=128,noquota)
[root@node02 ~]# rbd showmapped
id pool namespace image snap device
0 k8s k8svol01 - /dev/rbd0
rbd StorageClass
If the cluster was deployed from binaries, installing ceph-common on every node is enough.
If it was deployed with kubeadm, the official controller-manager image does not contain the rbd binary, so dynamic provisioning needs an external rbd-provisioner.
Create the rbd-provisioner
vim rbd-provisioner.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: rbd-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: quay.io/external_storage/rbd-provisioner:latest
        imagePullPolicy: IfNotPresent
        env:
        - name: PROVISIONER_NAME
          value: k8s-rbd
      serviceAccount: rbd-provisioner
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
Download the image
Link: https://pan.baidu.com/s/1t1K2AjvoC06SsxIuznELfg (extraction code: w98h)
docker load -i rbd-provisioner.tar.gz
Or pull it through the Azure mirror:
docker pull quay.azk8s.cn/external_storage/rbd-provisioner:latest
docker tag quay.azk8s.cn/external_storage/rbd-provisioner:latest quay.io/external_storage/rbd-provisioner:latest
[root@master01 ~]# kubectl apply -f rbd-provisioner.yaml
clusterrole.rbac.authorization.k8s.io/rbd-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner created
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.extensions/rbd-provisioner created
serviceaccount/rbd-provisioner created
[root@master01 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
pod-rbd-vol-secret 1/1 Running 0 57m
rbd-provisioner-6cbdf69ff9-4qmzr 1/1 Running 0 21s
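If PVCs created later stay in Pending, the provisioner's logs are the first place to look, for example:
kubectl logs deploy/rbd-provisioner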
Create the StorageClass
Get the keys for the admin and k8s users
[cephadm@ceph-admin ceph-cluster]$ ceph auth print-key client.admin | base64
QVFBd0NqeGR4cnFoSGhBQXhiQjVNdTZPemlCcXU2V2NKeEhHekE9PQ==
[cephadm@ceph-admin ceph-cluster]$ ceph auth print-key client.k8s | base64
QVFCOUlUMWQwWHBwTFJBQWRLSUJLNG03dnJqVUs0ajFsL2hzaUE9PQ==
Edit the YAML file
vim rbd-sc.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
  namespace: kube-system
data:
  key: QVFBd0NqeGR4cnFoSGhBQXhiQjVNdTZPemlCcXU2V2NKeEhHekE9PQ==
type: kubernetes.io/rbd
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-k8s-secret
  namespace: default
data:
  key: QVFCOUlUMWQwWHBwTFJBQWRLSUJLNG03dnJqVUs0ajFsL2hzaUE9PQ==
type: kubernetes.io/rbd
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: ceph-sc
  namespace: default
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-rbd
reclaimPolicy: Retain
parameters:
  monitors: 192.168.48.11:6789,192.168.48.12:6789,192.168.48.13:6789
  adminId: admin
  adminSecretName: ceph-admin-secret
  adminSecretNamespace: kube-system
  pool: k8s
  fsType: xfs
  userId: k8s
  userSecretName: ceph-k8s-secret
  imageFormat: "2"
  imageFeatures: "layering"
[root@master01 ~]# kubectl apply -f rbd-sc.yaml
secret/ceph-admin-secret created
secret/ceph-k8s-secret created
storageclass.storage.k8s.io/ceph-sc created
[root@master01 ~]# kubectl get sc
NAME PROVISIONER AGE
ceph-sc (default) k8s-rbd 18s
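Note that the provisioner: k8s-rbd field of the StorageClass has to match the PROVISIONER_NAME environment variable set in the rbd-provisioner Deployment, otherwise claims are never picked up. A quick consistency check (a sketch, assuming PROVISIONER_NAME is the first env entry as in the manifest above):
kubectl get sc ceph-sc -o jsonpath='{.provisioner}'; echo
kubectl get deploy rbd-provisioner -o jsonpath='{.spec.template.spec.containers[0].env[0].value}'; echo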
Create a PVC
vim ceph-sc-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc
  namespace: default
spec:
  storageClassName: ceph-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
[root@master01 ~]# kubectl apply -f ceph-sc-pvc.yaml
persistentvolumeclaim/ceph-pvc created
[root@master01 ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
ceph-pvc Bound pvc-458393d4-0f33-4227-b85b-a08c0f2b6f02 1Gi RWO ceph-sc 6s
[root@master01 ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-458393d4-0f33-4227-b85b-a08c0f2b6f02 1Gi RWO Retain Bound default/ceph-pvc ceph-sc 2m53s
[cephadm@ceph-admin ceph-cluster]$ rbd ls -p k8s -l
NAME SIZE PARENT FMT PROT LOCK
k8svol01 1 GiB 2 excl
kubernetes-dynamic-pvc-799367a5-b10c-11e9-8e2f-82f05bffb226 1 GiB 2
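To see which RBD image backs a dynamically provisioned PV, the image name is stored in the PV spec; for example (using the PV name from the output above):
kubectl get pv pvc-458393d4-0f33-4227-b85b-a08c0f2b6f02 -o jsonpath='{.spec.rbd.image}'; echo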
Create a test application
vim pod-rbd.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-rbd-sc
spec:
  containers:
  - image: busybox
    name: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh","-c","sleep 86400"]
    volumeMounts:
    - name: ceph-rbd-vol1
      mountPath: /usr/share/nginx/html
      readOnly: false
  volumes:
  - name: ceph-rbd-vol1
    persistentVolumeClaim:
      claimName: ceph-pvc
[root@master01 ~]# kubectl apply -f pod-rbd.yaml
pod/pod-rbd-sc created
[root@master01 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-rbd-sc 1/1 Running 0 102s 10.244.1.6 node01 <none> <none>
pod-rbd-vol-secret 1/1 Running 0 95m 10.244.2.2 node02 <none> <none>
rbd-provisioner-6cbdf69ff9-z7p96 1/1 Running 0 13m 10.244.2.4 node02 <none> <none>
[root@master01 ~]# kubectl exec pod-rbd-sc -it -- /bin/sh
/ # mount |grep rbd
/dev/rbd0 on /usr/share/nginx/html type xfs (rw,relatime,attr2,inode64,logbsize=64k,sunit=128,swidth=128,noquota)
[root@node01 ~]# rbd showmapped
id pool namespace image snap device
0 k8s kubernetes-dynamic-pvc-799367a5-b10c-11e9-8e2f-82f05bffb226 - /dev/rbd0
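A simple write/read round trip confirms the dynamically provisioned volume is actually usable (the file name is arbitrary):
kubectl exec pod-rbd-sc -- sh -c 'echo hello-rbd > /usr/share/nginx/html/test.txt'
kubectl exec pod-rbd-sc -- cat /usr/share/nginx/html/test.txt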