Using Ceph for Dynamic Persistent Storage in Kubernetes

Introduction

This article describes how to use Ceph to provide dynamically provisioned PVs for Kubernetes. Ceph supplies the underlying storage. CephFS supports all three PV access modes (ReadWriteOnce, ReadOnlyMany, ReadWriteMany), while RBD supports two: ReadWriteOnce and ReadOnlyMany.

Access modes are only a capability description; they are not enforced. If a PV is used in a way that violates what the PVC declared, the storage provider is responsible for any runtime errors during access. For example, if a PVC's access mode is set to ReadOnlyMany, a pod that mounts it can still write. To make a volume truly read-only, specify readOnly: true when consuming the claim, as sketched below.
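A minimal sketch of what that looks like in a pod spec (the pod and the claim name my-claim are placeholders), with readOnly: true set both at the mount point and at the volume source:

apiVersion: v1
kind: Pod
metadata:
  name: readonly-test
spec:
  containers:
  - name: app
    image: nginx:alpine
    volumeMounts:
    - name: data
      mountPath: /data
      readOnly: true            # read-only at the mount point
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: my-claim       # hypothetical claim name
      readOnly: true            # read-only at the volume source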

Deployment

Deploy Kubernetes

See: install Kubernetes 1.11 on CentOS 7 with kubeadm.

Deploy Ceph

See: install a Ceph distributed storage cluster on CentOS 7.

Configure Ceph in the Kubernetes cluster

Using Ceph RBD
Extra configuration for kubeadm-installed clusters
# These extra steps are needed for clusters deployed with kubeadm:
# with dynamic provisioning, controller-manager uses the rbd command to create images,
# but the official controller-manager image does not ship the rbd command,
# so without the workaround below PVC creation will fail.
# Related issue: https://github.com/kubernetes/kubernetes/issues/38923
cat >external-storage-rbd-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "quay.io/external_storage/rbd-provisioner:v2.0.0-k8s1.11"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccount: rbd-provisioner
EOF
kubectl apply -f external-storage-rbd-provisioner.yaml

# Check the status; wait until the pod is Running before proceeding
kubectl get pod -n kube-system
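If the pod does not reach Running, or PVCs later stay stuck in Pending, the provisioner's logs are the first place to look (a troubleshooting aid, not a required step):

kubectl -n kube-system logs -l app=rbd-provisioner
kubectl -n kube-system describe pod -l app=rbd-provisioner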
Configure the StorageClass
# Install ceph-common on every node in the k8s cluster;
# kubelet needs the rbd command to map the images created for RBD volumes
yum install -y ceph-common

# Create an OSD pool (on a Ceph mon or admin node)
ceph osd pool create kube 4096
ceph osd pool ls

# Create a user for k8s access to Ceph (on a Ceph mon or admin node)
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring
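To double-check the capabilities that were granted (also on a Ceph mon or admin node):

ceph auth get client.kube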

# View the keys (on a Ceph mon or admin node)
ceph auth get-key client.admin
ceph auth get-key client.kube

# Create the admin secret.
# Replace CEPH_ADMIN_SECRET with the key obtained for client.admin.
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system

# Create the secret that PVCs in the default namespace use to access Ceph.
# Replace CEPH_KUBE_SECRET with the key obtained for client.kube.
export CEPH_KUBE_SECRET='AQBZK3VbTN/QOBAAIYi6CRLQcVevW5HM8lunOg=='
kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_KUBE_SECRET \
--namespace=default

# View the secrets
kubectl get secret ceph-user-secret -o yaml
kubectl get secret ceph-secret -n kube-system -o yaml

# Configure the StorageClass.
# For clusters created with kubeadm, use the following provisioner:
# provisioner: ceph.com/rbd
cat >storageclass-ceph-rdb.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-ceph-rdb
provisioner: ceph.com/rbd
# provisioner: kubernetes.io/rbd
parameters:
  monitors: 11.11.11.111:6789,11.11.11.112:6789,11.11.11.113:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-user-secret
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF

# Create
kubectl apply -f storageclass-ceph-rdb.yaml

# View
kubectl get sc
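Optionally, mark the class as the cluster default so that PVCs with no explicit storageClassName use it (an optional extra, not required by the rest of this walkthrough):

kubectl patch storageclass dynamic-ceph-rdb -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'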
Test the setup
# Create a test PVC
cat >ceph-rdb-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-rdb-claim
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: dynamic-ceph-rdb
  resources:
    requests:
      storage: 2Gi
EOF
kubectl apply -f ceph-rdb-pvc-test.yaml
 
# View
kubectl get pvc
kubectl get pv
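Once the claim shows as Bound, the provisioner has created an image in the kube pool; you can confirm this from a Ceph mon or admin node (a quick sanity check):

rbd ls -p kube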
 
# Create an nginx pod to test the mount
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: ceph-rdb
      mountPath: /usr/share/nginx/html
  volumes:
  - name: ceph-rdb
    persistentVolumeClaim:
      claimName: ceph-rdb-claim
EOF
kubectl apply -f nginx-pod.yaml
 
# View
kubectl get pods -o wide
 
# Write some content into the mounted volume
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from Ceph RBD!!! > /usr/share/nginx/html/index.html'

# Access test
POD_IP=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_IP

# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f ceph-rdb-pvc-test.yaml
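As a side note, a more robust way to grab the pod IP than parsing the wide output (the awk column position depends on the kubectl version) is jsonpath:

POD_IP=$(kubectl get pod nginx-pod1 -o jsonpath='{.status.podIP}')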
Using CephFS

CephFS requires Linux kernel 4.10+; on older kernels it will not work properly (for details see the issue at github.com/kubernetes-…). See: upgrade the kernel on CentOS 7.
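You can check the running kernel on each node before proceeding:

uname -r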

Create a CephFS on the Ceph cluster
# Run the following on a Ceph mon or admin node.
# CephFS needs two pools, one for data and one for metadata
ceph osd pool create fs_data 128
ceph osd pool create fs_metadata 128
ceph osd lspools

# Create a CephFS
ceph fs new cephfs fs_metadata fs_data

# View
ceph fs ls
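CephFS also needs at least one active MDS daemon to be usable; assuming an MDS was deployed with the cluster, you can verify its state with:

ceph mds stat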
Deploy cephfs-provisioner
# There is no official dynamic-volume support for CephFS,
# so we use the community-provided cephfs-provisioner
cat >external-storage-cephfs-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
      - name: cephfs-provisioner
        image: "quay.io/external_storage/cephfs-provisioner:v2.0.0-k8s1.11"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/cephfs
        command:
        - "/usr/local/bin/cephfs-provisioner"
        args:
        - "-id=cephfs-provisioner-1"
      serviceAccount: cephfs-provisioner
EOF
kubectl apply -f external-storage-cephfs-provisioner.yaml

# Check the status; wait until the pod is Running before proceeding
kubectl get pod -n kube-system

Configure the StorageClass

# View the key (on a Ceph mon or admin node)
ceph auth get-key client.admin

# Create the admin secret.
# Replace CEPH_ADMIN_SECRET with the key obtained for client.admin.
# If you already created this secret while testing Ceph RBD, skip this step.
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system

# View the secret
kubectl get secret ceph-secret -n kube-system -o yaml

# Configure the StorageClass
cat >storageclass-cephfs.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 11.11.11.111:6789,11.11.11.112:6789,11.11.11.113:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: "kube-system"
  claimRoot: /volumes/kubernetes
EOF

# Create
kubectl apply -f storageclass-cephfs.yaml

# View
kubectl get sc
Test the setup
# Create a test PVC
cat >cephfs-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-claim
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: dynamic-cephfs
  resources:
    requests:
      storage: 2Gi
EOF
kubectl apply -f cephfs-pvc-test.yaml
 
# View
kubectl get pvc
kubectl get pv
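To see where the provisioned volume lives inside CephFS (it is created under the claimRoot configured above), inspect the bound PV; this uses only standard kubectl commands:

kubectl describe pv $(kubectl get pvc cephfs-claim -o jsonpath='{.spec.volumeName}')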
 
# Create an nginx pod to test the mount
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: cephfs
      mountPath: /usr/share/nginx/html
  volumes:
  - name: cephfs
    persistentVolumeClaim:
      claimName: cephfs-claim
EOF
kubectl apply -f nginx-pod.yaml
 
# View
kubectl get pods -o wide
 
# Write some content into the mounted volume
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from CephFS!!! > /usr/share/nginx/html/index.html'

# Access test
POD_IP=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_IP

# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f cephfs-pvc-test.yaml

Reposted from Juejin: "Using Ceph for Dynamic Persistent Storage in Kubernetes".