镜像拉取
由于下载不到国外的镜像,只能使用这个笨办法了
下载国内镜像(所有节点执行)
# Pull mirrored images from a domestic (Aliyun) registry, then retag them to the
# upstream names (quay.io / k8s.gcr.io) that the ceph-csi manifests reference.
# NOTE: the cephcsi tag below must match exactly between the pull and the tag
# commands — the original notes pulled "...-xxx2.8.3.1216" but tagged
# "...-csp2.8.3.1216", which makes `docker tag` fail with "No such image".
docker pull xxx/cephcsi:v3.6.1-csp2.8.3.1216
docker pull registry.aliyuncs.com/it00021hot/csi-provisioner:v3.1.0
docker pull registry.aliyuncs.com/it00021hot/csi-resizer:v1.4.0
docker pull registry.aliyuncs.com/it00021hot/csi-snapshotter:v5.0.1
docker pull registry.aliyuncs.com/it00021hot/csi-attacher:v3.4.0
docker pull registry.aliyuncs.com/it00021hot/csi-node-driver-registrar:v2.4.0
# Retag to the image names expected by the deployment YAMLs.
docker tag xxx/cephcsi:v3.6.1-csp2.8.3.1216 quay.io/cephcsi/cephcsi:v3.6.1
docker tag registry.aliyuncs.com/it00021hot/csi-provisioner:v3.1.0 k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
docker tag registry.aliyuncs.com/it00021hot/csi-resizer:v1.4.0 k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
docker tag registry.aliyuncs.com/it00021hot/csi-snapshotter:v5.0.1 k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1
docker tag registry.aliyuncs.com/it00021hot/csi-attacher:v3.4.0 k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
docker tag registry.aliyuncs.com/it00021hot/csi-node-driver-registrar:v2.4.0 k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
下载源码
# Clone the ceph-csi deployment manifests.
# NOTE: the branch must match the pre-pulled image versions. The sidecar
# versions pulled above (provisioner v3.1.0, resizer v1.4.0, snapshotter
# v5.0.1, attacher v3.4.0, registrar v2.4.0, cephcsi v3.6.1) belong to the
# v3.6 manifests; release-v3.4 references older image tags and would pull
# images that were not mirrored.
git clone https://github.com/ceph/ceph-csi.git -b release-v3.6
cd ceph-csi/deploy/cephfs/kubernetes
修改yaml文件
把文件csi-config-map.yaml修改成
---
# ceph-csi cluster connection config (csi-config-map.yaml).
# clusterID and monitors must match the target Ceph cluster
# (see `ceph mon dump` / `ceph fsid`); subvolumeGroup must exist
# in the CephFS filesystem before provisioning.
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "7da739da-dc6c-4b8d-9086-2a6e1ad9d2b7",
        "monitors": [
          "172.27.16.3:6789",
          "172.27.16.11:6789",
          "172.27.16.7:6789"
        ],
        "cephFS": {
          "subvolumeGroup": "test"
        }
      }
    ]
metadata:
  name: ceph-csi-config
在主节点执行
# Remove the NoSchedule taint from the master so the ceph-csi DaemonSet and
# provisioner pods can also be scheduled onto k8s-master (trailing "-" deletes
# the taint).
kubectl taint nodes k8s-master node-role.kubernetes.io/master-
不执行这句,会导致调度到主节点的pod一直处于Pending状态
创建pod
# Deploy all ceph-csi CephFS components (plugin DaemonSet, provisioner, RBAC, config).
kubectl apply -f ceph-csi/deploy/cephfs/kubernetes/
创建成功
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
csi-cephfsplugin-4dgb4 3/3 Running 0 3h30m
csi-cephfsplugin-htnb9 3/3 Running 0 101m
csi-cephfsplugin-provisioner-cbc4bb7bc-dvvkj 6/6 Running 0 3h30m
csi-cephfsplugin-provisioner-cbc4bb7bc-hwhzr 6/6 Running 0 3h30m
csi-cephfsplugin-provisioner-cbc4bb7bc-nqmz2 6/6 Running 0 3h30m
csi-cephfsplugin-v5f2w 3/3 Running 0 3h30m
创建ceph-conf.yaml
---
# This is a sample configmap that helps define a Ceph configuration as required
# by the CSI plugins.
# Sample ceph.conf available at
# https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed
# documentation is available at
# https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
apiVersion: v1
kind: ConfigMap
data:
  ceph.conf: |
    [global]
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
    debug_client = 0
    client_reconnect_stale = false
    debug_limit = 0

    # Workaround for http://tracker.ceph.com/issues/23446
    fuse_set_user_groups = false

    # ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
    # adding 'fuse_big_writes = true' option by default to override this limit
    # see https://github.com/ceph/ceph-csi/issues/1928
    fuse_big_writes = true
  # keyring is a required key and its value should be empty
  keyring: |
metadata:
  name: ceph-config
创建csi-secret.yaml
---
# Credentials used by the CSI driver to talk to Ceph.
# userID/adminID are the cephx user names; userKey/adminKey are the
# corresponding keyring secrets. Do not commit real keys to version control.
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
stringData:
  userID: admin
  userKey: AQDJyiJjAAAAABAAMY0lnaB8kISARsMCg5MUtg==
  adminID: admin
  adminKey: AQDJyiJjAAAAABAAMY0lnaB8kISARsMCg5MUtg==
userID和adminID是keyring的用户名
userKey和adminKey是keyring的值
创建storage class
---
# StorageClass for dynamic CephFS provisioning via ceph-csi.
# clusterID must match the one in the ceph-csi-config ConfigMap;
# fsName is the CephFS filesystem name (`ceph fs ls`).
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs-sc
provisioner: cephfs.csi.ceph.com
parameters:
  clusterID: 7da739da-dc6c-4b8d-9086-2a6e1ad9d2b7
  fsName: 00000001-fs
  # pool: 00000001-fs.meta
  # mounter: fuse uses ceph-fuse; alternatively "kernel" for the kernel client.
  mounter: fuse
  csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
创建pvc
# PersistentVolumeClaim backed by the csi-cephfs-sc StorageClass.
# ReadWriteMany is supported because CephFS is a shared filesystem.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-cephfs-sc
[root@k8s-master kubernetes]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
data Bound pvc-051dda03-58e2-45f3-9b4a-6de7d745d1bb 1Gi RWX csi-cephfs-sc 164m
[root@k8s-master kubernetes]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-051dda03-58e2-45f3-9b4a-6de7d745d1bb 1Gi RWX Delete Bound default/data csi-cephfs-sc 164m
[root@k8s-master kubernetes]#
创建POD使用pvc
# Test pod that mounts the CephFS-backed PVC "data" at /mydata
# and sleeps so the mount can be inspected interactively.
apiVersion: v1
kind: Pod
metadata:
  name: ceph-fuse
spec:
  containers:
    - name: mypod1
      image: centos:centos8
      args:
        - /bin/bash
        - -c
        - sleep 10; touch /tmp/healthy; sleep 30000
      volumeMounts:
        - mountPath: "/mydata"
          name: mydata
  volumes:
    - name: mydata
      persistentVolumeClaim:
        claimName: data
pod创建成功
[root@k8s-master 5c9e33c8-8e6d-4553-a7cd-9b33c154856e]# kubectl get pods
NAME READY STATUS RESTARTS AGE
ceph-fuse 1/1 Running 0 100m
csi-cephfsplugin-6fqdt 3/3 Running 0 170m
csi-cephfsplugin-nd7nh 3/3 Running 0 170m
csi-cephfsplugin-provisioner-669d7fb56c-hjwrv 6/6 Running 0 170m
csi-cephfsplugin-provisioner-669d7fb56c-j4nz9 6/6 Running 0 170m
csi-cephfsplugin-provisioner-669d7fb56c-zdvpl 6/6 Running 0 170m
csi-cephfsplugin-q4lxp 3/3 Running 0 170m
[root@k8s-master 5c9e33c8-8e6d-4553-a7cd-9b33c154856e]#