1 创建NFS共享目录
# Create the shared directory
sudo mkdir -p /data/k8s
# Open permissions so any client UID can write (demo setup; tighten for production)
sudo chmod 777 /data/k8s
# Edit the NFS exports file
sudo vim /etc/exports
# Add the following line.
# NOTE: the older wildcard form with "*" below works on old NFS versions,
# but may fail on newer ones:
# /data/k8s 192.168.108.*(rw,sync,no_subtree_check)
/data/k8s 192.168.108.0/24(rw,sync,no_subtree_check)
# Restart the NFS service
sudo service nfs-kernel-server restart
# List the server's exported directories
sudo showmount -e 192.168.108.100
# Expected output — indicates the export was created successfully:
Export list for 192.168.108.100:
/data/k8s 192.168.108.0/24
2 设置存储分配器的权限
创建nfs-client-provisioner-authority.yaml文件
# nfs-client-provisioner-authority.yaml
---
# ServiceAccount the provisioner pod runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
# Cluster-wide permissions: the provisioner creates/deletes PVs, watches
# PVCs and StorageClasses, and records Events.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
# Bind the ClusterRole above to the provisioner's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced permissions used for leader election between provisioner replicas.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Bind the leader-election Role to the provisioner's ServiceAccount.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
3 创建NFS存储分配器
创建nfs-client-provisioner.yaml文件
# nfs-client-provisioner.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  # Recreate: stop the old pod before starting a new one, so two provisioner
  # instances never run against the same NFS export concurrently.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      # ServiceAccount created in nfs-client-provisioner-authority.yaml.
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # Provisioner name; must match the "provisioner" field of the
            # StorageClass in nfs-storage-class.yaml.
            - name: PROVISIONER_NAME
              value: nfs-provisioner
            # NFS server address — replace with your own IP.
            - name: NFS_SERVER
              value: "192.168.108.100"
            # Exported directory on the NFS server.
            - name: NFS_PATH
              value: /data/k8s
      volumes:
        - name: nfs-client-root
          nfs:
            # Replace with your own NFS server IP.
            server: 192.168.108.100
            # Must match the directory exported in /etc/exports.
            path: /data/k8s
4 创建StorageClass
创建nfs-storage-class.yaml文件
# nfs-storage-class.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-data
# Provisioner name; must match env PROVISIONER_NAME in
# nfs-client-provisioner.yaml.
provisioner: nfs-provisioner
# Allow PVCs created from this class to be expanded after creation.
allowVolumeExpansion: true
parameters:
  # When "true", the data directory on the NFS share is archived (renamed to
  # "archived-<dir>") when the PVC is deleted; when "false", it is deleted
  # outright.
  archiveOnDelete: "true"
查看StorageClass
kubectl get storageclass
5 创建PVC
创建nfs-pvc.yaml文件
# nfs-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # NOTE: the Deployment below references this claim by this name.
  name: nfs-pvc
spec:
  # ReadWriteMany: the volume can be mounted read-write by many nodes.
  accessModes:
    - ReadWriteMany
  # Must match metadata.name of the StorageClass in nfs-storage-class.yaml.
  storageClassName: nfs-data
  # Requested storage size.
  resources:
    requests:
      storage: 100Mi
查看PVC
kubectl get pvc
6 创建Deployment控制器
创建nfs-deployment-python.yaml文件
# nfs-deployment-python.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-deployment-python
spec:
  replicas: 2
  selector:
    matchLabels:
      app: python-nfs
  template:
    metadata:
      labels:
        app: python-nfs
    spec:
      containers:
        - name: python-nfs
          image: python:3.8.2
          imagePullPolicy: IfNotPresent
          command: ['/bin/bash', '-c', '--']
          # Append this pod's hostname to the shared file, then serve the
          # directory with "python -m http.server 80" (">>" appends, so lines
          # written by other replicas are preserved).
          args: ['echo "<p> The host is $(hostname) </p>" >> /containerdata/podinfor; python -m http.server 80']
          ports:
            - name: http
              containerPort: 80
          # Mount point inside the container; name must match a volume below.
          volumeMounts:
            - name: python-nfs-data
              mountPath: /containerdata
      # NFS-backed volume requested through the PVC defined in nfs-pvc.yaml
      # (metadata.name: nfs-pvc).
      volumes:
        - name: python-nfs-data
          persistentVolumeClaim:
            claimName: nfs-pvc