NFS 服务器配置
cat /etc/exports
/data *(insecure,rw,sync,no_root_squash)
创建 ServiceAccount
vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default  # set to the actual namespace; same for all manifests below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  # The provisioner creates/deletes PVs and watches PVCs cluster-wide.
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced role used for leader election (endpoints lock).
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # NOTE(review): namespace was missing in the original; without it the
  # RoleBinding lands in the current kubectl-context namespace, which may
  # not match the Role above. Pinned to the same namespace explicitly.
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
创建storageclass
vi nfs-StorageClass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
# Must match the PROVISIONER_NAME env var in the provisioner Deployment.
provisioner: qgg-nfs-storage
parameters:
  # "false": delete the backing directory on PVC deletion instead of
  # archiving it. Quoted so it stays a string, not a YAML boolean.
  archiveOnDelete: "false"
kubectl get storageclasses
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
managed-nfs-storage qgg-nfs-storage Delete Immediate false 45s
创建NFS provisioner
vi nfs-provisioner.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default  # must match the namespace used in the RBAC manifests
spec:
  replicas: 1
  # Recreate: never run two provisioner pods against the same export at once.
  strategy:
    type: Recreate
  # NOTE(review): the original manifest declared `selector` twice (duplicate
  # mapping key — invalid YAML 1.2, silently last-wins in most parsers);
  # collapsed to a single declaration.
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: harbor.wuxingge.com/sit/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              # Must match `provisioner` in nfs-StorageClass.yaml.
              value: qgg-nfs-storage
            - name: NFS_SERVER
              value: "10.10.110.212"  # NFS server IP address
            - name: NFS_PATH
              value: /data/sc  # exported NFS path
      volumes:
        - name: nfs-client-root
          nfs:
            server: "10.10.110.212"  # NFS server IP address
            path: /data/sc  # exported NFS path
测试
动态创建pvc
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: common
  namespace: sit
spec:
  accessModes:
    - ReadWriteMany  # NFS supports shared read-write across nodes
  resources:
    requests:
      storage: 10Gi
  # Dynamic provisioning via the NFS provisioner defined above.
  storageClassName: managed-nfs-storage
deployment使用动态创建的pvc
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vcloud-common-api
  namespace: sit
spec:
  replicas: 1
  selector:
    matchLabels:
      appname: vcloud-common-api
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        prometheus.io/path: /actuator/prometheus
        prometheus.io/port: "8010"
        prometheus.io/scrape: "true"
      labels:
        appname: vcloud-common-api
    spec:
      affinity:
        # Soft anti-affinity: prefer spreading replicas across nodes.
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: appname
                      operator: In
                      values:
                        - vcloud-common-api
                topologyKey: kubernetes.io/hostname
              weight: 50
      containers:
        - env:
            # log-pilot style env vars: collect /logs/*.log into sit-api.
            - name: aliyun_logs_sit-api
              value: /logs/*.log
            - name: aliyun_logs_sit-api_tags
              value: app=vcloud-common-api
          image: harbor.sit.wuxingge.com.cn/sit/vcloud-common-api:28
          imagePullPolicy: IfNotPresent
          name: vcloud-common-api
          volumeMounts:
            - mountPath: /data/v5dfs_files
              name: commonmount
            - mountPath: /logs
              name: logs-pilot
      volumes:
        # Bound to the dynamically provisioned PVC `common` above.
        - name: commonmount
          persistentVolumeClaim:
            claimName: common
        - emptyDir: {}
          name: logs-pilot
StatefulSet动态申请pvc
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: my-nginx-demo
  labels:
    app.kubernetes.io/name: my-nginx-demo
    app.kubernetes.io/version: "1.0"
spec:
  replicas: 1
  # Headless/governing service defined in the Service manifest below.
  serviceName: my-nginx-demo-svc
  selector:
    matchLabels:
      app.kubernetes.io/name: my-nginx-demo
  template:
    metadata:
      labels:
        app.kubernetes.io/name: my-nginx-demo
    spec:
      restartPolicy: Always
      containers:
        - name: my-nginx-demo
          image: "nginx:1.17.9"
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          volumeMounts:
            - mountPath: "/usr/share/nginx/html"
              name: html
  # One PVC per replica, dynamically provisioned by the NFS StorageClass.
  volumeClaimTemplates:
    - metadata:
        name: html
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
        storageClassName: managed-nfs-storage
---
apiVersion: v1
kind: Service
metadata:
  name: my-nginx-demo-svc
  labels:
    app.kubernetes.io/name: my-nginx-demo
    app.kubernetes.io/version: "1.0"
spec:
  # type defaults to ClusterIP; alternatives kept for reference:
  # type: ClusterIP
  # type: LoadBalancer
  # type: NodePort
  ports:
    - port: 80
      targetPort: http  # named container port from the StatefulSet
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: my-nginx-demo
---
# NOTE(review): extensions/v1beta1 Ingress was deprecated in 1.14 and removed
# in Kubernetes 1.22; migrated to the networking.k8s.io/v1 schema
# (backend.serviceName/servicePort -> backend.service.name/port, and the
# now-required pathType field added).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-nginx-demo-ingress
spec:
  rules:
    - host: mynginx.pre.wuxingge.com.cn
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: my-nginx-demo-svc
                port:
                  number: 80
共享目录中创建html文件,然后查看pod状态
cat index.html
<html><body><h1>Test Page!</h1></body></html>