本实验通过配置NFS存储类(StorageClass)为MongoDB提供动态持久化存储卷,部署3节点Mongo副本集集群(1主2从),并通过NoSQLBooster客户端管理工具连接验证。
1、配置NFS服务
在管理节点(10.0.0.1)上部署NFS服务,也可以用独立的服务器部署NFS服务端,共享的数据目录为/home/pvdata/share(777权限),在此之前需要在每个node上安装 nfs-utils与rpcbind软件(具体可参考https://blog.csdn.net/m0_37939350/article/details/108068192第一章节)
[root@k8s-node01 storageclass]# cat /etc/exports
/home/pvdata/share *(rw,sync,insecure,no_subtree_check,no_root_squash) #PV动态供应
[root@k8s-node01 nginx]# exportfs -r #nfs配置生效
[root@k8s-node01 nginx]# exportfs #查看NFS共享目录
2、创建nfs-provisioner
[root@k8s-node01 storageclass]# cat serviceaccount.yaml #配置nfs-provisioner 的SA账号
# ServiceAccount the nfs-provisioner Deployment runs under;
# granted its permissions via the RBAC bindings in service-rbac.yaml.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
[root@k8s-node01 storageclass]# kubectl apply -f serviceaccount.yaml #执行该文件
[root@k8s-node01 storageclass]# cat service-rbac.yaml #对sa账号做rbac授权,RBAC(Role-Based Access control)基于角色的访问控制;
# RBAC for the NFS provisioner: cluster-wide rights to manage PVs/PVCs,
# read StorageClasses, emit Events, plus a namespaced Role for the
# leader-election Endpoints lock.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get"]
  # PodSecurityPolicy lives in the "policy" API group; the old "extensions"
  # group was deprecated and removed (extensions/v1beta1 PSP gone since k8s 1.16).
  - apiGroups: ["policy"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
# Bind the cluster-wide role to the provisioner's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced permissions used by the provisioner's leader election
# (it takes a lock on an Endpoints object).
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-provisioner
  apiGroup: rbac.authorization.k8s.io
[root@k8s-node01 storageclass]# kubectl apply -f service-rbac.yaml #执行该文件
[root@k8s-node01 storageclass]# cat nfs-provisioner-deploy.yaml #创建pod用来运行nfs-provisioner
# Deployment running the external NFS client provisioner. It watches for
# PVCs that reference the matching StorageClass and creates PVs backed by
# subdirectories of the NFS export mounted at /persistentvolumes.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate   # stop the old pod before starting a new one (single writer on the NFS mount)
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      # "serviceAccount" is a deprecated alias; "serviceAccountName" is the supported field.
      serviceAccountName: nfs-provisioner
      containers:
        - name: nfs-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/open-ali/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: jim.com/nfs          # customizable; must match the StorageClass "provisioner" field
            - name: NFS_SERVER
              value: 10.0.0.1             # NFS server IP address
            - name: NFS_PATH
              value: /home/pvdata/share   # exported NFS share directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.0.0.1              # NFS server IP address
            path: /home/pvdata/share      # exported NFS share directory
[root@k8s-node01 storageclass]# kubectl apply -f nfs-provisioner-deploy.yaml #执行该文件
[root@k8s-node01 storageclass]# kubectl get pod -l app=nfs-provisioner #检查生成的pod;
NAME READY STATUS RESTARTS AGE
nfs-provisioner-6bbc9fcd47-79m8z 1/1 Running 0 80m
3、配置storageclass存储类
[root@k8s-node01 storageclass]# cat storageclass.yaml
# StorageClass for dynamic NFS provisioning; PVCs reference it by name.
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-storageclass   # customizable; referenced later by the PVC/volumeClaimTemplates
provisioner: jim.com/nfs   # must equal the provisioner's PROVISIONER_NAME env value
[root@k8s-node01 storageclass]# kubectl apply -f storageclass.yaml #执行该文件
[root@k8s-node01 storageclass]# kubectl get storageclasses #检查该存储类是否创建成功
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-storageclass jim.com/nfs Delete Immediate false 87m
到此NFS StorageClass存储类服务创建成功!!!
4、部署mongoDB资源
[root@k8s-node01 mongodb]# cat mongo_sta_nodeport.yaml
# MongoDB replica set: headless Service for stable per-pod DNS, NodePort
# Service for external clients, RBAC for the sidecar, and a 3-replica
# StatefulSet with NFS-backed persistent volumes.
apiVersion: v1
kind: Service
metadata:
  name: mongo
  labels:
    app: mongo
spec:
  ports:
    - name: mongo
      port: 27017
      targetPort: 27017
  clusterIP: None   # headless: gives each pod a stable DNS name (mongo-0.mongo, ...)
  selector:
    app: mongo
---
apiVersion: v1
kind: Service
metadata:
  name: mongo-service   # external access for mongo client management tools
  labels:
    app: mongo
spec:
  ports:
    - name: mongo-http
      port: 27017
  selector:
    app: mongo
  type: NodePort
---
# rbac.authorization.k8s.io/v1beta1 was removed in Kubernetes 1.22; use v1.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: mongo-default-view   # lets the default ServiceAccount (used by the sidecar) list pods
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view
subjects:
  - kind: ServiceAccount
    name: default
    namespace: default
---
apiVersion: apps/v1
kind: StatefulSet   # StatefulSet gives each mongo instance a stable identity and its own volume
metadata:
  name: mongo
spec:
  selector:
    matchLabels:
      app: mongo
  serviceName: "mongo"
  replicas: 3   # 1 primary + 2 secondaries
  template:
    metadata:
      labels:
        app: mongo
        role: mongo
        environment: test
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: mongo
          image: mongo:3.4
          command:
            - mongod
            - "--replSet"
            - rs0
            - "--bind_ip"
            - 0.0.0.0
            - "--smallfiles"
            - "--noprealloc"
          ports:
            - containerPort: 27017
          volumeMounts:
            - name: mongo-persistent-storage
              mountPath: /data/db
        # Sidecar watches pods matching the labels below via the (headless)
        # Service and keeps the replica-set membership up to date as the
        # cluster scales.
        - name: mongo-sidecar
          image: cvallance/mongo-k8s-sidecar
          env:
            - name: MONGO_SIDECAR_POD_LABELS
              value: "role=mongo,environment=test"
  volumeClaimTemplates:
    - metadata:
        name: mongo-persistent-storage
      spec:
        # The "volume.beta.kubernetes.io/storage-class" annotation is
        # deprecated; storageClassName is the supported field. References the
        # NFS StorageClass created above to persist the database files.
        storageClassName: nfs-storageclass
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 10Gi
[root@k8s-node01 mongodb]# kubectl apply -f mongo_sta_nodeport.yaml #运行以上yaml文件,创建mongo资源;
[root@k8s-node01 share]# kubectl get pod -l app=mongo -o wide #查看创建好的mongo pod信息;
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
mongo-0 2/2 Running 0 2m40s 10.122.58.240 k8s-node02 <none> <none>
mongo-1 2/2 Running 0 2m20s 10.122.135.145 k8s-node03 <none> <none>
mongo-2 2/2 Running 0 2m 10.122.58.241 k8s-node02 <none> <none>
[root@k8s-node01 share]# kubectl get svc -l app=mongo -o wide #查看mongo的服务资源,记住nodeport自动分配的端口号(32131),后面客户端管理软件连接时需要用到;
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
mongo ClusterIP None <none> 27017/TCP 3m33s app=mongo
mongo-service NodePort 10.10.233.130 <none> 27017:32131/TCP 3m33s app=mongo
5、验证mongoDB集群
[root@k8s-node01 share]# curl http://172.25.83.130: