Kubernetes 1.20.5 Lab Notes: Volumes and Data Persistence (GlusterFS PV & PVC)
1.1 GlusterFS PV & PVC
1.1.1 Static Provisioning
1. Create the GlusterFS cluster (data disks already formatted):
(omitted)
2. Create a replicated volume (replica 3 arbiter 1 keeps two full data copies plus one metadata-only arbiter brick):
gluster volume create rep_vol replica 3 arbiter 1 node1:/data/glusterfs/brick node2:/data/glusterfs/brick node3:/data/glusterfs/brick
3. Start the volume:
gluster volume start rep_vol
4. Inspect the volume:
gluster volume info
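It is also worth confirming that every brick and the self-heal daemon are online before moving on:
gluster volume status rep_vol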
5. Install glusterfs-fuse on the worker nodes (the kubelet needs it to mount GlusterFS volumes):
yum -y install centos-release-gluster
yum -y install glusterfs-fuse
6. Add GlusterFS cluster name resolution on the worker nodes:
echo 192.168.0.101 node1 >> /etc/hosts
echo 192.168.0.102 node2 >> /etc/hosts
echo 192.168.0.103 node3 >> /etc/hosts
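Before handing the volume to Kubernetes, a quick manual mount verifies that the fuse client works (a throwaway check; /mnt/gtest is an arbitrary path):
mkdir -p /mnt/gtest
mount -t glusterfs node1:/rep_vol /mnt/gtest
df -h /mnt/gtest
umount /mnt/gtest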
7. Create the Endpoints (the port value is required by the schema but is not used for the fuse mount; any value from 1 to 65535 works):
File: glusterfs-static-endpoints.yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-static-cluster
subsets:
  - addresses:
      - ip: 192.168.0.101
      - ip: 192.168.0.102
      - ip: 192.168.0.103
    ports:
      - port: 1990
        protocol: TCP
kubectl apply -f glusterfs-static-endpoints.yaml
8. Check the Endpoints:
kubectl get endpoints
9. Create a selector-less Service with the same name, so that the Endpoints object persists:
File: glusterfs-static-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: glusterfs-static-cluster
spec:
  ports:
    - port: 1990
kubectl apply -f glusterfs-static-service.yaml
10. Check the Service:
kubectl get service
11. Create the PV, referencing the Endpoints and the GlusterFS volume name:
File: glusterfs-static-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: glusterfs-static-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: glusterfs-static-cluster
    path: rep_vol
    readOnly: false
kubectl apply -f glusterfs-static-pv.yaml
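Manually created PVs default to the Retain reclaim policy; an optional sketch of making that explicit in the spec above:
spec:
  persistentVolumeReclaimPolicy: Retain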
12. Check the PV status (it should be Available):
kubectl get pv
13. Create the PVC; it binds to the PV above by matching capacity and access modes:
File: glusterfs-static-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: glusterfs-static-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
kubectl apply -f glusterfs-static-pvc.yaml
14. Check the PVC status (it should be Bound):
kubectl get pvc
15. Check the PV status again (now Bound to the PVC):
kubectl get pv
16. Create a Pod that mounts the PVC:
File: glusterfs-static-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: glusterfs-static-pod
spec:
  containers:
    - name: glusterfs-static-pod
      image: busybox
      args:
        - /bin/sh
        - -c
        - sleep 30000
      volumeMounts:
        - mountPath: /data
          name: data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: glusterfs-static-pvc
kubectl apply -f glusterfs-static-pod.yaml
17. Check the Pod status:
kubectl get pod -o wide
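If the Pod is stuck in ContainerCreating, mount failures usually surface in its events:
kubectl describe pod glusterfs-static-pod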
18. Check the mount inside the Pod:
kubectl exec -it glusterfs-static-pod -- sh
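Inside the shell, a short write/read round trip confirms the GlusterFS mount works (the file name is arbitrary):
df -h /data
echo hello > /data/test.txt
cat /data/test.txt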
19. Delete the Pod:
kubectl delete -f glusterfs-static-pod.yaml
20. Delete the PVC:
kubectl delete -f glusterfs-static-pvc.yaml
or
kubectl delete pvc glusterfs-static-pvc
21. Delete the PV:
kubectl delete -f glusterfs-static-pv.yaml
or
kubectl delete pv glusterfs-static-pv
22. Delete the Service:
kubectl delete -f glusterfs-static-service.yaml
1.1.2 Dynamic Provisioning
1. Create the GlusterFS cluster (data disks left unformatted; heketi manages raw block devices itself):
(omitted)
2. Configure heketi:
On GlusterFS cluster node node1:
yum -y install heketi heketi-client
Set up passwordless SSH from heketi to the GlusterFS nodes:
ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''
chown heketi:heketi /etc/heketi/heketi_key*
ssh-copy-id -i /etc/heketi/heketi_key.pub root@node1
ssh-copy-id -i /etc/heketi/heketi_key.pub root@node2
ssh-copy-id -i /etc/heketi/heketi_key.pub root@node3
Edit the heketi configuration file:
File: /etc/heketi/heketi.json
...
"executor": "ssh",
"_sshexec_comment": "SSH username and private key file information",
"sshexec": {
  "keyfile": "/etc/heketi/heketi_key",
  "user": "root",
  "port": "22",
  "fstab": "/etc/fstab"
},
Start heketi and enable it at boot:
systemctl start heketi
systemctl enable heketi
systemctl status heketi
Test that heketi responds:
curl node1:8080/hello
Register the GlusterFS nodes and their devices with heketi:
export HEKETI_CLI_SERVER=http://node1:8080
File: topology.json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.0.101"
              ],
              "storage": [
                "192.168.0.101"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": false
            }
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.0.102"
              ],
              "storage": [
                "192.168.0.102"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": false
            }
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.0.103"
              ],
              "storage": [
                "192.168.0.103"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": false
            }
          ]
        }
      ]
    }
  ]
}
heketi-cli topology load --json=/etc/heketi/topology.json
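After loading the topology, confirm that heketi sees the cluster, nodes, and devices (the IDs in the output are generated per installation):
heketi-cli cluster list
heketi-cli topology info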
3. Install glusterfs-fuse on the worker nodes:
yum -y install centos-release-gluster
yum -y install glusterfs-fuse
4. Add GlusterFS cluster name resolution on the worker nodes:
echo 192.168.0.101 node1 >> /etc/hosts
echo 192.168.0.102 node2 >> /etc/hosts
echo 192.168.0.103 node3 >> /etc/hosts
5. Create the StorageClass pointing at the heketi REST endpoint:
File: glusterfs-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-storageclass
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://192.168.0.101:8080"
  restauthenabled: "false"
  volumetype: "replicate:3"
kubectl apply -f glusterfs-storageclass.yaml
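If heketi authentication were enabled, the StorageClass parameters would also need credentials; a sketch (heketi-secret is an illustrative Secret holding the heketi admin key):
parameters:
  resturl: "http://192.168.0.101:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-secret"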
6. Check the StorageClass:
kubectl get storageclass
7. Create the PVC; the StorageClass provisions a matching PV automatically:
File: glusterfs-dynamic-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: glusterfs-dynamic-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: glusterfs-storageclass
  resources:
    requests:
      storage: 1Gi
kubectl apply -f glusterfs-dynamic-pvc.yaml
8. Check the PVC status (it should be Bound):
kubectl get pvc
9. Check the PV status (a PV named pvc-<uid> has been provisioned automatically):
kubectl get pv
10. Check the volume information on the GlusterFS cluster:
heketi-cli volume list
heketi-cli volume info a5e6d1391575fd0d557725b00b6002d4   # use an ID from the list output above
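The same volume is also visible from any GlusterFS node (heketi names its volumes vol_<id>):
gluster volume list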
11. Create a Pod that mounts the PVC:
File: glusterfs-dynamic-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: glusterfs-dynamic-pod
spec:
  containers:
    - name: glusterfs-dynamic-pod
      image: busybox
      args:
        - /bin/sh
        - -c
        - sleep 3000
      volumeMounts:
        - name: data
          mountPath: /data
          readOnly: false
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: glusterfs-dynamic-pvc
kubectl apply -f glusterfs-dynamic-pod.yaml
12. Check the Pod status:
kubectl get pod -o wide
13. Check the mount inside the Pod (same write/read check as in the static case):
kubectl exec -it glusterfs-dynamic-pod -- sh
14. Delete the Pod:
kubectl delete -f glusterfs-dynamic-pod.yaml
15. Delete the PVC:
kubectl delete -f glusterfs-dynamic-pvc.yaml
or
kubectl delete pvc glusterfs-dynamic-pvc
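Dynamically provisioned PVs default to the Delete reclaim policy, so removing the PVC should also remove the PV and the backing heketi volume; confirm with:
kubectl get pv
heketi-cli volume list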
16. Delete the StorageClass:
kubectl delete -f glusterfs-storageclass.yaml