1.在k8s集群中的Node节点安装GlusterFS客户端
yum install glusterfs glusterfs-fuse -y
2.GlusterFS管理服务需要以特权模式运行,在kube-apiserver的启动参数(/etc/kubernetes/manifests/kube-apiserver.yaml)中添加:
--allow-privileged=true
3.给要部署GlusterFS管理服务的节点打上storagenode=glusterfs标签,是为了将GlusterFS容器定向部署到安装了GlusterFS的Node上:
kubectl label node node1 storagenode=glusterfs
kubectl label node node2 storagenode=glusterfs
kubectl label node node3 storagenode=glusterfs
4.在部署GlusterFS节点的机器上添加一个裸盘sdb
然后使用命令对它的文件系统进行设定
mkfs.xfs -f /dev/sdb
5.然后我们给所有的节点加载对应的模块
modprobe dm_snapshot
modprobe dm_mirror
modprobe dm_thin_pool
# Persist the dm_* module loading across reboots by dropping a modules script
# into /etc/sysconfig/modules/ (executed at boot on RHEL/CentOS).
# NOTE: the heredoc delimiter must be quoted ('EOF'); otherwise the CURRENT
# shell expands ${kernel_module} (unset -> empty) and $? while writing the
# file, producing a broken script.
cat >/etc/sysconfig/modules/glusterfs.modules <<'EOF'
#!/bin/bash
# Load each device-mapper module required by GlusterFS/Heketi, but only if it
# exists for the running kernel (modinfo succeeds).
for kernel_module in dm_snapshot dm_mirror dm_thin_pool; do
    /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
    if [ $? -eq 0 ]; then
        /sbin/modprobe ${kernel_module}
    fi
done
EOF
chmod +x /etc/sysconfig/modules/glusterfs.modules
然后我们检查模块是否加载成功
lsmod | egrep '(dm_snapshot|dm_mirror|dm_thin_pool)'
6.我们去获取Heketi服务
wget https://github.com/heketi/heketi/releases/download/v7.0.0/heketi-client-v7.0.0.linux.amd64.tar.gz
tar -xf heketi-client-v7.0.0.linux.amd64.tar.gz
7.将/heketi-client/bin/heketi-cli拷贝到各个节点的/usr/local/bin下
cp /heketi-client/bin/heketi-cli /usr/local/bin/heketi-cli
//提前建立好免密通道
scp /heketi-client/bin/heketi-cli node1:/usr/local/bin/heketi-cli
8.部署GlusterFS
# DaemonSet: runs one GlusterFS pod on every node labelled storagenode=glusterfs.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: glusterfs
  labels:
    glusterfs: daemonset
  annotations:
    description: GlusterFS DaemonSet
    tags: glusterfs
spec:
  selector:
    matchLabels:
      glusterfs-node: pod
  template:
    metadata:
      name: glusterfs
      labels:
        glusterfs-node: pod
    spec:
      # Only schedule onto the nodes labelled in step 3.
      nodeSelector:
        storagenode: glusterfs
      hostNetwork: true
      containers:
        - image: gluster/gluster-centos:latest
          name: glusterfs
          volumeMounts:
            - name: glusterfs-heketi
              mountPath: "/var/lib/heketi"
            - name: glusterfs-run
              mountPath: "/run"
            - name: glusterfs-lvm
              mountPath: "/run/lvm"
            - name: glusterfs-etc
              mountPath: "/etc/glusterfs"
            - name: glusterfs-logs
              mountPath: "/var/log/glusterfs"
            - name: glusterfs-config
              mountPath: "/var/lib/glusterd"
            - name: glusterfs-dev
              mountPath: "/dev"
            - name: glusterfs-misc
              mountPath: "/var/lib/misc/glusterfsd"
            - name: glusterfs-cgroup
              mountPath: "/sys/fs/cgroup"
              readOnly: true
            - name: glusterfs-ssl
              mountPath: "/etc/ssl"
              readOnly: true
          securityContext:
            capabilities: {}
            privileged: true  # privileged mode: glusterd manages host block devices
          readinessProbe:
            timeoutSeconds: 3
            initialDelaySeconds: 60
            exec:
              command:
                - "/bin/bash"
                - "-c"
                - systemctl status glusterd.service
          livenessProbe:
            timeoutSeconds: 3
            initialDelaySeconds: 60
            exec:
              command:
                - "/bin/bash"
                - "-c"
                - systemctl status glusterd.service
      volumes:
        - name: glusterfs-heketi
          hostPath:
            path: "/var/lib/heketi"
        # The original declared glusterfs-run with no volume source, which
        # current API validation rejects; emptyDir matches the old implicit default.
        - name: glusterfs-run
          emptyDir: {}
        - name: glusterfs-lvm
          hostPath:
            path: "/run/lvm"
        - name: glusterfs-etc
          hostPath:
            path: "/etc/glusterfs"
        - name: glusterfs-logs
          hostPath:
            path: "/var/log/glusterfs"
        - name: glusterfs-config
          hostPath:
            path: "/var/lib/glusterd"
        - name: glusterfs-dev
          hostPath:
            path: "/dev"
        - name: glusterfs-misc
          hostPath:
            path: "/var/lib/misc/glusterfsd"
        - name: glusterfs-cgroup
          hostPath:
            path: "/sys/fs/cgroup"
        - name: glusterfs-ssl
          hostPath:
            path: "/etc/ssl"
然后我们通过kubectl get pod 查看我们运行的glusterfs
[root@scmaster test1]# kubectl get pod
NAME READY STATUS RESTARTS AGE
deploy-heketi-789db4dc95-6pdt8 1/1 Running 0 133m
glusterfs-4wvg9 1/1 Running 1 3h11m
glusterfs-fgtws 1/1 Running 1 3h11m
glusterfs-wdm9p 1/1 Running 1 3h11m
9.部署Heketi服务
Heketi是一个提供RESTful API管理GlusterFS卷的框架,能够在OpenStack,Kubernetes,OpenShift等云平台上实现动态存储资源供应,支持GlusterFS多集群管理,便于管理员对GlusterFS进行操作。
当配置好后Heketi会自动完成PV,VG,LV的创建。
创建一个ServiceAccount,Role,RoleBinding(heketi-svc.yaml)
# ServiceAccount + Role + RoleBinding granting Heketi the permissions it needs
# to discover the GlusterFS pods and exec gluster commands inside them.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heketi-service-account
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: heketi
  namespace: default
rules:
  - apiGroups: [""]  # "" denotes the core API group
    # Heketi only needs read access to pods/services/endpoints...
    resources: ["pods", "services", "endpoints"]
    verbs: ["get", "watch", "list"]
  - apiGroups: [""]
    # ...plus the ability to exec into the glusterfs pods.
    resources: ["pods/exec"]
    verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: heketi
  namespace: default  # the binding only takes effect in this namespace
subjects:
  - kind: ServiceAccount
    name: heketi-service-account
    namespace: default  # required for ServiceAccount subjects
roleRef:
  kind: Role  # could also bind a ClusterRole
  name: heketi
  apiGroup: rbac.authorization.k8s.io
然后再部署Heketi服务(deploy-heketi.yaml)
# Heketi bootstrap Deployment plus a ClusterIP Service exposing its REST API.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: deploy-heketi
  labels:
    glusterfs: heketi-deployment
    deploy-heketi: heketi-deployment
  annotations:
    description: Defines how to deploy Heketi
spec:
  selector:
    matchLabels:
      glusterfs: heketi-pod
  replicas: 1
  template:
    metadata:
      name: deploy-heketi
      labels:
        glusterfs: heketi-pod
        name: deploy-heketi
    spec:
      serviceAccountName: heketi-service-account  # SA bound to the "heketi" Role
      containers:
        - image: heketi/heketi
          imagePullPolicy: IfNotPresent
          name: deploy-heketi
          env:
            - name: HEKETI_EXECUTOR
              value: kubernetes
            - name: HEKETI_FSTAB
              value: "/var/lib/heketi/fstab"
            - name: HEKETI_SNAPSHOT_LIMIT
              value: "14"
            - name: HEKETI_KUBE_GLUSTER_DAEMONSET
              value: "y"
            # With HEKETI_ADMIN_KEY set, clients must authenticate, e.g.:
            #   heketi-cli -s $HEKETI_CLI_SERVER --user admin --secret admin123 \
            #     topology load --json=topology.json
            - name: HEKETI_ADMIN_KEY
              value: "admin123"  # password used by the commands below; change as needed
          ports:
            - containerPort: 8080
          volumeMounts:
            - name: db
              mountPath: "/var/lib/heketi"
          readinessProbe:
            timeoutSeconds: 3
            initialDelaySeconds: 3
            httpGet:
              path: "/hello"
              port: 8080
          livenessProbe:
            timeoutSeconds: 3
            initialDelaySeconds: 30
            httpGet:
              path: "/hello"
              port: 8080
      volumes:
        - name: db
          hostPath:
            path: "/heketi-data"
---
kind: Service
apiVersion: v1
metadata:
  name: deploy-heketi
  labels:
    glusterfs: heketi-service
    deploy-heketi: support
  annotations:
    description: Exposes Heketi Service
spec:
  selector:
    name: deploy-heketi
  ports:
    - name: deploy-heketi
      port: 8080
      targetPort: 8080
最后通过Heketi创建GFS集群
1.需要三块裸盘(我们上面已经定义好了)
2.修改topology.json(ip,盘,node节点名字)
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "node1"
              ],
              "storage": [
                "192.168.192.162"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "node2"
              ],
              "storage": [
                "192.168.192.163"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "node3"
              ],
              "storage": [
                "192.168.192.164"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"
          ]
        }
      ]
    }
  ]
}
通过kubectl get pod -o wide查看heketi服务的ip地址
heketi-cli -s http://10.244.135.10:8080 --user admin --secret admin123 topology load --json=topology.json
如果出现问题的话:进入节点glusterfs容器执行pvcreate -ff --metadatasize=128M --dataalignment=256K /dev/sdb然后再重新运行命令。
结果是在GlusterFS集群各个节点的/dev/sdb盘上成功创建PV和VG
10.创建StorageClass
# StorageClass backed by the kubernetes.io/glusterfs provisioner; PVCs using it
# are provisioned dynamically through the Heketi REST endpoint below.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-heketi
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Retain
# allowVolumeExpansion is a top-level StorageClass field, not a parameter.
allowVolumeExpansion: true
parameters:
  resturl: "http://10.244.135.10:8080"  # Heketi pod/service address (see step 9)
  restauthenabled: "true"
  restuser: "admin"
  restuserkey: "admin123"  # must match HEKETI_ADMIN_KEY; a secretName/secretNamespace pair is the safer alternative
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "replicate:3"  # 3-way replica volume across the three nodes
11.在执行完前面的操作后,我们就能定义一个PVC来申请GlusterFS的存储空间了,一旦PVC生成,系统便会触发Heketi进行相应的操作,主要为在GlusterFS集群中创建brick,再创建并启动一个Volume,接下来Pod就能通过Volume的设置将这个PVC挂载到容器内部进行使用了。
先创建pvc-gluster-heketi.yaml
# PVC requesting 10Gi from the gluster-heketi StorageClass (dynamic provisioning).
# Fixes vs. original: missing space after ":" on the name/storageClassName lines
# (invalid YAML), and the accessModes entry must be the CamelCase API value
# "ReadWriteOnce", written as a list item.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-gluster-heketi
spec:
  storageClassName: gluster-heketi  # the StorageClass created in step 10
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
最后只需要在你创建pod时将PVC写进yaml文件就行(但是要保证pod和PVC属于同一个命名空间)
......
# Pod-spec fragment: mount the PVC as a volume (PVC and Pod must share a namespace).
volumes:
  - name: gluster-volume
    persistentVolumeClaim:
      claimName: pvc-gluster-heketi
只需验证pod是否启动即可。