Install heketi and heketi-client on all nodes
yum install -y heketi-client heketi
Configure the service
cd /etc/heketi/
cp heketi.json heketi.json-abk
cat /etc/heketi/heketi.json
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": true,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "admin@123"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "user@123"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },

    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host": "https://kubernetes.host:8443",
      "cert": "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
    },

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",

    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel": "warning"
  }
}
Set up passwordless SSH from heketi to GlusterFS
# With the ssh executor, the heketi server must be able to log into every node of the GlusterFS cluster without a password;
# -t: key type;
# -q: quiet mode;
# -f: directory and file name of the generated key; must match the "keyfile" value in the sshexec section of heketi.json;
# -N: key passphrase; "" means empty
[root@heketi ~]# ssh-keygen -t rsa -q -f /etc/heketi/heketi_key -N ""
# The heketi service runs as the heketi user, which must be able to read the newly generated key, otherwise the service will not start
[root@heketi ~]# chown heketi:heketi /etc/heketi/heketi_key
# Distribute the public key to the GlusterFS hosts;
# -i: specify the public key
[root@heketi ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@10.98.15.183
[root@heketi ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@10.98.15.182
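Before starting the service, it is worth confirming that key-based login actually works; a minimal check against one of the nodes above:
# should print the gluster version without prompting for a password
ssh -i /etc/heketi/heketi_key root@10.98.15.183 'gluster --version'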
Start heketi
systemctl enable heketi
systemctl start heketi
systemctl status heketi
Verify
# curl http://127.0.0.1:8080/hello
Hello from Heketi
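Since use_auth is enabled, a quick end-to-end check of the JWT credentials is an authenticated heketi-cli call; before the topology is loaded this simply returns an empty list:
heketi-cli --server http://localhost:8080 --user admin --secret admin@123 cluster list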
Set up the GlusterFS cluster
# The GlusterFS cluster is defined through the topology.json file;
# topology specifies the hierarchy: clusters --> nodes --> node/devices --> hostnames/zone;
# the "manage" entry under node/hostnames holds the host IP of the management channel; do not use a hostname here if the heketi server cannot resolve the GlusterFS nodes by hostname;
# the "storage" entry under node/hostnames holds the host IP of the data channel; it may differ from "manage";
# node/zone marks the failure domain the node belongs to; heketi places replicas across failure domains to improve data availability, e.g. zones can be assigned per rack to create cross-rack failure domains;
# devices lists the block devices of each GlusterFS node (multiple disks are allowed); they must be raw devices without a filesystem
# In this setup, both GlusterFS hosts have an added disk /dev/sdb (verified below)
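Before loading the topology, a quick way to confirm on each GlusterFS host that /dev/sdb is still a raw, unformatted device (wipefs prints nothing when no filesystem signatures are present, and lsblk -f shows an empty FSTYPE):
lsblk -f /dev/sdb
wipefs /dev/sdb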
# cat /etc/heketi/topology.json
{
    "clusters": [
        {
            "nodes": [
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "10.98.15.182"
                            ],
                            "storage": [
                                "10.98.15.182"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/sdb"
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "10.98.15.183"
                            ],
                            "storage": [
                                "10.98.15.183"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/sdb"
                    ]
                }
            ]
        }
    ]
}
Build the GlusterFS cluster from topology.json
# The glusterd service must already be running on each GlusterFS node, but a trusted storage pool does not need to be formed beforehand;
# heketi-cli can also add cluster, node, device, volume, etc. manually, layer by layer;
# "--server http://localhost:8080": may be omitted when heketi-cli is run on localhost;
# "--user admin --secret admin@123": because heketi.json enables authentication, heketi-cli must carry the credentials, otherwise it fails with "Error: Invalid JWT token: Unknown user"
# Disks added to the GlusterFS nodes need no formatting or mounting
# heketi-cli --server http://localhost:8080 --user admin --secret admin@123 topology load --json=/etc/heketi/topology.json
View cluster information
# Inspect the heketi topology; at this point no volumes or bricks have been created;
# "heketi-cli cluster info" shows cluster details;
# "heketi-cli node info" shows node details;
# "heketi-cli device info" shows device details;
# "heketi-cli --user admin --secret admin@123 topology info" shows the whole topology
# Viewing cluster info requires the cluster ID (listed by "heketi-cli cluster list")
heketi-cli --user admin --secret admin@123 cluster info 7842538a220449d77c16548a032cdcfa
List the nodes
[root@gfs ~]# heketi-cli --user admin --secret admin@123 node list
Id:a7084475f502c999c353f95607590be6 Cluster:7842538a220449d77c16548a032cdcfa
Id:f0dd2cb95bfcb537e3edbf82d56a33a3 Cluster:7842538a220449d77c16548a032cdcfa
Viewing detailed node info requires the node ID
[root@gfs ~]# heketi-cli --user admin --secret admin@123 node info a7084475f502c999c353f95607590be6
Node Id: a7084475f502c999c353f95607590be6
State: online
Cluster Id: 7842538a220449d77c16548a032cdcfa
Zone: 1
Management Hostname: 10.98.15.182
Storage Hostname: 10.98.15.182
Devices:
Id:f7d92bcb39009ffd3e7f4786dc1674b1 Name:/dev/sdb State:online Size (GiB):6 Used (GiB):2 Free (GiB):4
Viewing detailed device info requires the device ID
# The device IDs are listed by
heketi-cli --user admin --secret admin@123 topology info
# which shows each device together with its ID
[root@gfs ~]# heketi-cli --user admin --secret admin@123 device info f7d92bcb39009ffd3e7f4786dc1674b1
Device Id: f7d92bcb39009ffd3e7f4786dc1674b1
Name: /dev/sdb
State: online
Size (GiB): 6
Used (GiB): 2
Free (GiB): 4
Bricks:
Id:35036b67795fbadade84b93729f59a17 Size (GiB):2 Path: /var/lib/heketi/mounts/vg_f7d92bcb39009ffd3e7f4786dc1674b1/brick_35
Dynamically provisioning GlusterFS storage in a K8S cluster
Dynamic mode (Dynamic): the cluster administrator does not create PVs by hand; instead, a StorageClass describes the backend storage and labels it as a "Class". The PVC then names the storage class it wants, and the system automatically creates the PV and binds it to the PVC. A PVC may declare the class as "" to explicitly opt out of dynamic provisioning.
1. The cluster administrator creates the storage class (StorageClass) in advance;
2. A user creates a persistent volume claim (PVC: PersistentVolumeClaim) that uses the storage class;
3. The claim tells the system it needs a persistent volume (PV: PersistentVolume);
4. The system reads the storage class information;
5. Based on that information, the system automatically creates the PV the PVC needs in the background;
6. The user creates a Pod that uses the PVC;
7. The application in the Pod persists data through the PVC;
8. The PVC in turn persists the data on the PV.
Define the StorageClass
# provisioner: the storage provisioner, which varies with the storage backend;
# reclaimPolicy: defaults to "Delete", i.e. when the PVC is deleted, the corresponding PV and the backend volume, bricks (LVM), etc. are deleted with it; "Retain" keeps the data, which then has to be cleaned up by hand
# resturl: the URL of the heketi API service;
# restauthenabled: optional, defaults to "false"; must be "true" when the heketi service has authentication enabled;
# restuser: optional, the user name to use when authentication is enabled;
# secretNamespace: optional; when authentication is enabled, may be set to the namespace that uses the persistent storage;
# secretName: optional; when authentication is enabled, the heketi password must be stored in a Secret resource;
# clusterid: optional, the cluster ID, or a list of cluster IDs in the form "id1,id2";
# volumetype: optional, the volume type and its parameters; if unset, the provisioner decides the type. E.g. "volumetype: replicate:3" is a 3-replica replicate volume, "volumetype: disperse:4:2" is a disperse volume with 4 data and 2 redundancy bricks, and "volumetype: none" is a distribute volume
cat gluster-heketi-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-heketi-storageclass
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete   # PV and backend volume are deleted automatically with the PVC
parameters:
  resturl: "http://10.98.15.182:8080"
  clusterid: "7842538a220449d77c16548a032cdcfa"
  restauthenabled: "true"
  restuser: "admin"
  restuserkey: "admin@123"
  gidMin: "2000"
  gidMax: "5000"
  volumetype: "replicate:2"
allowVolumeExpansion: true
(Note: this method is deprecated upstream)
restuserkey: the password of the Gluster REST service / Heketi user, used to authenticate against the REST server. This parameter is deprecated in favor of secretNamespace + secretName.
See the official Kubernetes documentation:
https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#glusterfs
The current method for defining the StorageClass is shown below
[root@master-63 storage]# echo -n admin@123 | base64
YWRtaW5AMTIz
No namespace needs to be defined (the Secret lands in the current namespace, default here)
[root@master-63 storage]# cat heketi-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
data:
  key: YWRtaW5AMTIz
  # base64 encoded password. E.g.: echo -n "mypassword" | base64
type: kubernetes.io/glusterfs
Note: the StorageClass resource must define secretNamespace: "default"
cat gluster-heketi-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-heketi-storageclass
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Retain
parameters:
  resturl: "http://10.98.15.182:8080"
  clusterid: "7842538a220449d77c16548a032cdcfa"
  restauthenabled: "true"
  restuser: "admin"
  secretName: "heketi-secret"
  secretNamespace: "default"
  gidMin: "2000"
  gidMax: "5000"
  volumetype: "replicate:2"
allowVolumeExpansion: true
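A minimal apply-and-verify sequence, using the file names from above:
kubectl apply -f heketi-secret.yaml
kubectl apply -f gluster-heketi-storageclass.yaml
kubectl get storageclass gluster-heketi-storageclass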
Define the PVC
cat heiket-pvc.yaml
ReadWriteOnce: abbreviated RWO; read-write, mountable by a single node only;
ReadOnlyMany: abbreviated ROX; read-only, mountable by multiple nodes;
ReadWriteMany: abbreviated RWX; read-write, mountable by multiple nodes
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-heketi-pvc
spec:
  storageClassName: gluster-heketi-storageclass
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
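Apply the claim and check that it binds (file name as above):
kubectl apply -f heiket-pvc.yaml
kubectl get pvc gluster-heketi-pvc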
Inspect after creation
[root@master-63 storage]# kubectl describe pvc gluster-heketi-pvc
Name: gluster-heketi-pvc
Namespace: default
StorageClass: gluster-heketi-storageclass
Status: Bound
Volume: pvc-ee00a9a7-636f-41ec-895f-d41f75bd7142
Labels: <none>
Annotations: pv.kubernetes.io/bind-completed: yes
pv.kubernetes.io/bound-by-controller: yes
volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/glusterfs
Finalizers: [kubernetes.io/pvc-protection]
Capacity: 2Gi
Access Modes: RWX
VolumeMode: Filesystem
Mounted By: <none>
Events: <none>
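The automatically provisioned PV can be inspected as well; its name matches the Volume field in the output above:
kubectl get pv pvc-ee00a9a7-636f-41ec-895f-d41f75bd7142
kubectl describe pv pvc-ee00a9a7-636f-41ec-895f-d41f75bd7142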
View the details of the volume: a replicate volume with 2 replicas;
"vgscan" and "vgdisplay" can also be used to inspect the LVM volume groups, etc.
[root@glusterfs01 ~]# gluster volume list
[root@glusterfs01 ~]# gluster volume info
Inspect
GlusterFS has created a 2G LVM logical volume
[root@gfs ~]# lvdisplay
--- Logical volume ---
LV Name tp_35036b67795fbadade84b93729f59a17
VG Name vg_f7d92bcb39009ffd3e7f4786dc1674b1
LV UUID xvcnSm-8XQt-IFTn-94LK-Rjf0-fhpj-FVdagH
LV Write Access read/write (activated read only)
LV Creation host, time gfs, 2020-06-11 18:04:22 +0800
LV Pool metadata tp_35036b67795fbadade84b93729f59a17_tmeta
LV Pool data tp_35036b67795fbadade84b93729f59a17_tdata
LV Status available
# open 3
LV Size 2.00 GiB
Allocated pool data 1.31%
Allocated metadata 10.35%
Current LE 512
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:2
Create the Pod
apiVersion: apps/v1
kind: Deployment
metadata:
  name: http-v1
  labels:
    server: http-v1
    app: web
spec:
  replicas: 1
  selector:
    matchLabels:
      server: http-v1
      app: web
  template:
    metadata:
      name: http-v1
      labels:
        server: http-v1
        app: web
    spec:
      containers:
      - name: http-v1
        image: registry.cn-shenzhen.aliyuncs.com/jbjb/test:nginx-v01
        ports:
        - containerPort: 80
        volumeMounts:
        - name: storage001
          mountPath: "/abc/data"
      volumes:
      - name: storage001
        persistentVolumeClaim:
          claimName: gluster-heketi-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: http-v1
spec:
  type: NodePort
  selector:
    server: http-v1
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
    nodePort: 3838   # note: outside the default 30000-32767 NodePort range; requires a customized --service-node-port-range on the apiserver
Verify the mount
Log into the Pod and create a file
Then check on the GlusterFS hosts (see the commands below)
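A sketch of those steps, assuming the single pod produced by the Deployment above:
# look up the pod by the Deployment's label and write a test file through the mounted volume
POD=$(kubectl get pods -l server=http-v1 -o jsonpath='{.items[0].metadata.name}')
kubectl exec -it "$POD" -- sh -c 'echo hello > /abc/data/test.txt'
# on a GlusterFS host the file appears inside the brick directory heketi created;
# the .../brick subdirectory is heketi's usual layout; adjust to the Path shown by "heketi-cli device info"
ls /var/lib/heketi/mounts/vg_*/brick_*/brick/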
GlusterFS snapshots
Take a snapshot of the volume vol_a7e777cc3fc46f8ec227dc34255a703f
Create the snapshot
gluster snapshot create k8s-data vol_a7e777cc3fc46f8ec227dc34255a703f
Enable user-serviceable snapshots (USS)
[root@gfs ~]# gluster volume set vol_a7e777cc3fc46f8ec227dc34255a703f features.uss enable
volume set: success
[root@gfs ~]# gluster snapshot info
Snapshot : k8s-data_GMT-2020.06.12-01.56.12
Snap UUID : d973521a-e5a0-41bf-87a3-102b51416bca
Created : 2020-06-12 01:56:12
Snap Volumes:
Snap Volume Name : f295833229834f5ba50b328b940b0fcc
Origin Volume name : vol_a7e777cc3fc46f8ec227dc34255a703f
Snaps taken for vol_a7e777cc3fc46f8ec227dc34255a703f : 1
Snaps available for vol_a7e777cc3fc46f8ec227dc34255a703f : 255
Status : Started
Activate the snapshot
gluster snapshot activate k8s-data_GMT-2020.06.12-01.56.12
Inspect
[root@gfs ~]# gluster snapshot info
Snapshot : k8s-data_GMT-2020.06.12-01.56.12
Snap UUID : d973521a-e5a0-41bf-87a3-102b51416bca
Created : 2020-06-12 01:56:12
Snap Volumes:
Snap Volume Name : f295833229834f5ba50b328b940b0fcc
Origin Volume name : vol_a7e777cc3fc46f8ec227dc34255a703f
Snaps taken for vol_a7e777cc3fc46f8ec227dc34255a703f : 1
Snaps available for vol_a7e777cc3fc46f8ec227dc34255a703f : 255
Status : Started
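With features.uss enabled and the snapshot activated, clients can browse the snapshot read-only through the hidden .snaps directory at the root of the mounted volume. A sketch, assuming the volume is FUSE-mounted on /mnt:
mount -t glusterfs 10.98.15.182:/vol_a7e777cc3fc46f8ec227dc34255a703f /mnt
ls /mnt/.snaps/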
Deletion order
Delete the pod, the pvc, and (if desired) the storageclass
The reclaimPolicy configured in the storageclass is Delete
kubectl delete -f gluster-heketi-pod.yaml
kubectl delete -f gluster-heketi-pvc.yaml
Without deleting the storageclass, the gluster volume, disk mounts, LVs, etc. can also be removed by hand on the GlusterFS side:
heketi-cli --user admin --secret admin@123 topology info
Find the volume's ID, then delete a7e777cc3fc46f8ec227dc34255a703f
[root@gfs ~]# heketi-cli --user admin --secret admin@123 volume delete a7e777cc3fc46f8ec227dc34255a703f
Volume a7e777cc3fc46f8ec227dc34255a703f deleted
Checking again, everything is now empty
Checking again, the disk space has been reclaimed
With the Pod up and running, stop heketi
systemctl stop heketi
Stopping heketi does not affect volumes that are already in use, but it blocks the creation of new PVCs
PS: if a PVC is deleted through kubectl while heketi is stopped, the gluster volume, the LVM logical volumes, the disk mounts, etc. are NOT cleaned up automatically (see the cleanup sketch below)
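Such leftovers can be cleaned up manually once heketi is running again; a sketch using the commands shown earlier (<volume-id> is a placeholder):
systemctl start heketi
# list the orphaned volumes, then delete them through heketi,
# which also removes the LVs and mounts on the GlusterFS nodes
heketi-cli --user admin --secret admin@123 volume list
heketi-cli --user admin --secret admin@123 volume delete <volume-id>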