Kubernetes + GlusterFS Usage Example

GlusterFS Deployment

▶ yum install centos-release-gluster -y

▶ yum install glusterfs-server

▶ mkfs.xfs /dev/vdd

▶ vim /etc/fstab

/dev/vdd /glusterfs xfs defaults 1 2

▶  mount -a && mount

▶ systemctl enable glusterd

▶ systemctl start glusterd

▶ systemctl status glusterd

▶ gluster peer probe sibat-kubernetes-02

▶ gluster peer probe sibat-kubernetes-03

▶ gluster peer probe sibat-kubernetes-04

▶ gluster peer probe sibat-kubernetes-05

▶  gluster peer status

▶ gluster volume create k8s-data replica 5 transport tcp sibat-kubernetes-01:/gluster sibat-kubernetes-02:/gluster sibat-kubernetes-03:/gluster sibat-kubernetes-04:/gluster sibat-kubernetes-05:/gluster force

▶ gluster volume list

k8s-data

▶ gluster volume info

Volume Name: k8s-data

Type: Replicate

Volume ID: 658b9a86-6cf8-4dcc-a8b8-e581381d5608

Status: Created

Snapshot Count: 0

Number of Bricks: 1 x 5 = 5

Transport-type: tcp

Bricks:

Brick1: sibat-kubernetes-01:/gluster

Brick2: sibat-kubernetes-02:/gluster

Brick3: sibat-kubernetes-03:/gluster

Brick4: sibat-kubernetes-04:/gluster

Brick5: sibat-kubernetes-05:/gluster

Options Reconfigured:

transport.address-family: inet

storage.fips-mode-rchecksum: on

nfs.disable: on

performance.client-io-threads: off

▶  gluster volume start k8s-data

volume start: k8s-data: success
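After starting the volume, every brick should show as online; the standard status subcommand can be used to confirm (output omitted here):

▶ gluster volume status k8s-data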

▶  gluster volume quota k8s-data enable

volume quota : success

▶ gluster volume quota k8s-data limit-usage / 10000GB

volume quota : success
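To confirm the 10000GB limit is actually in place, the quota list subcommand can be run:

▶ gluster volume quota k8s-data list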

Heketi Deployment

▶ yum install -y heketi heketi-client

▶ cat /etc/heketi/heketi.json

{
  "port": "18080",
  "use_auth": true,
  "jwt": {
    "admin": {
      "key": "brunutRaspuWRe1404"
    },
    "user": {
      "key": "brunutR2020"
    }
  },
  "glusterfs": {
    "executor": "ssh",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },
    "db": "/var/lib/heketi/heketi.db",
    "loglevel": "debug"
  }
}

▶ cat /etc/heketi/topology_demo.json

{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [ "sibat-kubernetes-1" ],
              "storage": [ "192.168.233.11" ]
            },
            "zone": 1
          },
          "devices": [ "/dev/vdc" ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [ "sibat-kubernetes-2" ],
              "storage": [ "192.168.233.212" ]
            },
            "zone": 1
          },
          "devices": [ "/dev/vdc" ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [ "sibat-kubernetes-3" ],
              "storage": [ "192.168.233.108" ]
            },
            "zone": 1
          },
          "devices": [ "/dev/vdc" ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [ "sibat-kubernetes-4" ],
              "storage": [ "192.168.233.64" ]
            },
            "zone": 1
          },
          "devices": [ "/dev/vdc" ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [ "sibat-kubernetes-5" ],
              "storage": [ "192.168.233.96" ]
            },
            "zone": 1
          },
          "devices": [ "/dev/vdc" ]
        }
      ]
    }
  ]
}

Before the topology can be loaded, the SSH key referenced in heketi.json must exist and be readable by the heketi user:

▶ ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''

Generating public/private rsa key pair.

Your identification has been saved in /etc/heketi/heketi_key.

Your public key has been saved in /etc/heketi/heketi_key.pub.

The key fingerprint is:

SHA256:KphU9aFUhWCzm75QA1grMIm6MWILFoMGVPDAQcBYYFM root@sibat-kubernetes-1

The key's randomart image is:

+---[RSA 2048]----+
|^%*E. *ooo.      |
|*O+o = =..       |
|o =.+ o .        |
|*o o . o         |
|=+o   = S        |
|.o o o o         |
|  o o o          |
|     o .         |
|      .          |
+----[SHA256]-----+

▶ chown heketi:heketi /etc/heketi/heketi*
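Heketi's ssh executor also needs the public key installed on every GlusterFS node, and the service must be running with the configuration above before the topology can be loaded. A minimal sketch, assuming root SSH access and the systemd unit shipped with the CentOS heketi package (repeat ssh-copy-id for each of the five nodes):

▶ ssh-copy-id -i /etc/heketi/heketi_key.pub root@sibat-kubernetes-01   # repeat for -02 .. -05

▶ systemctl enable --now heketi

▶ heketi-cli --server http://192.168.233.247:18080 --user admin --secret brunutRaspuWRe1404 cluster list   # connectivity/auth check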

▶ heketi-cli --server http://192.168.233.247:18080 --user admin --secret brunutRaspuWRe1404 topology load --json=/etc/heketi/topology.json

Found node sibat-kubernetes-01 on cluster 0c085268e5bc20f7ac434d6aaddc4ca6

Adding device /dev/vdd ... OK

Found node sibat-kubernetes-03 on cluster 0c085268e5bc20f7ac434d6aaddc4ca6

Adding device /dev/vdd ... OK

Found node sibat-kubernetes-02 on cluster 0c085268e5bc20f7ac434d6aaddc4ca6

Adding device /dev/vdd ... Unable to add device: Setup of device /dev/vdd failed (already initialized or contains data?):   Device /dev/vdd not found.

Found node sibat-kubernetes-04 on cluster 0c085268e5bc20f7ac434d6aaddc4ca6

Adding device /dev/vdd ... OK

Found node sibat-kubernetes-05 on cluster 0c085268e5bc20f7ac434d6aaddc4ca6

Adding device /dev/vdd ... OK

Note that /dev/vdd could not be added on sibat-kubernetes-02 because the device does not exist on that node; the disk must be attached (or the topology corrected) before Heketi can place bricks there.

▶ heketi-cli cluster info 0c085268e5bc20f7ac434d6aaddc4ca6

Cluster id: 0c085268e5bc20f7ac434d6aaddc4ca6

Nodes:

5d27f7967a32032fc7343ef51f9c139e

64f24f93443adbccb47bf0dc52a8ca85

987050d88911b2d8dec7faf796f88b76

a6d29c2fd347dae4a646cde5937c5dd6

a8f08e5c6fb6c1020796efcbcad9c06a

Volumes:

Block: true

File: true

▶ heketi-cli volume create --size=5

Name: vol_b91e90468865c4bf1518b6943882be5e

Size: 5

Volume Id: b91e90468865c4bf1518b6943882be5e

Cluster Id: 0c085268e5bc20f7ac434d6aaddc4ca6

Mount: 192.168.233.247:vol_b91e90468865c4bf1518b6943882be5e

Mount Options: backup-volfile-servers=192.168.233.69,192.168.233.142,192.168.233.194,192.168.233.64

Block: false

Free Size: 0

Reserved Size: 0

Block Hosting Restriction: (none)

Block Volumes: []

Durability Type: replicate

Distribute Count: 1

Replica Count: 3
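The Mount line above can be verified from any host with the glusterfs-fuse client installed; the /mnt mount point here is just an example:

▶ mount -t glusterfs 192.168.233.247:vol_b91e90468865c4bf1518b6943882be5e /mnt

▶ df -h /mnt && umount /mnt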

Creating the StorageClass

▶ vim gluster-sc.yaml

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs
  annotations:
    # annotation values must be strings, so "true" has to be quoted
    storageclass.beta.kubernetes.io/is-default-class: "true"
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
parameters:
  resturl: "http://192.168.233.11:18080"
  restauthenabled: "true"
  restuser: "admin"
  restuserkey: "brunutRaspuWRe1404"
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "replicate:3"
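Apply the manifest and confirm the StorageClass exists (and is marked as the default):

▶ kubectl apply -f gluster-sc.yaml

▶ kubectl get storageclass glusterfs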

StatefulSet Usage Example

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: jenkins
  namespace: devops
  labels:
    name: jenkins
spec:
  selector:
    matchLabels:
      name: jenkins
  serviceName: jenkins
  replicas: 1
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      name: jenkins
      labels:
        name: jenkins
    spec:
      terminationGracePeriodSeconds: 10
      serviceAccountName: jenkins
      containers:
        - name: jenkins
          image: jenkins/jenkins:lts
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
            - containerPort: 50000
          resources:
            limits:
              cpu: 4
              memory: 8Gi
            requests:
              cpu: 0.5
              memory: 500Mi
          env:
            - name: LIMITS_MEMORY
              valueFrom:
                resourceFieldRef:
                  resource: limits.memory
                  divisor: 1Mi
            - name: JAVA_OPTS
              # value: -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -XX:MaxRAMFraction=1 -XshowSettings:vm -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85
              value: -Xmx$(LIMITS_MEMORY)m -XshowSettings:vm -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85
            - name: JENKINS_OPTS
              value: "--prefix=/jenkins"
          volumeMounts:
            - name: jenkins-home
              mountPath: /var/jenkins_home
          livenessProbe:
            httpGet:
              path: /jenkins/login
              port: 8080
            initialDelaySeconds: 60
            timeoutSeconds: 5
            failureThreshold: 12 # ~2 minutes
          readinessProbe:
            httpGet:
              path: /jenkins/login
              port: 8080
            initialDelaySeconds: 60
            timeoutSeconds: 5
            failureThreshold: 12 # ~2 minutes
      securityContext:
        fsGroup: 1000
  volumeClaimTemplates:
    - metadata:
        name: jenkins-home
        annotations:
          volume.beta.kubernetes.io/storage-class: glusterfs
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 100Gi
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins
  namespace: devops
spec:
  type: NodePort
  selector:
    name: jenkins
  # ensure the client ip is propagated to avoid the invalid crumb issue when using LoadBalancer (k8s >=1.7)
  # externalTrafficPolicy: Local
  ports:
    - name: http
      port: 80
      targetPort: 8080
      nodePort: 32000
      protocol: TCP
    - name: agent
      port: 50000
      protocol: TCP
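Assuming the manifest above is saved as jenkins.yaml (an illustrative file name), applying it should have the glusterfs StorageClass dynamically provision the 100Gi volume for the claim template; the PVC should reach Bound and the pod Running:

▶ kubectl apply -f jenkins.yaml

▶ kubectl -n devops get pvc,pods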

PVC Usage Example

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jnlp-mvn
  namespace: devops
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: glusterfs
  resources:
    requests:
      storage: 50Gi
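Applied the same way (pvc.yaml is an illustrative file name), the claim should bind to a dynamically provisioned 50Gi GlusterFS volume:

▶ kubectl apply -f pvc.yaml

▶ kubectl -n devops get pvc jnlp-mvn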
