#配置host
[root@mysql-salve2 gfsmnt]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.2.152 mysql-master1
192.168.2.153 mysql-salve1
192.168.2.150 mysql-master2
192.168.2.151 mysql-salve2
192.168.2.154 mycat
# Install GlusterFS server packages on all three nodes
# (repo package first, then the server/client/fuse/rdma components).
yum install -y centos-release-gluster
# NOTE: original line passed -y twice (`yum install -y ... -y`); once is enough.
yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma
# Start the GlusterFS management daemon and enable it at boot.
# `enable --now` performs both the enable and the start in one command
# (equivalent end state to separate `start` + `enable` calls).
systemctl enable --now glusterd.service
# Join the nodes into the trusted storage pool; run from any ONE node.
# NOTE(review): the three hostnames below (k8s-master1-node1, k8s-master2-node2,
# node3-nfs) do NOT appear in the /etc/hosts shown above (which defines
# mysql-master1/2, mysql-salve1/2, mycat) — these commands look pasted from a
# different environment. Replace them with the actual peer hostnames resolvable
# on this cluster before running — TODO confirm which three nodes are intended.
gluster peer probe k8s-master1-node1
gluster peer probe k8s-master2-node2
gluster peer probe node3-nfs
gluster peer probe mysql-master2
# Template: gluster peer probe <node-hostname>
# Check cluster peer status (run on any pool member):
gluster peer status
#创建数据存储目录:
[root@swarm-manager ~]#mkdir -p /opt/gluster/data
[root@swarm-node-1 ~]# mkdir -p /opt/gluster/data
[root@swarm-node-2 ~]# mkdir -p /opt/gluster/data
#创建GlusterFS磁盘: gluster volume create 卷名称 replica 副本数 transport tcp 节点1主机名:/目录 节点2主机名:/目录 ...
# Create a 3-way replicated volume named "test" over TCP; `force` is required
# because the bricks live on the root partition (normally discouraged).
# NOTE(review): hostnames here must match the peers probed into the pool and be
# resolvable via /etc/hosts — the k8s-* names do not match the hosts file shown
# at the top of these notes; verify before running.
gluster volume create test replica 3 transport tcp k8s-master1-node1:/opt/gluster/data k8s-master2-node2:/opt/gluster/data node3-nfs:/opt/gluster/data force
[root@k8s-master1-node1 ~]# gluster volume create test replica 3 transport tcp k8s-master1-node1:/opt/gluster/data k8s-master2-node2:/opt/gluster/data node3-nfs:/opt/gluster/data force
volume create: test: success: please start the volume to access data
# Inspect the volume definition (type, bricks, status — should show "Created"
# before start, "Started" after):
[root@swarm-manager ~]#gluster volume info
# Start the "test" volume so clients can mount it:
gluster volume start test
增加节点
gluster peer probe IP/主机名
删除节点
gluster peer detach IP/主机名
节点状态
gluster peer status
YYQ运维技术博客_运维的工作学习之路
https://www.yeyouqing.top
https://yeyouqing.top
yeyouqing.top
www.yeyouqing.top