Check the disks with lsblk
lsblk
Configure hostname resolution
Edit /etc/hosts on both machines (this is the file for static name mappings, not /etc/resolv.conf, which configures DNS resolvers):
sudo vim /etc/hosts
192.168.1.20 machine_1_name
192.168.1.10 machine_2_name
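To confirm each name resolves to the right address (hostnames here are the placeholders from above):
ping -c 1 machine_2_name    # run from machine 1; likewise ping machine_1_name from machine 2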
Install GlusterFS (on both machines)
sudo apt install glusterfs-server
Configure the Trusted Pool
Machine 1:
sudo systemctl start glusterd
sudo systemctl enable glusterd
sudo gluster peer probe machine_2_name
gluster peer status
Machine 2:
sudo systemctl start glusterd
sudo systemctl enable glusterd
sudo gluster peer probe machine_1_name
gluster peer status
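With the probes done, each node should report the other as a connected peer. The full pool membership, including the local node, can also be listed with:
sudo gluster pool list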
Run on both machine 1 and machine 2:
sudo mkdir -p /glusterfs/brick1
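If the brick disk is still blank, it needs a filesystem before it can be mounted; a minimal sketch, assuming the disk that lsblk showed is /dev/sdb (replace with your actual device):
sudo mkfs.ext4 /dev/sdb    # hypothetical device; formats the brick disk with ext4 to match the fstab entries below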
Find the brick disk's UUID
blkid
ls -l /dev/disk/by-uuid/
Create the mount point for the gluster volume, then edit /etc/fstab on both machines
sudo mkdir -p /data
Machine 1:
UUID=f78340fa-1026-40e5-9210-cb8ee140acb0 /glusterfs/brick1 ext4 defaults 0 0
# glusterfs volume
machine_1_name:/gv0 /data glusterfs defaults,_netdev 0 0
Machine 2:
UUID=f78340fa-3242-40e5-9210-cb8ee140acb0 /glusterfs/brick1 ext4 defaults 0 0
# glusterfs volume
machine_2_name:/gv0 /data glusterfs defaults,_netdev 0 0
Mount (attaches every filesystem defined in /etc/fstab; note the glusterfs entries will only mount successfully once the volume has been created and started below, so rerun this afterwards)
sudo mount -a
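Verify the brick filesystem is mounted where expected:
df -h /glusterfs/brick1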
Run on either one of the machines:
sudo gluster volume create gv0 machine_2_name:/glusterfs/brick1/gv0 machine_1_name:/glusterfs/brick1/gv0
sudo gluster volume start gv0
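With no type given, this creates a distributed volume: files are spread across the two bricks, not mirrored. If the goal is redundancy across the two machines, the volume would instead be created as a replica, e.g. (newer releases will warn that 2-way replicas are prone to split-brain):
sudo gluster volume create gv0 replica 2 machine_1_name:/glusterfs/brick1/gv0 machine_2_name:/glusterfs/brick1/gv0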
Check the gluster volume status
sudo gluster volume status
Check the gluster volume details
sudo gluster volume info
Using gluster from a client machine
yum -y install glusterfs-client    # install the glusterfs client (yum here; use apt install glusterfs-client on Debian/Ubuntu)
mkdir /mnt/glusterfs    # create the mount point
mount.glusterfs 192.168.56.11:/gv1 /mnt/glusterfs/    # mount the /gv1 volume
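A quick sanity check through the mount, using a hypothetical test file:
touch /mnt/glusterfs/testfile && ls /mnt/glusterfs/    # testfile is just an example name; it should now exist on the volume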
Automatically mounting the volume:
Edit /etc/fstab
Syntax: HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR glusterfs defaults,_netdev 0 0
For example:
192.168.56.11:/gv1 /mnt/glusterfs glusterfs defaults,_netdev 0 0
See the doc for more detailed commands
For example:
Deleting a volume (a volume must be stopped before it can be deleted)
gluster volume stop gv0
gluster volume delete gv0
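Deleting a volume does not remove the data on the bricks. To reuse a brick path for a new volume, the gluster metadata stamped on it has to be cleared first; a sketch, assuming the brick path from above:
setfattr -x trusted.glusterfs.volume-id /glusterfs/brick1/gv0    # remove the volume-id xattr gluster writes on a brick
setfattr -x trusted.gfid /glusterfs/brick1/gv0    # remove the gfid xattr
rm -rf /glusterfs/brick1/gv0/.glusterfs    # remove gluster's internal metadata directory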
Expanding a volume
GlusterFS supports expanding a volume online.
If the node being added is not yet part of the cluster, join it first with peer probe:
[root@gluster-node1 ~]# gluster peer probe gluster-node3    # add gluster-node3 to the cluster
[root@gluster-node1 ~]# gluster volume add-brick gv0 gluster-node3:/storage/brick1 force    # add the brick, expanding the gv0 volume
[root@gluster-node1 ~]# gluster volume rebalance gv0 start    # after adding, rebalance so existing files are redistributed onto the new brick
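Rebalancing runs in the background; check its progress with:
gluster volume rebalance gv0 status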
Merging bricks from new nodes into the volume
gluster peer probe 10.0.21.243    # join the node to the cluster
gluster peer probe 10.0.21.244    # join the node to the cluster
gluster volume add-brick gv0 10.0.21.243:/glusterfs/brick1/gv0 10.0.21.244:/glusterfs/brick1/gv0    # add both bricks to the volume
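As with the expansion above, follow up with a rebalance so existing data spreads onto the new bricks:
gluster volume rebalance gv0 start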
Shrinking a volume (remove-brick start migrates the data off the bricks being removed before they are dropped)
[root@gluster-node1 ~]# gluster volume remove-brick gv0 gluster-node3:/storage/brick1 start    # start removing the brick; data migration begins
[root@gluster-node1 ~]# gluster volume remove-brick gv0 gluster-node3:/storage/brick1 status    # check the status of the remove-brick operation
[root@gluster-node1 ~]# gluster volume remove-brick gv0 gluster-node3:/storage/brick1 commit    # once status shows completed, commit the remove-brick
gluster volume remove-brick gv0 10.0.21.243:/glusterfs/brick1/gv0 10.0.21.244:/glusterfs/brick1/gv0 start    # start the migration
gluster volume remove-brick gv0 10.0.21.243:/glusterfs/brick1/gv0 10.0.21.244:/glusterfs/brick1/gv0 status    # check migration status
gluster volume remove-brick gv0 10.0.21.243:/glusterfs/brick1/gv0 10.0.21.244:/glusterfs/brick1/gv0 commit    # commit once the migration completes
Replacing (migrating) a brick
To replace a brick on a distributed volume, add a new brick and then remove the brick being replaced. The replacement triggers a rebalance that migrates the data from the removed brick onto the newly added one.
gluster peer probe 10.0.21.245    # to migrate the data from 246 to 245, node 245 must first be joined to the cluster
gluster volume replace-brick gv0 10.0.21.246:/glusterfs/brick1/gv0 10.0.21.245:/glusterfs/brick1/gv0 start    # start the migration
gluster volume replace-brick gv0 10.0.21.246:/glusterfs/brick1/gv0 10.0.21.245:/glusterfs/brick1/gv0 status    # check migration status
gluster volume replace-brick gv0 10.0.21.246:/glusterfs/brick1/gv0 10.0.21.245:/glusterfs/brick1/gv0 commit    # commit once the migration is done
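Note: recent GlusterFS releases dropped the replace-brick start/status workflow; there the replacement is a single step (on replicated volumes the new brick is then repopulated by self-heal):
gluster volume replace-brick gv0 10.0.21.246:/glusterfs/brick1/gv0 10.0.21.245:/glusterfs/brick1/gv0 commit force    # one-step brick replacement on newer releases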