#### Environment
gluster-server: 192.168.10.91, 192.168.10.92, 192.168.10.93, 192.168.10.94
client: 192.168.10.95
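The shell prompts in the examples below use the hostnames ops1 through ops5 for these machines. A minimal /etc/hosts mapping, assuming ops1 through ops5 correspond to .91 through .95 in order, would be:
# cat >> /etc/hosts << 'EOF'
192.168.10.91 ops1
192.168.10.92 ops2
192.168.10.93 ops3
192.168.10.94 ops4
192.168.10.95 ops5
EOF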
#### Installing GlusterFS
- Prepare the disk. We use the logical volume /dev/mapper/vg_ops1-lv_gfs:
# mkfs.ext4 /dev/mapper/vg_ops1-lv_gfs
# mkdir -p /media/gfs && mount /dev/mapper/vg_ops1-lv_gfs /media/gfs
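If the vg_ops1/lv_gfs logical volume does not exist yet, it can be created along these lines (the backing device /dev/sdb is an assumption; substitute your own disk):
# pvcreate /dev/sdb                       # assumed spare disk
# vgcreate vg_ops1 /dev/sdb
# lvcreate -n lv_gfs -l 100%FREE vg_ops1  # give the brick LV all free space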
- Install glusterfs on all four server nodes, as sketched below
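A plausible install command for EL6, mirroring the 3.7.11 packages used for the client further down (the exact server package set is an assumption):
# yum install http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-6.6/x86_64/glusterfs{,-server,-cli,-api,-fuse,-libs,-client-xlators}-3.7.11-1.el6.x86_64.rpm -y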
- Start the glusterd service
# /etc/init.d/glusterd start
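Optionally, enable the daemon at boot on these EL6 hosts:
# chkconfig glusterd on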
#### Configuring Gluster
- On 192.168.10.91, add the other nodes as peers
# gluster
gluster> peer probe 192.168.10.92
peer probe: success.
gluster> peer probe 192.168.10.93
peer probe: success.
gluster> peer probe 192.168.10.94
peer probe: success.
gluster> peer status
Number of Peers: 3

Hostname: 192.168.10.92
Uuid: bfda894d-d644-415c-8a96-d5e28402da9e
State: Peer in Cluster (Connected)

Hostname: 192.168.10.93
Uuid: 8db3897b-8c11-4abf-9730-d3d254288e21
State: Peer in Cluster (Connected)

Hostname: 192.168.10.94
Uuid: 43e2d410-11b9-49d2-ad47-1943ae24c997
State: Peer in Cluster (Connected)
gluster> pool list
UUID                                    Hostname        State
bfda894d-d644-415c-8a96-d5e28402da9e    192.168.10.92   Connected
8db3897b-8c11-4abf-9730-d3d254288e21    192.168.10.93   Connected
43e2d410-11b9-49d2-ad47-1943ae24c997    192.168.10.94   Connected
a43aa927-7a70-44ad-b5d1-14dc616b561c    localhost       Connected
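The same commands can also be run non-interactively from the shell, which is convenient for scripting, e.g.:
# gluster peer status
# gluster pool list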
- Create the volume
The first attempt fails with an error saying /media/gfs is a mount point; rerunning the command with force succeeds. Note that the number of bricks must be a multiple of the configured replica count.
gluster> volume create vol1 replica 2 192.168.10.91:/media/gfs 192.168.10.92:/media/gfs
volume create: vol1: failed: The brick 192.168.10.91:/media/gfs is a mount point. Please create a sub-directory under the mount point and use that as the brick directory. Or use 'force' at the end of the command if you want to override this behavior.
gluster> volume create vol1 replica 2 192.168.10.91:/media/gfs 192.168.10.92:/media/gfs force
volume create: vol1: success: please start the volume to access data
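Instead of force, the error message's recommended approach is to use a subdirectory of the mount point as the brick; a sketch, with brick1 as an assumed directory name:
# mkdir /media/gfs/brick1    # run on every server contributing a brick
gluster> volume create vol1 replica 2 192.168.10.91:/media/gfs/brick1 192.168.10.92:/media/gfs/brick1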
- View volume information
gluster> volume info
Volume Name: vol1
Type: Replicate
Volume ID: b085c24d-162e-4fc2-9de9-554299f2aa18
Status: Created
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: 192.168.10.91:/media/gfs
Brick2: 192.168.10.92:/media/gfs
Options Reconfigured:
performance.readdir-ahead: on
- Start the volume
gluster> volume start vol1    # the volume must be started before it can be mounted
volume start: vol1: success
#### Installing FUSE on the client and using the volume
- Install the FUSE client packages
# yum install http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-6.6/x86_64/glusterfs{,-fuse,-libs,-client-xlators}-3.7.11-1.el6.x86_64.rpm -y
- Mount vol1. Multiple clients can mount the volume and read and write concurrently.
# mkdir -p /media/gfs-vol1 && mount.glusterfs 192.168.10.92:vol1 /media/gfs-vol1
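To make the mount survive reboots, a typical fstab entry (an illustrative sketch; _netdev delays the mount until networking is up) would be:
# echo '192.168.10.92:/vol1 /media/gfs-vol1 glusterfs defaults,_netdev 0 0' >> /etc/fstab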
- You can cd into /media/gfs-vol1 and work with files directly:
[root@ops5 gfs-vol1]# echo 112233 > d
[root@ops5 gfs-vol1]# cat d
112233
All files:
# ls
a b c d file1 file10 file11 file2 file3 file4 file5 file6 file7 file8 file9 lost+found
File distribution across the nodes (listing taken after the volume expansion and rebalance described below). Each replica pair holds identical copies: ops1/ops2 mirror one set of files, ops3/ops4 the other.
[root@ops1 ~]# ls /media/gfs/
file1 file2 file5 file6 file8 lost+found
[root@ops2 ~]# ls /media/gfs/
file1 file2 file5 file6 file8 lost+found
[root@ops3 ~]# ls /media/gfs/
a b c d file10 file11 file3 file4 file7 file9 lost+found
[root@ops4 gfs-vol1]# ls /media/gfs/
a b c d file10 file11 file3 file4 file7 file9 lost+found
#### Other Gluster operations
##### Expanding a volume
As with volume creation, bricks must be added in multiples of the replica count:
gluster> volume add-brick vol1 192.168.10.93:/media/gfs 192.168.10.94:/media/gfs force
volume add-brick: success
gluster> volume info
Volume Name: vol1
Type: Distributed-Replicate #note the type change here
Volume ID: b085c24d-162e-4fc2-9de9-554299f2aa18
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 192.168.10.91:/media/gfs
Brick2: 192.168.10.92:/media/gfs
Brick3: 192.168.10.93:/media/gfs
Brick4: 192.168.10.94:/media/gfs
Options Reconfigured:
performance.readdir-ahead: on
##### Rebalancing a volume
After adding bricks, existing data stays where it is until a rebalance redistributes it across the new brick set:
gluster> volume rebalance vol1 start
volume rebalance: vol1: success: Rebalance on vol1 has been started successfully. Use rebalance status command to check status of the rebalance process.
ID: b10dd77e-a1b6-4c7c-abcc-b8b96792c1b2
gluster> volume rebalance vol1 status
Node             Rebalanced-files    size      scanned    failures    skipped    status       run time in h:m:s
---------        ----------------    ------    -------    --------    -------    ---------    -----------------
localhost                       3    5Bytes          3           0          0    completed    0:0:0
192.168.10.92                   0    0Bytes          0           0          0    completed    0:0:0
192.168.10.93                   0    0Bytes          3           0          0    completed    0:0:0
192.168.10.94                   0    0Bytes          0           0          0    completed    0:0:0
volume rebalance: vol1: success
##### Listing all volumes
gluster> volume list
vol1
##### Checking volume status
Appending detail shows capacity information, clients shows client connections and the amount of data transferred, and tasks shows running tasks; see help for the other options.
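For example (output omitted):
gluster> volume status vol1 detail
gluster> volume status vol1 clients
gluster> volume status vol1 tasks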
gluster> volume status vol1    # omit the volume name to show all volumes
Status of volume: vol1
Gluster process                              TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 192.168.10.91:/media/gfs               49152     0          Y       20959
Brick 192.168.10.92:/media/gfs               49152     0          Y       5288
Brick 192.168.10.93:/media/gfs               49152     0          Y       15863
Brick 192.168.10.94:/media/gfs               49152     0          Y       3840
NFS Server on localhost                      N/A       N/A        N       N/A
Self-heal Daemon on localhost                N/A       N/A        Y       21207
NFS Server on 192.168.10.92                  N/A       N/A        N       N/A
Self-heal Daemon on 192.168.10.92            N/A       N/A        Y       5359
NFS Server on 192.168.10.94                  N/A       N/A        N       N/A
Self-heal Daemon on 192.168.10.94            N/A       N/A        Y       3868
NFS Server on 192.168.10.93                  N/A       N/A        N       N/A
Self-heal Daemon on 192.168.10.93            N/A       N/A        Y       15891
Task Status of Volume vol1
------------------------------------------------------------------------------
Task : Rebalance
ID : b10dd77e-a1b6-4c7c-abcc-b8b96792c1b2
Status : completed