1. 源码编译
1.1 安装步骤
- 操作系统类型:CentOS 7.4
yum install autoconf automake bison cmockery2-devel dos2unix flex \
fuse-devel glib2-devel libacl-devel libaio-devel libattr-devel \
libcurl-devel libibverbs-devel librdmacm-devel libtirpc-devel \
libtool libxml2-devel lvm2-devel make openssl-devel pkgconfig \
pyliblzma python-devel python-eventlet python-netifaces \
python-paste-deploy python-simplejson python-sphinx python-webob \
pyxattr readline-devel rpm-build sqlite-devel systemtap-sdt-devel \
tar userspace-rcu-devel
./autogen.sh
./configure --without-libtirpc
make -j 8
make install
- 查看是否成功:
[root@instance-bt783gwn glusterfs]# glusterd
[root@instance-bt783gwn glusterfs]# ps ax|grep glus
32386 ? Ssl 0:00 glusterd
32421 pts/0 R+ 0:00 grep --color=auto glus
成功!
1.2 参考文档(待补充)
2. 服务端启动
目前,有两台主机,配置如下:
# gfs-node0
[root@instance-bt783gwn glusterfs]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 40G 0 disk
└─vda1 253:1 0 40G 0 part /
vdb 253:16 0 5G 0 disk
vdc 253:32 0 5G 0 disk
vdd 253:48 0 5G 0 disk
# gfs-node1
[root@VM_0_2_centos glusterfs]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 41M 0 rom
vda 253:0 0 50G 0 disk
└─vda1 253:1 0 50G 0 part /
vdb 253:16 0 20G 0 disk
├─vdb1 253:17 0 5G 0 part
├─vdb2 253:18 0 5G 0 part
└─vdb3 253:19 0 5G 0 part
- 关闭防火墙,以及selinux
[root@instance-bt783gwn glusterfs]# systemctl disable firewalld
[root@instance-bt783gwn glusterfs]# systemctl stop firewalld
[root@instance-bt783gwn glusterfs]# setenforce 0
setenforce: SELinux is disabled
- 启动glusterd服务:
glusterd
- 修改 /etc/hosts
[root@VM_0_2_centos glusterfs]# cat /etc/hosts
127.0.0.1 VM_0_2_centos VM_0_2_centos
127.0.0.1 localhost.localdomain localhost
127.0.0.1 localhost4.localdomain4 localhost4
::1 VM_0_2_centos VM_0_2_centos
::1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
127.0.0.1 gfs-node1
**.**.**.** gfs-node0
# 注意(review):将本机主机名 gfs-node1 解析到 127.0.0.1,可能导致对端 peer probe / brick 通信时地址解析异常,建议改为本机实际内网 IP —— 待确认
- 两台主机建立受信存储池(peer probe)
# 只需在其中一台主机上执行 peer probe 即可
# 若在另一台主机上查询不到 peer 信息,再在其上重复此操作
# 以下在 gfs-node0 上操作
[root@instance-bt783gwn glusterfs]# gluster peer probe gfs-node1
peer probe: success
# 查看是否成功
[root@instance-bt783gwn glusterfs]# gluster peer status
Number of Peers: 1
Hostname: gfs-node1
Uuid: ad6d25c1-0210-4363-8ec3-20912f3f52e6
State: Peer in Cluster (Connected)
- 磁盘挂载
[root@VM_0_2_centos /]# mkdir data{0..2}
[root@VM_0_2_centos /]# mount /dev/vdb1 data0
[root@VM_0_2_centos /]# mount /dev/vdb2 data1
[root@VM_0_2_centos /]# mount /dev/vdb3 data2
- 创建镜像卷
[root@VM_0_2_centos ~]# gluster volume create test-volume0 replica 2 gfs-node0:/data0 gfs-node1:/data0 force
volume create: test-volume0: success: please start the volume to access data
[root@VM_0_2_centos ~]# gluster volume create test-volume1 replica 2 gfs-node0:/data1 gfs-node1:/data1 force
volume create: test-volume1: success: please start the volume to access data
[root@VM_0_2_centos ~]# gluster volume create test-volume2 replica 2 gfs-node0:/data2 gfs-node1:/data2 force
volume create: test-volume2: success: please start the volume to access data
- 查看卷信息
[root@VM_0_2_centos ~]# gluster volume info
Volume Name: test-volume0
Type: Replicate
Volume ID: 2c3be91a-5a5f-4d95-9822-c763f24f2152
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: gfs-node0:/data0
Brick2: gfs-node1:/data0
Options Reconfigured:
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
Volume Name: test-volume1
Type: Replicate
Volume ID: 1cffafdd-6049-477e-b266-bbc3d5780a42
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: gfs-node0:/data1
Brick2: gfs-node1:/data1
Options Reconfigured:
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
Volume Name: test-volume2
Type: Replicate
Volume ID: abfb7b53-7636-48d9-879c-4fd08d63169c
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: gfs-node0:/data2
Brick2: gfs-node1:/data2
Options Reconfigured:
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
- 启动卷
[root@VM_0_2_centos ~]# gluster volume start test-volume0
volume start: test-volume0: success
[root@VM_0_2_centos ~]# gluster volume start test-volume1
volume start: test-volume1: success
[root@VM_0_2_centos ~]# gluster volume start test-volume2
volume start: test-volume2: success
- 查看卷状态
[root@VM_0_2_centos ~]# gluster volume status
Status of volume: test-volume0
Gluster process TCP Port RDMA Port Online Pid
------------------------------------------------------------------------------
Brick gfs-node0:/data0 49152 0 Y 32363
Brick gfs-node1:/data0 49152 0 Y 3117
Task Status of Volume test-volume0
------------------------------------------------------------------------------
There are no active volume tasks
Status of volume: test-volume1
Gluster process TCP Port RDMA Port Online Pid
------------------------------------------------------------------------------
Brick gfs-node0:/data1 49153 0 Y 32403
Brick gfs-node1:/data1 49153 0 Y 3158
Task Status of Volume test-volume1
------------------------------------------------------------------------------
There are no active volume tasks
Status of volume: test-volume2
Gluster process TCP Port RDMA Port Online Pid
------------------------------------------------------------------------------
Brick gfs-node0:/data2 49154 0 Y 32437
Brick gfs-node1:/data2 49154 0 Y 3204
Task Status of Volume test-volume2
------------------------------------------------------------------------------
There are no active volume tasks
成功!
3. 客户端启动
- 创建一个文件夹,并挂载到glusterfs 的卷中
[root@VM_0_2_centos ~]# mkdir /mnt/gluster0
[root@VM_0_2_centos ~]# mount -t glusterfs gfs-node1:test-volume0 /mnt/gluster0
[root@VM_0_2_centos ~]# mkdir /mnt/gluster1
[root@VM_0_2_centos ~]# mount -t glusterfs gfs-node1:test-volume1 /mnt/gluster1
[root@VM_0_2_centos ~]# mkdir /mnt/gluster2
[root@VM_0_2_centos ~]# mount -t glusterfs gfs-node1:test-volume2 /mnt/gluster2
- 查看镜像卷是否创建成功
# 注:两个客户端挂载的是同一个卷,文件互见只能说明挂载正常;
# 若要验证副本是否生效,可分别在两个节点的 brick 目录(如 /data0)下确认文件各有一份
# 在节点gfs-node0(instance-bt783gwn)上,创建10个文件
[root@instance-bt783gwn /]# cd /mnt/gluster0
[root@instance-bt783gwn gluster0]# touch file{1..10}
# 在节点gfs-node1(VM_0_2_centos)上查看
[root@VM_0_2_centos ~]# ls /mnt/gluster0
file1 file10 file2 file3 file4 file5 file6 file7 file8 file9
成功!