Part 1: Gluster Node Configuration
hosts file configuration

[root@GlusterFS03 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.100  Glusterfs01
10.0.0.101  Glusterfs02
10.0.0.102  Glusterfs03
[root@GlusterFS03 ~]#
yum repo configuration

[root@Glusterfs01 yum.repos.d]# scp CentOS7-Base.repo epel.repo Glusterfs03:/etc/yum.repos.d/
The authenticity of host 'glusterfs03 (10.0.0.102)' can't be established.
ECDSA key fingerprint is 2b:65:f1:2d:24:9f:c3:56:5e:1c:86:c6:c5:65:d9:53.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'glusterfs03,10.0.0.102' (ECDSA) to the list of known hosts.
root@glusterfs03's password:
CentOS7-Base.repo                             100% 1572     1.5KB/s   00:00
epel.repo                                     100% 1572     1.5KB/s   00:00
[root@Glusterfs01 yum.repos.d]#

[root@GlusterFS03 yum.repos.d]# ls
CentOS7-Base.repo  epel.repo
[root@GlusterFS03 yum.repos.d]#
Network configuration

Same method as above:

[root@Glusterfs01 network-scripts]# scp ifcfg-eno16777736 Glusterfs03:/etc/sysconfig/network-scripts/
root@glusterfs03's password:
ifcfg-eno16777736                             100%  400     0.4KB/s   00:00
[root@Glusterfs01 network-scripts]# scp ifcfg-eno33554984 Glusterfs03:/etc/sysconfig/network-scripts/
root@glusterfs03's password:
ifcfg-eno33554984                             100%  289     0.3KB/s   00:00
[root@Glusterfs01 network-scripts]#
[root@GlusterFS03 network-scripts]# cat ifcfg-eno16777736
TYPE=Ethernet
BOOTPROTO=dhcp
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME=eno16777736
UUID=d84ee072-d5a7-4653-bb34-a5396661a286
DEVICE=eno16777736
ONBOOT=yes
PEERDNS=yes
PEERROUTES=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
#IPADDR=10.83.16.245
#NETMASK=255.255.254.0
#GATEWAY=10.83.16.1
DNS1=10.25.1.7
DNS2=10.10.0.5
DNS3=10.10.0.7
[root@GlusterFS03 network-scripts]#
[root@GlusterFS03 network-scripts]# cat ifcfg-eno33554984
TYPE=Ethernet
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME=eno33554984
DEVICE=eno33554984
ONBOOT=yes
PEERDNS=yes
PEERROUTES=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPADDR=10.0.0.102
NETMASK=255.255.255.0
[root@GlusterFS03 network-scripts]#

[root@GlusterFS03 ~]# ping 10.83.16.1
PING 10.83.16.1 (10.83.16.1) 56(84) bytes of data.
64 bytes from 10.83.16.1: icmp_seq=1 ttl=255 time=2.10 ms
64 bytes from 10.83.16.1: icmp_seq=2 ttl=255 time=2.16 ms
^C
--- 10.83.16.1 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 2.101/2.131/2.162/0.055 ms
[root@GlusterFS03 ~]#
[root@GlusterFS03 ~]# yum clean all
[root@GlusterFS03 ~]# yum makecache
[root@GlusterFS03 ~]# yum install net-tools
[root@GlusterFS03 ~]# ifconfig -a
eno16777736: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.83.29.97  netmask 255.255.252.0  broadcast 10.83.31.255
        inet6 fe80::20c:29ff:fe10:88b2  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:10:88:b2  txqueuelen 1000  (Ethernet)
        RX packets 34858  bytes 33016468 (31.4 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 12342  bytes 841569 (821.8 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

eno33554984: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.102  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::20c:29ff:fe10:88bc  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:10:88:bc  txqueuelen 1000  (Ethernet)
        RX packets 720  bytes 49317 (48.1 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 705  bytes 165871 (161.9 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

[root@GlusterFS03 ~]#
[root@GlusterFS03 ~]# yum install wget
Install GlusterFS on all three nodes

[root@GlusterFS03 ~]# yum install centos-release-gluster
[root@GlusterFS03 ~]# yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma
[root@GlusterFS03 ~]# yum --enablerepo=centos-gluster*-test install glusterfs-server glusterfs-cli glusterfs-geo-replication -y
[root@GlusterFS03 ~]# systemctl start glusterd.service
[root@GlusterFS03 ~]# systemctl enable glusterd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterd.service to /usr/lib/systemd/system/glusterd.service.
[root@GlusterFS03 ~]# systemctl status glusterd.service
● glusterd.service - GlusterFS, a clustered file-system server
   Loaded: loaded (/usr/lib/systemd/system/glusterd.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2018-03-28 23:15:29 CST; 18s ago
 Main PID: 11988 (glusterd)
   CGroup: /system.slice/glusterd.service
           └─11988 /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO

Mar 28 23:15:29 GlusterFS03 systemd[1]: Starting GlusterFS, a clustered file-system server...
Mar 28 23:15:29 GlusterFS03 systemd[1]: Started GlusterFS, a clustered file-system server.
[root@GlusterFS03 ~]#
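The transcripts never show GlusterFS03 joining the trusted storage pool, which has to happen before its bricks can be used in a volume. A minimal sketch, assuming Glusterfs01 and Glusterfs02 are already peered (run from an existing member; output omitted):

[root@Glusterfs01 ~]# gluster peer probe Glusterfs03
[root@Glusterfs01 ~]# gluster peer status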
Disable the firewall and SELinux

[root@GlusterFS03 ~]# iptables -F
[root@GlusterFS03 ~]# systemctl stop firewalld.service
[root@GlusterFS03 ~]# systemctl disable firewalld.service
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
Removed symlink /etc/systemd/system/basic.target.wants/firewalld.service.
[root@GlusterFS03 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
[root@GlusterFS03 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
[root@GlusterFS03 ~]# setenforce 0
[root@GlusterFS03 ~]# iptables -L
Chain INPUT (policy ACCEPT)
target     prot opt source               destination

Chain FORWARD (policy ACCEPT)
target     prot opt source               destination

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination
[root@GlusterFS03 ~]# iptables -F
[root@GlusterFS03 ~]#
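This guide simply disables firewalld. If you would rather keep it running, a sketch that opens GlusterFS's default ports instead (24007-24008/tcp for glusterd management, plus one port per brick starting at 49152; the 100-port brick range below is an assumption, size it to your brick count):

[root@GlusterFS03 ~]# firewall-cmd --permanent --add-port=24007-24008/tcp
[root@GlusterFS03 ~]# firewall-cmd --permanent --add-port=49152-49251/tcp
[root@GlusterFS03 ~]# firewall-cmd --reload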
Part 2: Testing Volume Types
Testing three volume types
1) Distributed + replicated volume

Each file is written to the /exp1 or /exp2 of one server, picked by hash (effectively at random). In production this distributed-replicated layout is the most common choice; usable capacity is total capacity divided by the replica count (for example, six 1 TB bricks with replica 2 give 3 TB usable). All data travels over the network, so 10 GbE switches and NICs recover a good share of the performance.

Run on all three servers:

[root@Glusterfs01 /]# mkdir /exp1 /exp2
[root@Glusterfs01 /]#
[root@GlusterFS02 ~]# mkdir /exp1 /exp2
[root@GlusterFS02 ~]#
[root@GlusterFS03 ~]# mkdir /exp1 /exp2
[root@GlusterFS03 ~]#
Create the volume (force is appended because the bricks sit on the root filesystem, which gluster otherwise refuses):
[root@Glusterfs01 /]# gluster volume create hehe-volume replica 2 transport tcp Glusterfs01:/exp1/ Glusterfs01:/exp2/ Glusterfs02:/exp1/ Glusterfs02:/exp2/ Glusterfs03:/exp1/ Glusterfs03:/exp2/ force
volume create: hehe-volume: success: please start the volume to access data
[root@Glusterfs01 /]#
Start the volume:
[root@Glusterfs01 /]# gluster volume start hehe-volume
volume start: hehe-volume: success
[root@Glusterfs01 /]#
Check the volume:

[root@Glusterfs01 /]# gluster volume info hehe-volume
Volume Name: hehe-volume
Type: Distributed-Replicate
Volume ID: 7ac0d232-021c-4fe3-9aed-6d5ff351f01a
Status: Started
Snapshot Count: 0
Number of Bricks: 3 x 2 = 6
Transport-type: tcp
Bricks:
Brick1: Glusterfs01:/exp1
Brick2: Glusterfs01:/exp2
Brick3: Glusterfs02:/exp1
Brick4: Glusterfs02:/exp2
Brick5: Glusterfs03:/exp1
Brick6: Glusterfs03:/exp2
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@Glusterfs01 /]#
Create a mount point and mount the volume:
[root@Glusterfs01 ~]# mkdir g5
[root@Glusterfs01 ~]# mount -t glusterfs Glusterfs01:/hehe-volume /root/g5
[root@Glusterfs01 ~]# df -h g5
Filesystem                Size  Used Avail Use% Mounted on
Glusterfs01:/hehe-volume   13G  1.8G   11G  15% /root/g5
[root@Glusterfs01 ~]#
Test it by writing files:
[root@Glusterfs01 ~]# man ls > /root/g5/ls01.txt
[root@Glusterfs01 ~]# man ls > /root/g5/ls02.txt
[root@Glusterfs01 ~]# man ls > /root/g5/ls03.txt
[root@Glusterfs01 ~]# man ls > /root/g5/ls04.txt
[root@Glusterfs01 ~]#
Verify on each of the three machines.

1) On Glusterfs01:
[root@Glusterfs01 ~]# yum install tree
[root@Glusterfs01 ~]# tree /exp*
/exp1
├── ls03.txt
└── ls04.txt
/exp2
├── ls03.txt
└── ls04.txt

0 directories, 4 files
[root@Glusterfs01 ~]#
2) On Glusterfs02:

[root@GlusterFS02 ~]# yum install tree
[root@GlusterFS02 ~]# tree /exp*
/exp1
└── ls01.txt
/exp2
└── ls01.txt

0 directories, 2 files
[root@GlusterFS02 ~]#
3) On Glusterfs03:

[root@GlusterFS03 ~]# yum install tree
[root@GlusterFS03 ~]# tree /exp*
/exp1
└── ls02.txt
/exp2
└── ls02.txt

0 directories, 2 files
[root@GlusterFS03 ~]#
Distributed + replicated, but the distribution here is uneven: where each file lands depends on the order the bricks were listed when the volume was created.
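Why it comes out this way: with replica 2, adjacent bricks on the create command line form a replica pair, so hehe-volume's pairs (reconstructed from the brick list above) are:

  pair 1: Glusterfs01:/exp1 + Glusterfs01:/exp2   (both copies on node 01)
  pair 2: Glusterfs02:/exp1 + Glusterfs02:/exp2   (both copies on node 02)
  pair 3: Glusterfs03:/exp1 + Glusterfs03:/exp2   (both copies on node 03)

Each file is hashed to one pair, so losing a single node loses both copies of its files. The reordered volume below avoids this.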
2) Replicated volume, reordered

Two servers each hold a copy of every file written to /exp*, like RAID 1 (two copies). Now let's create a replicated volume with the bricks listed in a different order:
[root@Glusterfs01 ~]# mkdir /exp3 /exp4
[root@Glusterfs01 ~]#
[root@GlusterFS02 ~]# mkdir /exp3 /exp4
[root@GlusterFS02 ~]#
[root@GlusterFS03 ~]# mkdir /exp3 /exp4
[root@GlusterFS03 ~]#
[root@Glusterfs01 ~]# gluster volume create hehehe-volume replica 2 transport tcp Glusterfs01:/exp3/ Glusterfs02:/exp3/ Glusterfs03:/exp3/ Glusterfs01:/exp4/ Glusterfs02:/exp4/ Glusterfs03:/exp4/ force
volume create: hehehe-volume: success: please start the volume to access data
[root@Glusterfs01 ~]#
Start it:

[root@Glusterfs01 ~]# gluster volume start hehehe-volume
volume start: hehehe-volume: success
[root@Glusterfs01 ~]#
Check the volume:

[root@Glusterfs01 ~]# gluster volume info hehehe-volume

Volume Name: hehehe-volume
Type: Distributed-Replicate
Volume ID: cd0be8ea-e273-4723-bd6f-324c900e03fe
Status: Started
Snapshot Count: 0
Number of Bricks: 3 x 2 = 6
Transport-type: tcp
Bricks:
Brick1: Glusterfs01:/exp3
Brick2: Glusterfs02:/exp3
Brick3: Glusterfs03:/exp3
Brick4: Glusterfs01:/exp4
Brick5: Glusterfs02:/exp4
Brick6: Glusterfs03:/exp4
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@Glusterfs01 ~]#
Mount it:

[root@Glusterfs01 ~]# mkdir g6
[root@Glusterfs01 ~]# mount -t glusterfs Glusterfs01:/hehehe-volume /root/g6
[root@Glusterfs01 ~]# df -h /root/g6
Filesystem                  Size  Used Avail Use% Mounted on
Glusterfs01:/hehehe-volume   13G  1.9G   11G  15% /root/g6
[root@Glusterfs01 ~]#
Write test files:

[root@Glusterfs01 ~]# man vgdisplay > /root/g6/vgdisplay01.txt
[root@Glusterfs01 ~]# man vgdisplay > /root/g6/vgdisplay02.txt
[root@Glusterfs01 ~]# man vgdisplay > /root/g6/vgdisplay03.txt
[root@Glusterfs01 ~]# man vgdisplay > /root/g6/vgdisplay04.txt
[root@Glusterfs01 ~]# man vgdisplay > /root/g6/vgdisplay05.txt
[root@Glusterfs01 ~]# man vgdisplay > /root/g6/vgdisplay06.txt
[root@Glusterfs01 ~]#
Check on all three machines.

1) On Glusterfs01:
[root@Glusterfs01 ~]# tree /exp*
/exp1
├── ls03.txt
└── ls04.txt
/exp2
├── ls03.txt
└── ls04.txt
/exp3
├── vgdisplay01.txt
├── vgdisplay02.txt
├── vgdisplay03.txt
└── vgdisplay04.txt
/exp4
└── vgdisplay06.txt

0 directories, 9 files
[root@Glusterfs01 ~]#
2) On Glusterfs02:

[root@GlusterFS02 ~]# tree /exp*
/exp1
└── ls01.txt
/exp2
└── ls01.txt
/exp3
├── vgdisplay01.txt
├── vgdisplay02.txt
├── vgdisplay03.txt
└── vgdisplay04.txt
/exp4
└── vgdisplay05.txt

0 directories, 7 files
[root@GlusterFS02 ~]#
3) On Glusterfs03:

[root@GlusterFS03 ~]# tree /exp*
/exp1
└── ls02.txt
/exp2
└── ls02.txt
/exp3
└── vgdisplay06.txt
/exp4
└── vgdisplay05.txt

0 directories, 4 files
[root@GlusterFS03 ~]#
Now the distribution is even: the data is both distributed and replicated correctly.

[root@Glusterfs01 g6]# touch {a..g}
[root@Glusterfs01 g6]# ll
total 0
-rw-r--r-- 1 root root 0 Mar 30 00:21 a
-rw-r--r-- 1 root root 0 Mar 30 00:21 b
-rw-r--r-- 1 root root 0 Mar 30 00:21 c
-rw-r--r-- 1 root root 0 Mar 30 00:21 d
-rw-r--r-- 1 root root 0 Mar 30 00:21 e
-rw-r--r-- 1 root root 0 Mar 30 00:21 f
-rw-r--r-- 1 root root 0 Mar 30 00:21 g
[root@Glusterfs01 g6]#
[root@Glusterfs01 g6]# tree /exp*
/exp1
├── ls03.txt
└── ls04.txt
/exp2
├── ls03.txt
└── ls04.txt
/exp3
├── d
└── f
/exp4
├── a
└── c

0 directories, 8 files
[root@Glusterfs01 g6]#

[root@GlusterFS02 ~]# tree /exp*
/exp1
└── ls01.txt
/exp2
└── ls01.txt
/exp3
├── d
└── f
/exp4
├── b
├── e
└── g

0 directories, 7 files
[root@GlusterFS02 ~]#

[root@GlusterFS03 ~]# tree /exp*
/exp1
└── ls02.txt
/exp2
└── ls02.txt
/exp3
├── a
└── c
/exp4
├── b
├── e
└── g

0 directories, 7 files
[root@GlusterFS03 ~]#
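Reconstructing the replica pairs from hehehe-volume's brick order shows why this layout is safer; every pair now spans two hosts:

  pair 1: Glusterfs01:/exp3 + Glusterfs02:/exp3   (holds d, f)
  pair 2: Glusterfs03:/exp3 + Glusterfs01:/exp4   (holds a, c)
  pair 3: Glusterfs02:/exp4 + Glusterfs03:/exp4   (holds b, e, g)

Any single node can now fail and every file keeps a surviving copy.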
Expanding a volume

Current volume:

[root@GlusterFS03 ~]# gluster volume info test-volume

Volume Name: test-volume
Type: Distribute
Volume ID: 9b938f41-105c-41b7-a327-de4c26d063e0
Status: Started
Snapshot Count: 0
Number of Bricks: 3
Transport-type: tcp
Bricks:
Brick1: GlusterFS02:/data/exp1
Brick2: GlusterFS03:/data/exp2
Brick3: GlusterFS01:/data/exp0
Options Reconfigured:
nfs.disable: on
transport.address-family: inet
[root@GlusterFS03 ~]#

Add a new brick to the existing volume:

[root@GlusterFS03 ~]# mkdir /data/exp9
[root@GlusterFS03 ~]# gluster volume add-brick test-volume GlusterFS03:/data/exp9/ force
volume add-brick: success
[root@GlusterFS03 ~]#

Check the status:

[root@Glusterfs01 ~]# gluster volume info test-volume

Volume Name: test-volume
Type: Distribute
Volume ID: 9b938f41-105c-41b7-a327-de4c26d063e0
Status: Started
Snapshot Count: 0
Number of Bricks: 4
Transport-type: tcp
Bricks:
Brick1: GlusterFS02:/data/exp1
Brick2: GlusterFS03:/data/exp2
Brick3: GlusterFS01:/data/exp0
Brick4: GlusterFS03:/data/exp9
Options Reconfigured:
nfs.disable: on
transport.address-family: inet
[root@Glusterfs01 ~]#

Rebalance the distributed volume:

[root@Glusterfs01 ~]# gluster volume rebalance test-volume start
volume rebalance: test-volume: success: Rebalance on test-volume has been started successfully. Use rebalance status command to check status of the rebalance process.
ID: 08bbdc66-4f04-496b-ab9c-6635340745ae
[root@Glusterfs01 ~]#
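The start output suggests checking progress with the status subcommand; a quick sketch (output omitted, it reports files and bytes rebalanced per node and a final status of completed):

[root@Glusterfs01 ~]# gluster volume rebalance test-volume status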
Removing a brick

Removing a brick loses the data on it. Remove the brick we just added:

[root@Glusterfs01 ~]# gluster volume remove-brick test-volume Glusterfs03:/data/exp9 start
volume remove-brick start: success
ID: 984a7f11-b6cb-4e8d-b53e-2941d23a8284
[root@Glusterfs01 ~]#
Check the volume:
[root@Glusterfs01 ~]# gluster volume info test-volume
Volume Name: test-volume
Type: Distribute
Volume ID: 9b938f41-105c-41b7-a327-de4c26d063e0
Status: Started
Snapshot Count: 0
Number of Bricks: 4
Transport-type: tcp
Bricks:
Brick1: GlusterFS02:/data/exp1
Brick2: GlusterFS03:/data/exp2
Brick3: GlusterFS01:/data/exp0
Brick4: GlusterFS03:/data/exp9
Options Reconfigured:
performance.client-io-threads: on
nfs.disable: on
transport.address-family: inet
[root@Glusterfs01 ~]#
[root@Glusterfs01 ~]# gluster volume status test-volume
Status of volume: test-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick GlusterFS02:/data/exp1                49154     0          Y       2435
Brick GlusterFS03:/data/exp2                49154     0          Y       2435
Brick GlusterFS01:/data/exp0                49154     0          Y       2450
Brick GlusterFS03:/data/exp9                49159     0          Y       6773

Task Status of Volume test-volume
------------------------------------------------------------------------------
Task                 : Remove brick
ID                   : 984a7f11-b6cb-4e8d-b53e-2941d23a8284
Removed bricks:
Glusterfs03:/data/exp9
Status               : completed

[root@Glusterfs01 ~]#
Remove it for good (commit with force):

[root@Glusterfs01 ~]# gluster volume remove-brick test-volume Glusterfs03:/data/exp9 force
Removing brick(s) can result in data loss. Do you want to Continue? (y/n) y
volume remove-brick commit force: success
[root@Glusterfs01 ~]#
[root@Glusterfs01 ~]# gluster volume status test-volume
Status of volume: test-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick GlusterFS02:/data/exp1                49154     0          Y       2435
Brick GlusterFS03:/data/exp2                49154     0          Y       2435
Brick GlusterFS01:/data/exp0                49154     0          Y       2450

Task Status of Volume test-volume
------------------------------------------------------------------------------
There are no active volume tasks

[root@Glusterfs01 ~]#
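A note on the force used above: remove-brick ... force discards whatever data is still on the brick. The gentler sequence, sketched here, polls the migration kicked off by start and only then commits:

[root@Glusterfs01 ~]# gluster volume remove-brick test-volume Glusterfs03:/data/exp9 status
[root@Glusterfs01 ~]# gluster volume remove-brick test-volume Glusterfs03:/data/exp9 commit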
Rebalance again after the brick is gone:

[root@Glusterfs01 ~]# gluster volume rebalance test-volume start
volume rebalance: test-volume: success: Rebalance on test-volume has been started successfully. Use rebalance status command to check status of the rebalance process.
ID: c0f1b259-a617-4807-90dc-a47a3d71b9b5
[root@Glusterfs01 ~]# gluster volume status test-volume
Status of volume: test-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick GlusterFS02:/data/exp1                49154     0          Y       2435
Brick GlusterFS03:/data/exp2                49154     0          Y       2435
Brick GlusterFS01:/data/exp0                49154     0          Y       2450

Task Status of Volume test-volume
------------------------------------------------------------------------------
Task                 : Rebalance
ID                   : c0f1b259-a617-4807-90dc-a47a3d71b9b5
Status               : completed

[root@Glusterfs01 ~]#
[root@Glusterfs01 ~]# gluster volume info test-volume

Volume Name: test-volume
Type: Distribute
Volume ID: 9b938f41-105c-41b7-a327-de4c26d063e0
Status: Started
Snapshot Count: 0
Number of Bricks: 3
Transport-type: tcp
Bricks:
Brick1: GlusterFS02:/data/exp1
Brick2: GlusterFS03:/data/exp2
Brick3: GlusterFS01:/data/exp0
Options Reconfigured:
performance.client-io-threads: on
nfs.disable: on
transport.address-family: inet
[root@Glusterfs01 ~]#
Installed package versions:

[root@Glusterfs01 ~]# rpm -qa |grep gluster*
glusterfs-3.12.7-1.el7.x86_64
glusterfs-cli-3.12.7-1.el7.x86_64
centos-release-gluster312-1.0-1.el7.centos.noarch
glusterfs-libs-3.12.7-1.el7.x86_64
glusterfs-client-xlators-3.12.7-1.el7.x86_64
glusterfs-api-3.12.7-1.el7.x86_64
glusterfs-server-3.12.7-1.el7.x86_64
python2-gluster-3.12.7-1.el7.x86_64
glusterfs-geo-replication-3.12.7-1.el7.x86_64
glusterfs-fuse-3.12.7-1.el7.x86_64
glusterfs-rdma-3.12.7-1.el7.x86_64
[root@Glusterfs01 ~]#
3) Striped volume

# Create the striped volume (run mkdir on all three nodes)
[root@Glusterfs01 /]# mkdir /exp5
[root@Glusterfs01 /]#
[root@GlusterFS02 ~]# mkdir /exp5
[root@GlusterFS02 ~]#
[root@GlusterFS03 ~]# mkdir /exp5
[root@GlusterFS03 ~]#
[root@Glusterfs01 /]# gluster volume create stripe-volume stripe 3 Glusterfs01:/exp5 Glusterfs02:/exp5 Glusterfs03:/exp5 force
volume create: stripe-volume: success: please start the volume to access data
[root@Glusterfs01 /]#
# Start the striped volume
[root@Glusterfs01 /]# gluster volume start stripe-volume
volume start: stripe-volume: success
[root@Glusterfs01 /]#

# Check volume info
[root@Glusterfs01 /]# gluster volume info stripe-volume

Volume Name: stripe-volume
Type: Stripe
Volume ID: 9806d472-d5ff-4ff4-bc32-06921be4b6a0
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: Glusterfs01:/exp5
Brick2: Glusterfs02:/exp5
Brick3: Glusterfs03:/exp5
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
[root@Glusterfs01 /]#
# Mount the volume and create test files
[root@Glusterfs01 ~]# mkdir g7
[root@Glusterfs01 ~]# mount -t glusterfs Glusterfs01:/stripe-volume /root/g7
[root@Glusterfs01 ~]#
[root@Glusterfs01 ~]# df -m /root/g7
Filesystem                 1M-blocks  Used Available Use% Mounted on
Glusterfs01:/stripe-volume     25986  3412     22575  14% /root/g7
[root@Glusterfs01 ~]#

[root@Glusterfs01 ~]# echo "Glusterfs01:/stripe-volume /root/g7 glusterfs defaults 0 0" >>/etc/fstab
[root@Glusterfs01 ~]#

[root@Glusterfs01 g7]# dd if=/dev/zero bs=1024 count=10000 of=/root/g7/10M.file
10000+0 records in
10000+0 records out
10240000 bytes (10 MB) copied, 1.53348 s, 6.7 MB/s
[root@Glusterfs01 g7]# dd if=/dev/zero bs=1024 count=20000 of=/root/g7/20M.file
20000+0 records in
20000+0 records out
20480000 bytes (20 MB) copied, 2.80855 s, 7.3 MB/s
[root@Glusterfs01 g7]#
# Check the size of the new files
[root@Glusterfs01 g7]# ll -h /root/g7
total 30M
-rw-r--r-- 1 root root 9.8M Mar 30 00:42 10M.file
-rw-r--r-- 1 root root  20M Mar 30 00:42 20M.file
[root@Glusterfs01 g7]#
# Where each file actually lands, and its size on each brick
[root@Glusterfs01 ~]# ll -h /exp5
total 9.9M
-rw-r--r-- 2 root root 3.3M Mar 30 00:42 10M.file
-rw-r--r-- 2 root root 6.6M Mar 30 00:42 20M.file
[root@Glusterfs01 ~]#

[root@GlusterFS02 ~]# ll -h /exp5
total 9.8M
-rw-r--r-- 2 root root 3.3M Mar 30 00:42 10M.file
-rw-r--r-- 2 root root 6.5M Mar 30 00:42 20M.file
[root@GlusterFS02 ~]#

[root@GlusterFS03 ~]# ll -h /exp5
total 9.8M
-rw-r--r-- 2 root root 3.3M Mar 30 00:42 10M.file
-rw-r--r-- 2 root root 6.5M Mar 30 00:42 20M.file
[root@GlusterFS03 ~]#
# As shown above, the 10M and 20M files were each split into 3 pieces (the defining trait of striping): writes cycle across the disks of GlusterFS01, GlusterFS02 and GlusterFS03 a chunk at a time, much like RAID 0.

Striped volumes are rarely used in production, because they break files apart: a picture, for example, ends up stored piece by piece across the stripe bricks.
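Worth knowing: striped volumes were later deprecated upstream and removed in GlusterFS 6; a dispersed (erasure-coded) volume is the usual replacement when you want striping-like capacity with redundancy. A hypothetical sketch on these nodes (disp-volume and /exp6 are assumed names, not part of the setup above):

[root@Glusterfs01 /]# gluster volume create disp-volume disperse 3 redundancy 1 Glusterfs01:/exp6 Glusterfs02:/exp6 Glusterfs03:/exp6 force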
Record the mounts in fstab

[root@Glusterfs01 ~]# echo "Glusterfs01:/test-volume /root/g1 glusterfs defaults 0 0" >>/etc/fstab
[root@Glusterfs01 ~]# echo "Glusterfs01:/repl-volume /root/g2 glusterfs defaults 0 0" >>/etc/fstab
[root@Glusterfs01 ~]# echo "Glusterfs01:/raid0-volume /root/g3 glusterfs defaults 0 0" >>/etc/fstab
[root@Glusterfs01 ~]# echo "Glusterfs01:/hehe-volume /root/g5 glusterfs defaults 0 0" >>/etc/fstab
[root@Glusterfs01 ~]# echo "Glusterfs01:/hehehe-volume /root/g6 glusterfs defaults 0 0" >>/etc/fstab
[root@Glusterfs01 ~]#
The result:

[root@Glusterfs01 ~]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Thu Mar 22 19:13:56 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=c12a9776-0e5f-45f5-bbd3-cd6000da9b35 /boot xfs     defaults        0 0
/dev/mapper/centos-swap swap                    swap    defaults        0 0
Glusterfs01:/test-volume /root/g1 glusterfs defaults 0 0
Glusterfs01:/repl-volume /root/g2 glusterfs defaults 0 0
Glusterfs01:/raid0-volume /root/g3 glusterfs defaults 0 0
Glusterfs01:/hehe-volume /root/g5 glusterfs defaults 0 0
Glusterfs01:/hehehe-volume /root/g6 glusterfs defaults 0 0
[root@Glusterfs01 ~]#
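One caution not covered by the transcript: glusterfs mounts need the network and glusterd up, so fstab entries like these can race the boot sequence. Adding _netdev to the options is the usual hedge, so systemd orders the mount after the network comes up, e.g.:

Glusterfs01:/test-volume /root/g1 glusterfs defaults,_netdev 0 0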
Verify the mounts

[root@Glusterfs01 g7]# mount -t fuse.glusterfs
Glusterfs01:/test-volume on /root/g1 type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
Glusterfs01:/repl-volume on /root/g2 type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
Glusterfs01:/raid0-volume on /root/g3 type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
Glusterfs01:/hehehe-volume on /root/g6 type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
Glusterfs01:/stripe-volume on /root/g7 type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
[root@Glusterfs01 g7]#