[root@weihao04-vm gluster]# iptables -L
Chain INPUT (policy ACCEPT)
target prot opt source destination
ACCEPT udp -- anywhere anywhere udp dpt:domain
ACCEPT tcp -- anywhere anywhere tcp dpt:domain
ACCEPT udp -- anywhere anywhere udp dpt:bootps
ACCEPT tcp -- anywhere anywhere tcp dpt:bootps
ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
ACCEPT icmp -- anywhere anywhere
ACCEPT all -- anywhere anywhere
ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh
REJECT all -- anywhere anywhere reject-with icmp-host-prohibited
Chain FORWARD (policy ACCEPT)
target prot opt source destination
ACCEPT all -- anywhere 192.168.122.0/24 state RELATED,ESTABLISHED
ACCEPT all -- 192.168.122.0/24 anywhere
ACCEPT all -- anywhere anywhere
REJECT all -- anywhere anywhere reject-with icmp-port-unreachable
REJECT all -- anywhere anywhere reject-with icmp-port-unreachable
REJECT all -- anywhere anywhere reject-with icmp-host-prohibited
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
[root@weihao04-vm gluster]# iptables -F
[root@weihao04-vm gluster]# iptables -L
Chain INPUT (policy ACCEPT)
target prot opt source destination
Chain FORWARD (policy ACCEPT)
target prot opt source destination
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
[root@weihao04-vm gluster]#
在CentOS系统中安装glusterfs并进行简单的配置。
一 server端和client端 glusterfs安装
1. 下载glusterfs对应的rpm包。
下载地址: http://download.gluster.org/pub/gluster/glusterfs/
注意:下载时需下载和系统相匹配的版本。在下载前执行rpm -qa |grep glusterfs查看系统已经安装的glusterfs关联的rpm版本号,然后下载和该版本对应的glusterfs版本。(因为不同版本需求libcrypto.so.* ,libpython*.*.so.*等库依赖版本不同)
[root@weihao01-vm glusterfs]# rpm -qa |grep glusterf
glusterfs-devel-3.4.0-8.el6.x86_64
[root@weihao01-vm glusterfs]#
所以在glusterfs ftp中找到对应的版本进行下载。
下载后文件目录:
[root@weihao01-vm 3.4.16]# ls
glusterfs-3.4.0-8.el6.x86_64.rpm glusterfs-debuginfo-3.4.0-8.el6.x86_64.rpm glusterfs-libs-3.4.0-8.el6.x86_64.rpm
glusterfs-api-3.4.0-8.el6.x86_64.rpm glusterfs-devel-3.4.0-8.el6.x86_64.rpm glusterfs-rdma-3.4.0-8.el6.x86_64.rpm
glusterfs-api-devel-3.4.0-8.el6.x86_64.rpm glusterfs-fuse-3.4.0-8.el6.x86_64.rpm glusterfs-server-3.4.0-8.el6.x86_64.rpm
glusterfs-cli-3.4.0-8.el6.x86_64.rpm glusterfs-geo-replication-3.4.0-8.el6.x86_64.rpm
[root@weihao01-vm 3.4.16]#
2. 安装以及安装确认
执行rpm命令进行包的安装和更新(我做的时候所有包都进行了安装-_-)。
rpm -Uvh --force --nodeps glusterfs-libs-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-api-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-api-devel-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-cli-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-debuginfo-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-devel-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-fuse-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-geo-replication-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-server-3.4.0-8.el6.x86_64.rpm
rpm -Uvh --force --nodeps glusterfs-rdma-3.4.0-8.el6.x86_64.rpm
查看rpm是否安装成功:
[root@weihao01-vm 3.4.16]# rpm -qa |grep glusterfs
glusterfs-api-devel-3.4.0-8.el6.x86_64
glusterfs-rdma-3.4.0-8.el6.x86_64
glusterfs-fuse-3.4.0-8.el6.x86_64
glusterfs-libs-3.4.0-8.el6.x86_64
glusterfs-cli-3.4.0-8.el6.x86_64
glusterfs-geo-replication-3.4.0-8.el6.x86_64
glusterfs-api-3.4.0-8.el6.x86_64
glusterfs-debuginfo-3.4.0-8.el6.x86_64
glusterfs-server-3.4.0-8.el6.x86_64
glusterfs-devel-3.4.0-8.el6.x86_64
glusterfs-3.4.0-8.el6.x86_64
[root@weihao01-vm 3.4.16]#
查看glusterfs是否可以正常运行:
[root@weihao01-vm 3.4.16]# /etc/init.d/glusterd start
Starting glusterd: [ OK ]
[root@weihao01-vm 3.4.16]#
可以正常运行。
停止glusterfs的运行:
[root@weihao01-vm 3.4.16]# /etc/init.d/glusterd stop
[root@weihao01-vm 3.4.16]# [ OK ]
Glusterfs停止运行。
Server端和client端 glusterfs安装完成。
二 简单配置服务端和客户端
进行glusterfs简单的配置。共使用2个server(server1:192.168.100.11, server2:192.168.100.12)和1个client(192.168.100.14)。
1. 服务端配置文件配置
Server1(192.168.100.11)创建配置文件如下所示:
[root@weihao01-vm glusterfs]# cat glusterfsd.vol
#指定一个卷,路径为/opt/gluster,作为服务器文件
volume brick
type storage/posix #指定一个本地目录给GlusterFS内的一个卷使用
option directory /opt/gluster
end-volume
#设置卷brick为锁中继(关于中继在附录中介绍)
volume locker
type features/posix-locks #锁中继,只能用于服务器端的posix中继之上,表示给这个卷提供加锁(fcntl locking)的功能;
subvolumes brick
end-volume
#设置卷brick为服务器模式,并指定IP和检测端口,同时设置卷的使用权限为*(全部授权),也可以设置成部分授权,如:192.168.100.*
volume server
type protocol/server #服务器中继,表示此节点在GlusterFS中为服务器模式,可以说明其IP、守护端口、访问权限
option transport-type tcp/server
option bind-address 192.168.100.11 #Server2时IP配置为: 192.168.100.12
option listen-port 6996
subvolumes locker
option auth.addr.brick.allow *
option auth.addr.locker.allow *
end-volume
[root@weihao01-vm glusterfs]#
2. 客户端配置文件配置
Client1(192.168.100.14)创建配置文件如下所示:
[root@weihao04-vm glusterfs]# cat glusterfs.vol
#指向Server1:192.168.100.11服务器的客户端访问配置
volume client1
type protocol/client #客户端中继,用于客户端连接服务器时使用,需要指明服务器IP和定义好的卷
option transport-type tcp/client
option remote-host 192.168.100.11
option transport.socket.remote-port 6996
option remote-subvolume locker
end-volume
#指向Server2:192.168.100.12服务器的客户端访问配置
volume client2
type protocol/client
option transport-type tcp/client
option remote-host 192.168.100.12
# option transport.socket.remote-port 6996
option remote-port 6996
option remote-subvolume locker
end-volume
#将client1和client2设置成复制模式
volume bricks
type cluster/replicate
subvolumes client1 client2
end-volume
[root@weihao04-vm glusterfs]#
3. 启动
启动server1和server2并在/tmp/glusterfsd.log(路径文件可自己指定)中查看log信息。
启动命令如下所示:
[root@weihao01-vm glusterfs]# glusterfsd -f /usr/local/etc/glusterfs/glusterfsd.vol -l /tmp/glusterfsd.log
[root@weihao01-vm glusterfs]# tail -f /tmp/glusterfsd.log
16: option transport-type tcp/server
17: option bind-address 192.168.100.11 #Server2时IP配置为: 192.168.100.12
18: option listen-port 6996
19: subvolumes locker
20: option auth.addr.brick.allow *
21: option auth.addr.locker.allow *
22: end-volume
23:
+------------------------------------------------------------------------------+
启动client1并在/tmp/glusterfs.log(路径文件可自己指定)中查看log信息。
启动命令如下所示:
[root@weihao04-vm glusterfs]# glusterfs -f /usr/local/etc/glusterfs/glusterfs.vol -l /tmp/glusterfs.log /data/glusterfs/
[root@weihao04-vm glusterfs]# tail -f /tmp/glusterfs.log
[2014-05-19 10:21:38.473815] I [client-handshake.c:1456:client_setvolume_cbk] 0-client1: Connected to 192.168.100.11:6996, attached to remote volume 'locker'.
[2014-05-19 10:21:38.473868] I [client-handshake.c:1468:client_setvolume_cbk] 0-client1: Server and Client lk-version numbers are not same, reopening the fds
[2014-05-19 10:21:38.474139] I [client-handshake.c:450:client_set_lk_version_cbk] 0-client1: Server lk version = 1
[2014-05-19 10:21:59.423537] I [afr-common.c:2057:afr_set_root_inode_on_first_lookup] 0-bricks: added root inode
[2014-05-20 00:01:16.117221] W [socket.c:514:__socket_rwv] 0-client2: readv failed (No data available)
[2014-05-20 00:01:16.117342] W [socket.c:1962:__socket_proto_state_machine] 0-client2: reading from socket failed. Error (No data available), peer (192.168.100.12:6996)
[2014-05-20 00:01:16.117451] I [client.c:2097:client_rpc_notify] 0-client2: disconnected
[2014-05-20 01:39:41.087290] I [glusterfsd.c:1910:main] 0-glusterfs: Started running glusterfs version 3.4.0 (glusterfs -f /usr/local/etc/glusterfs/glusterfs.vol -l /tmp/glusterfs.log /data/glusterfs/)
[2014-05-20 01:39:41.087813] E [fuse-bridge.c:4973:init] 0-fuse: Mountpoint /data/glusterfs/ seems to have a stale mount, run 'umount /data/glusterfs/' and try again.
[2014-05-20 01:39:41.087846] E [xlator.c:390:xlator_init] 0-fuse: Initialization of volume 'fuse' failed, review your volfile again
^C
[root@weihao04-vm glusterfs]#
在log中发现了一个错误: run 'umount /data/glusterfs/' and try again.所以我们需要先执行: umount /data/glusterfs/然后再次执行。
[root@weihao04-vm glusterfs]# umount /data/glusterfs/
[root@weihao04-vm glusterfs]# glusterfs -f /usr/local/etc/glusterfs/glusterfs.vol -l /tmp/glusterfs.log /data/glusterfs/
[root@weihao04-vm glusterfs]# tail -f /tmp/glusterfs.log
[2014-05-20 01:52:36.453956] I [client-handshake.c:1456:client_setvolume_cbk] 0-client1: Connected to 192.168.100.11:6996, attached to remote volume 'locker'.
[2014-05-20 01:52:36.453996] I [client-handshake.c:1468:client_setvolume_cbk] 0-client1: Server and Client lk-version numbers are not same, reopening the fds
[2014-05-20 01:52:36.454074] I [afr-common.c:3698:afr_notify] 0-bricks: Subvolume 'client1' came back up; going online.
[2014-05-20 01:52:36.454284] I [client-handshake.c:450:client_set_lk_version_cbk] 0-client1: Server lk version = 1
[2014-05-20 01:52:36.456319] I [client-handshake.c:1456:client_setvolume_cbk] 0-client2: Connected to 192.168.100.12:6996, attached to remote volume 'locker'.
[2014-05-20 01:52:36.456345] I [client-handshake.c:1468:client_setvolume_cbk] 0-client2: Server and Client lk-version numbers are not same, reopening the fds
[2014-05-20 01:52:36.477231] I [fuse-bridge.c:4726:fuse_graph_setup] 0-fuse: switched to graph 0
[2014-05-20 01:52:36.477459] I [client-handshake.c:450:client_set_lk_version_cbk] 0-client2: Server lk version = 1
[2014-05-20 01:52:36.477662] I [fuse-bridge.c:3682:fuse_init] 0-glusterfs-fuse: FUSE inited with protocol versions: glusterfs 7.13 kernel 7.13
[2014-05-20 01:52:36.478430] I [afr-common.c:2057:afr_set_root_inode_on_first_lookup] 0-bricks: added root inode
这样就OK了,现在进入client创建和操作文件,发现server端也会有对应的文件。
其他问题点:
在client端启动glusterfs后,可能会发现如下错误:
[2014-05-19 10:20:56.398025] W [socket.c:514:__socket_rwv] 0-client2: readv failed (No data available)
[2014-05-19 10:20:56.398302] I [fuse-bridge.c:3682:fuse_init] 0-glusterfs-fuse: FUSE inited with protocol versions: glusterfs 7.13 kernel 7.13
[2014-05-19 10:20:59.389793] W [socket.c:514:__socket_rwv] 0-client1: readv failed (No data available)
[2014-05-19 10:21:00.393177] W [socket.c:514:__socket_rwv] 0-client2: readv failed (No data available)
[2014-05-19 10:21:02.396760] W [socket.c:514:__socket_rwv] 0-client1: readv failed (No data available)
[2014-05-19 10:21:03.399659] W [socket.c:514:__socket_rwv] 0-client2: readv failed (No data available)
没有和server1和server2创建连接,然后查看iptables发现对端口进行了限制,删除设定的iptables规则(客户端和服务端都要进行修改),连接创建成功。
三 GlusterFS常用的中继介绍
1. storage/posix #指定一个本地目录给GlusterFS内的一个卷使用;
2. protocol/server #服务器中继,表示此节点在GlusterFS中为服务器模式,可以说明其IP、守护端口、访问权限;
3. protocol/client #客户端中继,用于客户端连接服务器时使用,需要指明服务器IP和定义好的卷;
4. cluster/replicate #复制中继,备份文件时使用,若某子卷掉了,系统仍能正常工作,子卷起来后自动更新(通过客户端);
5. cluster/distribute #分布式中继,可以把两个卷或子卷组成一个大卷,实现多存储空间的聚合;
6. features/locks #锁中继,只能用于服务器端的posix中继之上,表示给这个卷提供加锁(fcntl locking)的功能;
7. performance/read-ahead #预读中继,属于性能调整中继的一种,用预读的方式提高读取的性能,有利于应用频繁持续性的访问文件,当应用完成当前数据块读取的时候,下一个数据块就已经准备好了,主要是在IB-verbs或10G的以太网上使用;
8. performance/write-behind #回写中继,属于性能调整中继的一种,作用是在写数据时,先写入缓存内,再写入硬盘,以提高写入的性能,适合用于服务器端;
9. performance/io-threads #IO线程中继,属于性能调整中继的一种,由于glusterfs服务是单线程的,使用IO 线程转换器可以较大的提高性能,这个转换器最好是被用于服务器端;
10. performance/io-cache #IO缓存中继,属于性能调整中继的一种,作用是缓存住已经被读过的数据,以提高IO性能,当IO 缓存中继检测到有写操作的时候,它就会把相应的文件从缓存中删除,需要设置文件匹配列表及其设置的优先级等内容;
11. cluster/stripe #条带中继,将单个大文件分成多个小文件存于各个服务器中,实现大文件的分块存储。