安装
yum install moosefs-cgi-3.0.97-1.rhsysv.x86_64.rpm moosefs-cgiserv-3.0.97-1.rhsysv.x86_64.rpm moosefs-master-3.0.97-1.rhsysv.x86_64.rpm -y
编辑配置文件
cd /etc/mfs/
vim mfsmaster.cfg
# name of process to place in syslog messages (default is mfsmaster)
# SYSLOG_IDENT = mfsmaster
SYSLOG_IDENT默认为mfsmaster,若不想使用默认可自行更改
主机名解析
vim /etc/hosts
172.25.31.1 mfsmaster
开启服务
/etc/init.d/moosefs-master start
netstat -antlup
开启服务
/etc/init.d/moosefs-cgiserv start
netstat -antlup | grep 9425
在server2上(server3和server2做同样的操作)
yum install moosefs-chunkserver-3.0.97-1.rhsysv.x86_64.rpm -y
编辑配置文件
vim /etc/mfs/mfshdd.cfg
在最后一行加上
/mnt/chunk1
创建目录修改权限
mkdir /mnt/chunk1
chown mfs.mfs /mnt/chunk1
添加主机名解析
vim /etc/hosts
172.25.31.1 mfsmaster
开启服务
/etc/init.d/moosefs-chunkserver start
访问http://172.25.31.1:9425查看
在客户端测试(客户端ip:172.25.31.250)
安装客户端
yum install -y moosefs-client-3.0.97-1.rhsystemd.x86_64.rpm
编辑配置文件
vim /etc/mfs/mfsmount.cfg
在结尾加上
/mnt/mfs
cd /mnt/mfs/
mkdir dir1
mkdir dir2
[root@foundation31 mfs]# mfsgetgoal dir1/
dir1/: 2
[root@foundation31 mfs]# mfsgetgoal dir2/
dir2/: 2
[root@foundation31 mfs]# mfssetgoal -r 1 dir1/
dir1/:
inodes with goal changed: 1
inodes with goal not changed: 0
inodes with permission denied: 0
[root@foundation31 mfs]# mfsgetgoal dir1/
dir1/: 1
[root@foundation31 mfs]# cd dir1
[root@foundation31 dir1]# cp /etc/passwd .
[root@foundation31 dir1]# ls
passwd
[root@foundation31 dir1]# mfsfileinfo passwd
passwd:
chunk 0: 0000000000000001_00000001 / (id:1 ver:1)
copy 1: 172.25.31.3:9422 (status:VALID)
[root@foundation31 dir1]# cd ../dir2
[root@foundation31 dir2]# cp /etc/passwd .
[root@foundation31 dir2]# mfsfileinfo passwd
passwd:
chunk 0: 0000000000000002_00000001 / (id:2 ver:1)
copy 1: 172.25.31.2:9422 (status:VALID)
copy 2: 172.25.31.3:9422 (status:VALID)
[root@server3 ~]# /etc/init.d/moosefs-chunkserver stop
Stopping mfschunkserver: [ OK ]
[root@foundation31 dir2]# mfsfileinfo passwd
passwd:
chunk 0: 0000000000000002_00000001 / (id:2 ver:1)
copy 1: 172.25.31.2:9422 (status:VALID)
[root@foundation31 dir2]# cd ../dir1
[root@foundation31 dir1]# mfsfileinfo passwd
passwd:
chunk 0: 0000000000000001_00000001 / (id:1 ver:1)
no valid copies !!!
[root@server3 ~]# /etc/init.d/moosefs-chunkserver start
Starting mfschunkserver: [ OK ]
[root@foundation31 dir1]# mfsfileinfo passwd
passwd:
chunk 0: 0000000000000001_00000001 / (id:1 ver:1)
copy 1: 172.25.31.3:9422 (status:VALID)
[root@foundation31 dir1]# dd if=/dev/zero of=file bs=1M count=100
[root@foundation31 dir1]# mfsfileinfo file
file:
chunk 0: 0000000000000003_00000001 / (id:3 ver:1)
copy 1: 172.25.31.3:9422 (status:VALID)
chunk 1: 0000000000000004_00000001 / (id:4 ver:1)
copy 1: 172.25.31.2:9422 (status:VALID)
[root@foundation31 dir1]# cd ../dir2
[root@foundation31 dir2]# dd if=/dev/zero of=file bs=1M count=100
[root@foundation31 dir2]# mfsfileinfo file
file:
chunk 0: 0000000000000005_00000001 / (id:5 ver:1)
copy 1: 172.25.31.2:9422 (status:VALID)
copy 2: 172.25.31.3:9422 (status:VALID)
chunk 1: 0000000000000006_00000001 / (id:6 ver:1)
copy 1: 172.25.31.2:9422 (status:VALID)
copy 2: 172.25.31.3:9422 (status:VALID)
[root@foundation31 dir1]# rm -fr passwd
[root@foundation31 dir1]# ls
file
[root@foundation31 dir1]# mfsgettrashtime .
.: 86400
[root@foundation31 dir1]# mkdir /mnt/mfsmeta
[root@foundation31 dir1]# mfsmount -m /mnt/mfsmeta/
mfsmaster accepted connection with parameters: read-write,restricted_ip
[root@foundation31 dir1]# mount
[root@foundation31 dir1]# cd /mnt/mfsmeta/trash/
[root@foundation31 trash]# ll | grep undel
d-w------- 4098 root root 0 Aug 26 10:48 undel
[root@foundation31 trash]# find -name passwd
./004/00000004|dir1|passwd
[root@foundation31 trash]# mv '004/00000004|dir1|passwd' undel/
[root@foundation31 trash]# cd /mnt/mfs/dir1/
[root@foundation31 dir1]# ls
file passwd
高可用
在server1、4上做同样的操作
安装软件
yum install pacemaker corosync -y
yum install crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm
配置corosync
cd /etc/corosync/
cp corosync.conf.example corosync.conf
vim corosync.conf
修改bindnetaddr一行为
bindnetaddr: 172.25.31.0 #绑定的网络地址
添加
service { #定义一个服务来启动pacemaker
ver: 0 #启动corosync时会自动启动pacemaker
name: pacemaker
}
开启服务
/etc/init.d/corosync start
打开crm监控,查看节点server1,server4是否在线
crm_mon
搭建iSCSI共享存储
在server2上安装
yum install scsi-* -y
编辑配置文件
vim /etc/tgt/targets.conf
# Sample target with one LUN only. Defaults to allow access for all initiators:
<target iqn.2018-08.com.example:server.target1>
backing-store /dev/vdb
</target>
启动tgtd
/etc/init.d/tgtd start
在server1上安装
mkdir /etc/cluster/
yum install -y iscsi-*
登陆
iscsiadm -m discovery -t st -p 172.25.31.2
iscsiadm -m node -l
查看
fdisk -l
格式化
mkfs.ext4 /dev/sda
将mfs的数据文件拷贝到sda中(注意权限)
cd /var/lib/mfs/
mount /dev/sda /mnt
cp -p * /mnt/
ll /mnt/
umount /mnt
挂载后文件的权限会改变,修改权限为mfs.mfs
mount /dev/sda /var/lib/mfs/
ll -d /var/lib/mfs/
chown mfs.mfs /var/lib/mfs/
ll -d /var/lib/mfs/
umount /var/lib/mfs/
修改脚本,当moosefs-master异常关闭时,moosefs-master将不能正常开启,所以修改脚本为
vim /etc/init.d/moosefs-master #修改start函数
# Replacement start() for /etc/init.d/moosefs-master.
# After an unclean shutdown, a plain "mfsmaster start" refuses to run
# (stale metadata); this version falls back to "mfsmaster -a" so the
# master auto-recovers its metadata and the service can still come up.
start () {
echo -n $"Starting $prog: "
# Shell evaluates this left to right with equal precedence:
# (($prog start || $prog -a) && success) || failure
# i.e. "success" is reported if either the normal start or the
# "-a" recovery start worked; "failure" only if both failed.
$prog start >/dev/null 2>&1 || $prog -a >/dev/null 2>&1 && success || failure
RETVAL=$?
echo
# sysv convention: lock file marks the service as running
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/$prog
return $RETVAL
}
在server4上
yum install moosefs-master-3.0.97-1.rhsysv.x86_64.rpm -y
mount /dev/sda /var/lib/mfs/
ll /var/lib/mfs/
/etc/init.d/moosefs-master start
配置集群
查看fence设备
stonith_admin -I
我们要用到的是fence_xvm
如果没有,安装fence-virt即可
查看fence_xvm信息
stonith_admin -M -a fence_xvm
查看fence_xvm.key
ll /etc/cluster/fence_xvm.key
如果没有fence_xvm.key ,由物理机生成再scp到虚拟机即可
开始配置集群
crm(live)# configure
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# property stonith-enabled=true
crm(live)configure# commit
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server1:vm1;server4:vm4" op monitor interval=1min
crm(live)configure# commit
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.31.100 cidr_netmask=32 op monitor interval=30s
crm(live)configure# commit
crm(live)configure# primitive mfsdata ocf:heartbeat:Filesystem params device=/dev/sda directory=/var/lib/mfs fstype=ext4 op monitor interval=1min
crm(live)configure# primitive mfsmaster lsb:moosefs-master op monitor interval=30s
crm(live)configure# group mfsgroup vip mfsdata mfsmaster
crm(live)configure# commit
crm(live)configure# bye
bye