This article is not yet finished; please do not use it as a reference.

Environment: Fedora 15

(1) Configuring heartbeat + LVS

Environment:

Two heartbeat (LVS director) servers:
lvs1 192.168.0.94

VIP: 192.168.0.96 --- the heartbeat/virtual IP

lvs2 192.168.0.95

Real server: 192.168.0.91 gfs-node-01
Real server: 192.168.0.92 gfs-node-02

Shared storage server: 192.168.0.93 share
---------------------------------------------------------
LVS1 is configured as follows. First clear the firewall rules:

iptables -F
iptables -X
service iptables save
ip6tables -F
ip6tables -X
service ip6tables save

Disable SELinux:
setenforce 0

Install the piranha package:
yum -y install piranha

Set a password for the piranha web configuration interface, then start the GUI:
[root@lvs1 ~]# piranha-passwd
service piranha-gui start

chkconfig pulse on
chkconfig piranha-gui on
chkconfig httpd on

Configure the virtual server through the piranha web GUI:
http://192.168.0.94:3636
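If the page does not load, it helps to first confirm the GUI is actually listening on port 3636; a quick check run on the director (assuming curl is available):

# the piranha GUI should be listening on TCP 3636
netstat -tlnp | grep 3636
# an HTTP response here means the login page is reachable
curl -I http://192.168.0.94:3636/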


service pulse start
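Once pulse is running, the active director should bring up the VIP and load the virtual server table; a quick sanity check (assuming ipvsadm is installed, which piranha normally pulls in, and that the VIP sits on eth0:1 as in lvs.cf below):

# the VIP 192.168.0.96 should appear as eth0:1 on the active director
ip addr show eth0
# the IPVS table should show 192.168.0.96:80 with both real servers behind it
ipvsadm -L -n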

Copy the configuration to the backup director:
scp /etc/piranha/lvs.cf root@192.168.0.95:/etc/piranha/

Run the following two commands on the real servers:
gfs-node-01# iptables -t nat -A PREROUTING -p tcp -d 192.168.0.96 --dport 80 -j REDIRECT

gfs-node-02# iptables -t nat -A PREROUTING -p tcp -d 192.168.0.96 --dport 80 -j REDIRECT
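With network = direct, the real servers receive packets that are still addressed to the VIP; the REDIRECT rule above hands those packets to the local web server, which avoids having to configure the VIP on a loopback alias. The nat rule is lost on reboot, so it is worth persisting it with the stock init scripts (a sketch, run on each real server):

# save the current rules to /etc/sysconfig/iptables and load them at boot
service iptables save
chkconfig iptables on
# verify the REDIRECT rule is in place
iptables -t nat -L PREROUTING -n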

 

 

[root@lvs01-master piranha]# cat lvs.cf
serial_no = 76
primary = 192.168.0.94
service = lvs
backup_active = 1
backup = 192.168.0.95
heartbeat = 1
heartbeat_port = 539
keepalive = 6
deadtime = 18
network = direct
debug_level = NONE
monitor_links = 0
syncdaemon = 0
virtual http {
     active = 1
     address = 192.168.0.96 eth0:1
     vip_nmask = 255.255.255.0
     port = 80
     send = "GET / HTTP/1.0\r\n\r\n"
     expect = "HTTP"
     use_regex = 0
     load_monitor = none
     scheduler = rr
     protocol = tcp
     timeout = 6
     reentry = 15
     quiesce_server = 1
     server gfs-node-01 {
         address = 192.168.0.91
         active = 1
         weight = 1
     }
     server gfs-node-02 {
         address = 192.168.0.92
         active = 1
         weight = 1
     }
}
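With lvs.cf in place on both directors, round-robin balancing can be exercised from any other machine; a rough test, assuming each real server serves a page that identifies itself (for example its hostname in index.html):

# repeated requests to the VIP should alternate between gfs-node-01 and gfs-node-02
for i in 1 2 3 4; do curl -s http://192.168.0.96/; done
# on the active director, the per-real-server counters should increase
ipvsadm -L -n --stats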

 ---------------------------------------------------------------------------------------------------

(2) Shared disk server: 192.168.0.93 (share)

1. Disable SELinux and clear the firewall rules:
iptables -F
iptables -X
service iptables save
ip6tables -F
ip6tables -X
service ip6tables save

setenforce 0

2. Install the iSCSI target packages:
yum install scsi-target-utils iscsi-initiator-utils -y

service tgtd start
chkconfig tgtd on
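tgtd listens on TCP 3260 by default; a quick check that it came up:

# the iSCSI target daemon should be listening on 3260
netstat -tlnp | grep 3260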

3. Configure the target
a) Create a new target device:
# tgtadm --lld iscsi --op new --mode target --tid 1 -T rhcs-storage

View the target device:
# tgtadm --lld iscsi --op show --mode target

b) Add the previously created partition to the target device as LUN 1:
# tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 -b /dev/sda6

c) View the output after the LUN has been added:
# tgtadm --lld iscsi --op show --mode target
d) Set which initiator nodes are allowed to access this target device. Here gfs-node-01 and gfs-node-02 are allowed:
# tgtadm --lld iscsi --op bind --mode target --tid 1 -I 192.168.0.91
# tgtadm --lld iscsi --op bind --mode target --tid 1 -I 192.168.0.92
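Note that targets, LUNs and ACLs created with tgtadm on the command line do not survive a tgtd restart. One way to make the same definition persistent is /etc/tgt/targets.conf; a minimal sketch assuming the same backing partition and initiators as above:

# /etc/tgt/targets.conf
<target rhcs-storage>
    backing-store /dev/sda6
    initiator-address 192.168.0.91
    initiator-address 192.168.0.92
</target>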


-------------------------------------------------------------------------------------------
On gfs-node-01 and gfs-node-02:


1. Disable the firewall and SELinux
iptables -F
iptables -X
service iptables save
ip6tables -F
ip6tables -X
service ip6tables save

setenforce 0
2. Edit /etc/hosts
vim /etc/hosts
192.168.0.91 gfs-node-01
192.168.0.92 gfs-node-02

3. Install luci and the cluster packages to configure HA

yum install ricci luci  openais cman rgmanager lvm2-cluster gfs2-utils


service ricci start
service luci start
service rgmanager start
service clvmd start
service cman start
service gfs2 start

chkconfig rgmanager on
chkconfig clvmd on
chkconfig cman on
chkconfig gfs2 on
chkconfig ricci on
chkconfig acpid off
chkconfig sendmail off
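clvmd only manages clustered volume groups when LVM is switched to cluster-wide locking; if vgcreate later refuses to create a clustered volume group, this is the usual fix (lvmconf ships with lvm2-cluster):

# set locking_type = 3 in /etc/lvm/lvm.conf
lvmconf --enable-cluster
service clvmd restart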


Client side

Check that the target server is discovered correctly:
[root@client01 ~]# iscsiadm -m discovery -t sendtargets -p 192.168.0.93:3260

Log in to the target:
[root@client01 ~]# iscsiadm -m node -T rhcs-storage -p 192.168.0.93:3260 -l

Log out of the target:
iscsiadm -m node -T rhcs-storage -p 192.168.0.93:3260 -u
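So that the nodes re-attach the shared disk on every boot, the recorded node can be switched to automatic startup; a sketch assuming the same target name as above:

# log in to the target automatically at boot
iscsiadm -m node -T rhcs-storage -p 192.168.0.93:3260 --op update -n node.startup -v automatic
chkconfig iscsi on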


Create a physical volume:
[root@gfs-node-01 ~]# pvcreate -ff /dev/sdc
  Physical volume "/dev/sdc" successfully created
Create a volume group:
[root@gfs-node-01 ~]# vgcreate gfsvg /dev/sdc
  Clustered volume group "gfsvg" successfully created
Create a logical volume:
[root@gfs-node-01 ~]# lvcreate -L 4G -n vg01 gfsvg


mkfs.gfs2 -p lock_dlm -t cluster-gfs:data1 -j 2 /dev/gfsvg/vg01
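Here -t clustername:fsname must match the cluster name in /etc/cluster/cluster.conf and -j 2 creates one journal per node. A minimal cluster.conf sketch for the two nodes, assuming the cluster is called cluster-gfs as above (fencing is left out, matching the note below):

<?xml version="1.0"?>
<cluster name="cluster-gfs" config_version="1">
  <cman two_node="1" expected_votes="1"/>
  <clusternodes>
    <clusternode name="gfs-node-01" nodeid="1"/>
    <clusternode name="gfs-node-02" nodeid="2"/>
  </clusternodes>
</cluster>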

Mount the filesystem: since there is no fence device, ....
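For reference, the mount itself would look like the sketch below; without working fencing a clustered mount may hang, and for a single-node test the cluster lock manager can be bypassed (the mount point /mnt/gfs is only illustrative):

mkdir -p /mnt/gfs
# normal clustered mount (needs cman, clvmd and fencing to be healthy)
mount -t gfs2 /dev/gfsvg/vg01 /mnt/gfs
# single-node testing only: bypass the DLM entirely
mount -t gfs2 -o lockproto=lock_nolock /dev/gfsvg/vg01 /mnt/gfs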

 -----------------------------------------------------------------------------------------------

For reference, a transcript of removing the old share/share10 LVM volumes:

Disk /dev/mapper/share-share10 doesn't contain a valid partition table
[root@gfs-node-01 /]# lvcreate -L 10G -n share10 share^C
[root@gfs-node-01 /]# lvre
lvreduce  lvremove  lvrename  lvresize
[root@gfs-node-01 /]# lvremove share10
  Volume group "share10" not found
  Skipping volume group share10
[root@gfs-node-01 /]# lvremove /dev/
Display all 186 possibilities? (y or n)

[root@gfs-node-01 /]# lvremove /dev/mapper/share-share10
Do you really want to remove active logical volume share10? [y/n]: y

  Logical volume "share10" successfully removed
[root@gfs-node-01 /]# vgremove share

  Volume group "share" successfully removed
[root@gfs-node-01 /]# pvremove /dev/sda6
  Labels on physical volume "/dev/sda6" succ