小知识点:
# yum-config-manager --add-repo ftp://192.168.4.11/pub/iso
# vim /etc/yum.repos.d/192.168.4.11_pub_iso.repo
gpgcheck=0


HTTP状态码:
200:ok
3xx:重定向
4xx:http错误
5xx:服务器内部错误


MB/s 字节    Mbps 比特

调度器:
LVS只到第四层(到端口号---》传输层)  
HAProxy到七层
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
————————————————————————————————————————————

装源码包的时候要学会看安装说明 一般是这两个文件需要装什么依赖包这里面也会详细说明。
INSTALL
README
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++


HAproxy---->程序haproxy

配置HAProxy

1.准备虚拟机vh05.tedu.cn 192.168.4.5/24
2.安装依赖包
#yum install -y gcc pcre-devel pcre
3.安装haproxy

# tar  -zxvf haproxy-1.4.24.tar.gz
# cd haproxy-1.4.24
# uname -r
2.6.32-573.el6.x86_64
# make TARGET=linux2628 USE_PCRE=1 PREFIX=/usr/local/haproxy install

4.修改haproxy的配置文件,做链接
# ls /usr/local/haproxy/
  doc  sbin  share
# ln -s /usr/local/haproxy/sbin/haproxy  /usr/local/sbin
#mkdir /usr/local/haproxy/conf
#cp  examples/haproxy.cfg /usr/local/haproxy/conf/  

# vim /usr/local/haproxy/conf/haproxy.cfg
global
        log 127.0.0.1   local0
        log 127.0.0.1   local1 notice
        #log loghost    local0 info
        maxconn 4096
        chroot /usr/local/haproxy     (添加)
        pidfile /var/run/haproxy.pid    (添加)
        uid 99
        gid 99
        daemon
        #debug
        #quiet
defaults
         stats  uri /mon        (添加健康检查监控网页)
        log     global
        mode    http
        option  httplog
        option  dontlognull
        retries 3
        option redispatch    (添加)
        maxconn 2000
        contimeout      5000
        clitimeout      50000
        srvtimeout      50000
listen  appli1-rewrite 0.0.0.0:80        (修改下监控的web服务器)
        cookie  SERVERID rewrite
        balance roundrobin
        server  web_1 192.168.4.1:80 cookie app1inst1 check inter 2000 rise 2 fall 5
        server  app1_2 192.168.4.2:80 cookie app1inst2 check inter 2000 rise 2 fall 5
5.启动服务
# haproxy -f /usr/local/haproxy/conf/haproxy.cfg
6.测试:客户机(192.168.4.254)
# firefox http://192.168.4.5/
出现网页文件

# firefox http://192.168.4.5/mon  
打开的是健康监控web服务状态页面


7.配置日志服务器:(把HAproxy服务的日志在系统日志记录中看)
# vim /etc/rsyslog.conf  (开启系统日志接收tcp和udp服务的日志)
 13 $ModLoad imudp
 14 $UDPServerRun 514
 15
 16 # Provides TCP syslog reception
 17 $ModLoad imtcp
 18 $InputTCPServerRun 514

# service rsyslog restart

# netstat -anptu |grep :514
tcp        0      0 0.0.0.0:514                 0.0.0.0:*                   LISTEN      3034/rsyslogd       

# tailf /var/log/messages

(真机:192.168.4.254)# firefox http://192.168.4.5/
这时上面系统消息中就会记录


8.修改启动脚本
#pidof haproxy  (查看haproxy服务pid)
#kill $(pidof haproxy)
#pidof haproxy
#haproxy -f /usr/local/haproxy/conf/haproxy.cfg -st $(cat /var/run/haproxy.pid)   重启服务
#pidof haproxy
#cp examples/haproxy.init  /etc/init.d/haproxy
#chmod +x /etc/init.d/haproxy (复制启动脚本)
#ll -ld /etc/init.d/haproxy
#service haproxy status (查看启动脚本是否能正常使用,不能就改脚本文件)
#vim /etc/init.d/haproxy(修改脚本)
修改的内容参照 haproxy1.sh

#service haproxy status (查看脚本现在是否能正常使用)
#service haproxy start
#service haproxy status


这里vh05上做的HAproxy配置,也是给web服务端做负载均衡高可用集群的调度(整体就是一个集群),客户端通过调度器它来访问后端的web服务,当一台web服务挂掉,在调度器健康监控状态网页中就会变红显示,当修复好了,就自动变回正常颜色(相当于lvs做的脚本剔除或加进来)。

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

HSRP:热备份路由协议    思科私有

VRRP:虚拟冗余路由协议  IETF公有


#^4.1 ^4.2
[root@room1pc01 桌面]# scp keepalived-1.2.7.tar.gz  root@192.168.4.1:/root
keepalived-1.2.7.tar.gz                       100%  283KB 282.6KB/s   00:00    
[root@room1pc01 桌面]# ^4.1^4.2
scp keepalived-1.2.7.tar.gz  root@192.168.4.2:/root
keepalived-1.2.7.tar.gz                       100%  283KB 282.6KB/s   00:00    
[root@room1pc01 桌面]#
——————————————————————————————————————————————————————————————————

keepalived热备实现的是高可用集群(当一个节点出现故障另一个节点代替它工作)
配置高可用集群:

1.在两台web服务器上安装依赖包
vh01:
#yum -y install gcc kernel-devel popt-devel openssl-devel
2.安装
#tar -zxf keepalived-1.2.7.tar.gz
#cd keepalived-1.2.7
#./configure --sysconf=/etc
#make && make install
3.配置keepalived
# ln -s /usr/local/sbin/keepalived  /usr/sbin
#vim /etc/keepalived/keepalived.conf

 15 vrrp_instance VI_1 {
 16     state MASTER
 17     interface eth0
 18     virtual_router_id 51
 19     priority 100
 20     advert_int 1
 21     authentication {
 22         auth_type PASS
 23         auth_pass 1111
  24     }
 25      virtual_ipaddress {
  26     192.168.4.200
  27       }
  28   }

4.启动并验证:
#service keepalived start
# ip a s eth0      (可以看到vip)
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 54:52:01:01:06:01 brd ff:ff:ff:ff:ff:ff
    inet 192.168.4.2/24 brd 192.168.4.255 scope global eth0
    inet 192.168.4.200/32 scope global eth0
    inet6 fe80::5652:1ff:fe01:601/64 scope link
       valid_lft forever preferred_lft forever

vh02:
#yum -y install gcc kernel-devel popt-devel openssl-devel
#tar -zxf keepalived-1.2.7.tar.gz
#cd keepalived-1.2.7
#./configure --sysconf=/etc
#make && make install
# ln -s /usr/local/sbin/keepalived  /usr/sbin
#vim /etc/keepalived/keepalived.conf

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.4.200

    }
}

启动并验证:
#service keepalived restart

这时关了vh01--》# service keepalived stop,vh02(就会看到虚拟ip)

# ip a s eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 54:52:01:01:06:01 brd ff:ff:ff:ff:ff:ff
    inet 192.168.4.2/24 brd 192.168.4.255 scope global eth0
    inet 192.168.4.200/32 scope global eth0
    inet6 fe80::5652:1ff:fe01:601/64 scope link
       valid_lft forever preferred_lft forever

(vh01和vh02这里设置的是主从)
——————————————————————————————————————————
——————————————————————————————————————————
如果vh01和vh02也可以作为互为主主
++++++++++
vh01:
 15 vrrp_instance VI_1 {
 16     state MASTER
 17     interface eth0
 18     virtual_router_id 51
 19     priority 100
 20     advert_int 1
 21     authentication {
 22         auth_type PASS
 23         auth_pass 1111
  24     }
 25     virtual_ipaddress {
 26   192.168.4.200
 27    }
 28 }
 29 vrrp_instance VI_2 {
 30     state BACKUP
 31     interface eth0
 32     virtual_router_id 52
 33     priority 90
 34     advert_int 1
 35     authentication {
 36         auth_type PASS
 37         auth_pass 2222
 38     }
 39     virtual_ipaddress {
 40   192.168.4.201
 41    }
 42 }

vh02:
 15 vrrp_instance VI_1 {
 16     state BACKUP
 17     interface eth0
 18     virtual_router_id 51
 19     priority 90
 20     advert_int 1
 21     authentication {
 22         auth_type PASS
 23         auth_pass 1111
  24     }
 25     virtual_ipaddress {
 26         192.168.4.200
 27
 28     }
 29 }
 30
 31 vrrp_instance VI_2 {
 32     state MASTER
 33     interface eth0
 34     virtual_router_id 52
 35     priority 100
 36     advert_int 1
 37     authentication {
 38         auth_type PASS
 39         auth_pass 2222
 40     }
 41     virtual_ipaddress {
 42   192.168.4.201
 43    }
 44 }

[root@vh01 keepalived-1.2.7]# service keepalived start
正在启动 keepalived:                                      [确定]
[root@vh01 keepalived-1.2.7]# !i
ip a s eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 54:52:01:01:05:01 brd ff:ff:ff:ff:ff:ff
    inet 192.168.4.1/24 brd 192.168.4.255 scope global eth0
    inet 192.168.4.200/32 scope global eth0
    inet6 fe80::5652:1ff:fe01:501/64 scope link
       valid_lft forever preferred_lft forever


[root@vh02 keepalived-1.2.7]# ip a s eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 54:52:01:01:06:01 brd ff:ff:ff:ff:ff:ff
    inet 192.168.4.2/24 brd 192.168.4.255 scope global eth0
    inet 192.168.4.201/32 scope global eth0
    inet6 fe80::5652:1ff:fe01:601/64 scope link
       valid_lft forever preferred_lft forever

[root@vh01 keepalived-1.2.7]# service httpd stop

[root@vh02 keepalived-1.2.7]# ip a s eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 54:52:01:01:06:01 brd ff:ff:ff:ff:ff:ff
    inet 192.168.4.2/24 brd 192.168.4.255 scope global eth0
    inet 192.168.4.201/32 scope global eth0
    inet 192.168.4.200/32 scope global eth0
    inet6 fe80::5652:1ff:fe01:601/64 scope link
       valid_lft forever preferred_lft forever

——————————————————————————————————————————————————————————————
——————————————————————————————————————————————————————————————

配置高可用,负载均衡的web集群

1.清理两台web服务器上的keepalived

#service keepalived stop

2.real server需要修改内核参数,在lo上配置vip(因为昨天做实验做了,在那基础上做实验,所以现在不需要再改)

3、把vh04上eth0端口的vip移除,vip由keepalived决定出现在哪一台调度器上
# rm -f /etc/sysconfig/network-scripts/ifcfg-eth0:0
# service network restart

4.清理vh04上的lvs规则(前一天搭的)
# ipvsadm -D -t 192.168.4.100:80

5.创建虚拟机vh06.tedu.cn 192.168.4.6/24 (装好yum,指定本地仓库)
6.在vh06上安装ipvsadm (vh04昨天已经安装好了)
#yum install -y ipvsadm

7、在vh04和vh06两台调度器上配置keepalived
# yum install -y gcc kernel-devel popt-devel openssl-devel
# tar xzf keepalived-1.2.7.tar.gz
# cd keepalived-1.2.7
# ./configure --sysconf=/etc
#  make && make install
#ln -s /usr/local/sbin/keepalived  /usr/sbin

8、修改vh04和vh06keepalived配置文件,添加虚拟ip,同时指定主从(活跃和备份)的调度器
vh04:做主
# vim /etc/keepalived/keepalived.conf

 10    smtp_server 127.0.0.1


 15 vrrp_instance VI_1 {
 16     state MASTER
 17     interface eth0
 18     virtual_router_id 51
 19     priority 100
 20     advert_int 1
 21     authentication {
 22         auth_type PASS
 23         auth_pass 1111
 24     }
 25     virtual_ipaddress {
 26       192.168.4.100
 27    }
 28 }
 29
 30 virtual_server 192.168.4.100  80 {
 31     delay_loop 6
 32     lb_algo rr
 33     lb_kind DR
 34     nat_mask 255.255.255.0
 35     persistence_timeout 50
 36     protocol TCP
 37
38     real_server 192.168.4.1 80 {
 39         weight 1
 40            TCP_CHECK {
 41             connect_timeout 3
 42             nb_get_retry 3
 43             delay_before_retry 3
 44         }
 45     }
 46    real_server 192.168.4.2 80 {
 47         weight 1
 48            TCP_CHECK {
 49             connect_timeout 3
 50             nb_get_retry 3
 51             delay_before_retry 3
 52         }
 53     }
 54 }


# service keepalived restart

# ip a s eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 54:52:01:01:08:01 brd ff:ff:ff:ff:ff:ff
    inet 192.168.4.4/24 brd 192.168.4.255 scope global eth0
    inet 192.168.4.100/32 scope global eth0
    inet6 fe80::5652:1ff:fe01:801/64 scope link
       valid_lft forever preferred_lft forever

#scp /etc/keepalived/keepalived.conf root@192.168.4.6:/etc/keepalived/keepalived.conf



vh06:
#vim /etc/keepalived/keepalived.conf
 和vh04就是这16和19行不一样,vh06做从
 16     state BACKUP
 17     interface eth0
 18     virtual_router_id 51
 19     priority 90

# service keepalived restart

# ip a s eth0 (因为vh06是做从,所以只有当vh04,service keepalived stop,挂掉,从才会启用,虚拟ip就才到vh06上)
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 54:52:01:01:08:01 brd ff:ff:ff:ff:ff:ff
    inet 192.168.4.4/24 brd 192.168.4.255 scope global eth0
    inet 192.168.4.100/32 scope global eth0
    inet6 fe80::5652:1ff:fe01:801/64 scope link
       valid_lft forever preferred_lft forever


vh04和vh06都开启这两个服务
# service ipvsadm start
# service keepalived start

调度器vh04和vh06上都能看到lvs规则监控到所对应的web服务端状态
# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.4.100:80 rr persistent 50
  -> 192.168.4.1:80               Route   1      0          0         
  -> 192.168.4.2:80               Route   1      0          0      

在这:vh04和vh06两台调度器做的是keepalived高可用集群解决单点故障问题,同时都搭了LVS-DR模式为后端web服务做的负载均衡。vh04做的是主,vh06是从,正常情况下只有vh04在做调度工作,当有客户端访问时,vh04:ipvsadm -Ln会更新后面客户访问web的数据,调度算法,虚拟主机,和真实web主机地址等。而vh06数据会一直是0显示,当主vh04挂了,vh06会自动成备份变成主,代替vh04做调度工作,当客户端访问时:vh06:ipvsadm -Ln会更新后面web访问的数据情况。当vh04调度器修复好后会自动把主从vh06上抢过来,这时vh06又成备份调度器了。(当一台web挂掉,会自动从lvs规则中剔除,修复好自动加进,因为vh04跑了一个监控web服务的脚本,昨天的脚本)