Lesson 29 Preview

Linux Cluster Architecture (Part 2)

Contents

8. LVS DR Mode Setup
9. keepalived + LVS
10. Further Reading

 

8. LVS DR Mode Setup

1. Lab environment:

Four machines:

client: 192.168.76.150

Director节点: (ens32 192.168.76.155 vip ens32:0 192.168.76.158)

Real server1: (ens32 192.168.76.156 vip lo:0 192.168.76.158)

Real server2: (ens32 192.168.76.157 vip lo:0 192.168.76.158)

2. Installation

// The two real servers need a web service; it was installed earlier, so that step is skipped here.

// Install the ipvsadm package on the director (see the LVS NAT section for reference).

[root@lvs-dr ~]# yum -y install ipvsadm

3. Configure the script on the director

[root@lvs-dr1 ~]# vim /usr/local/sbin/lvs-dr.sh

#!/bin/bash

echo 1 > /proc/sys/net/ipv4/ip_forward

ipv=/usr/sbin/ipvsadm

vip=192.168.76.158

rs1=192.168.76.156

rs2=192.168.76.157

ifconfig ens32:0 $vip broadcast $vip netmask 255.255.255.255 up

route add -host $vip dev ens32:0

$ipv -C

$ipv -A -t $vip:80 -s rr

$ipv -a -t $vip:80 -r $rs1:80 -g -w 3

$ipv -a -t $vip:80 -r $rs2:80 -g -w 1

 

// Grant 755 permissions

[root@lvs-dr1 ~]# chmod 755 /usr/local/sbin/lvs-dr.sh

 

// Run the script

[root@lvs-dr1 ~]# /usr/local/sbin/lvs-dr.sh

 

// Check the status

[root@lvs-dr1 ~]# ipvsadm -L -n

IP Virtual Server version 1.2.1 (size=4096)

Prot LocalAddress:Port Scheduler Flags

  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn

TCP  192.168.76.158:80 rr

  -> 192.168.76.156:80                 Route   3      0          0         

  -> 192.168.76.157:80                 Route   1      0          0         
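
To confirm that traffic is actually being forwarded once clients start connecting, the connection table and counters can also be inspected on the director. A minimal sketch, using the same ipvsadm install as above (output omitted):

[root@lvs-dr1 ~]# ipvsadm -L -n -c        # list the current connection entries

[root@lvs-dr1 ~]# ipvsadm -L -n --stats   # per-service / per-real-server packet and byte counters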

4. Configure the script on the two real servers

[root@lvs-backend1 ~]# vim /usr/local/sbin/lvs-dr-rs.sh

#!/bin/bash

vip=192.168.76.158

ifconfig lo:0 $vip broadcast $vip netmask 255.255.255.255 up

route add -host $vip lo:0

echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore

echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce

echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore

echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce

 

// Grant 755 permissions, then run

[root@lvs-backend1 ~]# chmod 755 /usr/local/sbin/lvs-dr-rs.sh  

 

// Run

[root@lvs-backend1 ~]# /usr/local/sbin/lvs-dr-rs.sh
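
Before testing, it can help to verify on each real server that the VIP is bound to loopback and that the ARP parameters took effect. A quick check might look like this (a sketch; the exact output will vary):

[root@lvs-backend1 ~]# ip addr show lo                # 192.168.76.158/32 should appear on lo:0

[root@lvs-backend1 ~]# sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce

net.ipv4.conf.all.arp_ignore = 1

net.ipv4.conf.all.arp_announce = 2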

5. Testing

// The rr scheduling algorithm is currently in use

Last login: Mon Jul 23 14:47:55 2018

[root@localhost ~]# ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

       valid_lft forever preferred_lft forever

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

    link/ether 00:0c:29:a2:07:b1 brd ff:ff:ff:ff:ff:ff

    inet 192.168.76.150/24 brd 10.0.1.255 scope global ens32

       valid_lft forever preferred_lft forever

    inet6 fe80::20c:29ff:fea2:7b1/64 scope link

       valid_lft forever preferred_lft forever

[root@localhost ~]# curl 192.168.76.158

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!
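
Note that the director script assigns weights 3 and 1 but uses the rr scheduler, which ignores weights, so the requests simply alternate as shown above. If the weights should take effect, the virtual service could be switched to weighted round robin, for example (a sketch, run on the director):

[root@lvs-dr1 ~]# ipvsadm -E -t 192.168.76.158:80 -s wrr    # edit the existing virtual service to use wrr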

9. keepalived + LVS

LVS can load-balance traffic, but it cannot perform health checks: if a real server fails, LVS keeps forwarding requests to the failed server and those requests are lost. keepalived can perform health checks and at the same time provides high availability for LVS itself, removing the LVS single point of failure; in fact, keepalived was originally written for LVS.

1. Lab environment

Four nodes:

Keepalived1 + lvs1(Director1):192.168.76.155
Keepalived2 + lvs2(Director2):192.168.76.159
Real server1:192.168.76.156
Real server2:192.168.76.157
VIP:192.168.76.158

2. Software installation

// Install on both keepalived + LVS nodes

[root@localhost ~]# yum install ipvsadm keepalived -y

// Install nginx on the two real servers; it was already installed in the earlier environment, so that step is skipped here

3. Set up the configuration scripts

// Create the script on both real server nodes

[root@lvs-backend1 ~]# vim /usr/local/sbin/lvs-dr-rs.sh

#!/bin/bash

vip=192.168.76.158

ifconfig lo:0 $vip broadcast $vip netmask 255.255.255.255 up

route add -host $vip lo:0

echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore

echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce

echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore

echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce

 

// Grant 755 permissions, then run

[root@lvs-backend1 ~]# chmod 755 /usr/local/sbin/lvs-dr-rs.sh  

 

// Run

[root@lvs-backend1 ~]# /usr/local/sbin/lvs-dr-rs.sh

 

// Configure the two keepalived nodes

// Configuration file on the master node

[root@lvs-dr1 ~]# vim /etc/keepalived/keepalived.conf

vrrp_instance VI_1 {

    # On the backup server this is BACKUP

    state MASTER

    interface ens32

    virtual_router_id 51

    # On the backup server this is 90

    priority 100

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass aminglinux

    }

    virtual_ipaddress {

        192.168.76.158

    }

}

virtual_server 192.168.76.158 80 {

    # (poll real server status every 10 seconds)

    delay_loop 10

    # (LVS scheduling algorithm)

    lb_algo wrr

    # (DR mode)

    lb_kind DR

    # (connections from the same IP go to the same real server for 60 seconds)

    # commented out in this lab, otherwise the round-robin effect would not be visible

    #persistence_timeout 60

    # (check real server status over TCP)

    protocol TCP

 

    real_server 192.168.76.156 80 {

        # (weight)

        weight 1

        TCP_CHECK {

        # (time out after 10 seconds with no response)

        connect_timeout 10

        nb_get_retry 3

        delay_before_retry 3

        connect_port 80

        }

    }

    real_server 192.168.76.157 80 {

        weight 1

        TCP_CHECK {

        connect_timeout 10

        nb_get_retry 3

        delay_before_retry 3

        connect_port 80

        }

     }

}

 

// Backup node

[root@lvs-backend2 ~]# vim /etc/keepalived/keepalived.conf

vrrp_instance VI_1 {

    # BACKUP on the backup server

    state BACKUP

    interface ens32

    virtual_router_id 51

    priority 90

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass aminglinux

    }

    virtual_ipaddress {

        192.168.76.158

    }

}

virtual_server 192.168.76.158 80 {

    # (poll real server status every 10 seconds)

    delay_loop 10

    # (LVS scheduling algorithm)

    lb_algo rr

    # (DR mode)

    lb_kind DR

    # (connections from the same IP go to the same real server for 60 seconds)

    #persistence_timeout 60

    # (check real server status over TCP)

    protocol TCP

 

    real_server 192.168.76.156 80 {

        # (weight)

        weight 1

        TCP_CHECK {

        # (time out after 10 seconds with no response)

        connect_timeout 10

        nb_get_retry 3

        delay_before_retry 3

        connect_port 80

        }

    }

    real_server 192.168.76.157 80 {

        weight 1

        TCP_CHECK {

        connect_timeout 10

        nb_get_retry 3

        delay_before_retry 3

        connect_port 80

        }

     }

}
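
Before starting the service, the configuration can be sanity-checked; newer keepalived releases ship a config-test mode, and once the daemon is started in the next steps, parse errors and VRRP state transitions also show up in the journal. A sketch:

[root@lvs-dr1 ~]# keepalived -t -f /etc/keepalived/keepalived.conf    # config-test mode (newer versions only)

[root@lvs-dr1 ~]# journalctl -u keepalived --no-pager | tail          # inspect errors / VRRP transitions after start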

4. Enable IP forwarding on both keepalived nodes

[root@lvs-dr1 ~]# echo 1 > /proc/sys/net/ipv4/ip_forward
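
The echo above only changes the running kernel. To make forwarding survive a reboot, the setting could also be persisted with sysctl (a sketch, assuming the standard /etc/sysctl.conf location):

[root@lvs-dr1 ~]# echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf

[root@lvs-dr1 ~]# sysctl -p    # reload; should print net.ipv4.ip_forward = 1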

5. Start keepalived on both nodes

[root@lvs-dr1 ~]# systemctl start keepalived.service

[root@lvs-dr2 ~]# systemctl start keepalived.service
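
Unlike section 8, no lvs-dr.sh script is needed on the directors here: keepalived creates the LVS virtual server and real servers from keepalived.conf itself. This can be verified on the master (a sketch; output omitted):

[root@lvs-dr1 ~]# ipvsadm -L -n                     # 192.168.76.158:80 with both real servers should be listed

[root@lvs-dr1 ~]# ip addr show ens32 | grep 76.158  # the VIP should be bound on the master's ens32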

6. Testing

// Test 1: manually stop nginx on the 192.168.76.156 node, then test access from the client

// On 192.168.76.156

[root@lvs-backend1 ~]# /usr/local/nginx/sbin/nginx -s stop

[root@lvs-backend1 ~]# lsof -i :80
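
While nginx is down on backend1, the TCP_CHECK health check should drop that real server from the LVS table on the active director within roughly delay_loop seconds; this can be confirmed there (a sketch):

[root@lvs-dr1 ~]# ipvsadm -L -n    # 192.168.76.156:80 should no longer appear under the virtual service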

// Test from the 192.168.76.150 client

Last login: Mon Jul 23 14:49:10 2018 from 10.0.1.229

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

// Result as expected: requests never reach the 192.168.76.156 node; only the content from the 192.168.76.157 node is returned.

 

// Test 2: manually start nginx again on the 192.168.76.156 node, then test access from the client

// On 192.168.76.156

[root@lvs-backend1 ~]# lsof -i :80

[root@lvs-backend1 ~]# /usr/local/nginx/sbin/nginx

[root@lvs-backend1 ~]# lsof -i :80

COMMAND  PID  USER   FD   TYPE DEVICE SIZE/OFF NODE NAME

nginx   2969  root    6u  IPv4  48805      0t0  TCP *:http (LISTEN)

nginx   2970 nginx    6u  IPv4  48805      0t0  TCP *:http (LISTEN)

nginx   2971 nginx    6u  IPv4  48805      0t0  TCP *:http (LISTEN)
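
With nginx listening again, the health check should add 192.168.76.156 back to the virtual service automatically; checking on the director (a sketch):

[root@lvs-dr1 ~]# ipvsadm -L -n    # both real servers should be listed again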

// Test from 192.168.76.150

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

// Result as expected: requests alternate between the 192.168.76.156 and 192.168.76.157 nodes according to the rr scheduling algorithm.

 

// Test keepalived's HA behavior

// ip addr shows that the VIP 192.168.76.158 is currently on the master

[root@lvs-dr1 ~]# ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

       valid_lft forever preferred_lft forever

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

    link/ether 00:0c:29:85:24:8c brd ff:ff:ff:ff:ff:ff

    inet 192.168.76.155/24 brd 10.0.1.255 scope global ens32

       valid_lft forever preferred_lft forever

    inet 192.168.76.158/32 scope global ens32

       valid_lft forever preferred_lft forever

    inet6 fe80::20c:29ff:fe85:248c/64 scope link 

       valid_lft forever preferred_lft forever

// Stop keepalived on the master

[root@lvs-dr1 ~]# systemctl stop keepalived.service

[root@lvs-dr1 ~]#
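
Within a few advert intervals the backup node should log a VRRP transition to MASTER state; the exact messages vary by version, but they can be seen in its journal (a sketch):

[root@lvs-dr2 ~]# journalctl -u keepalived --no-pager | tail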

 

// Check on dr2: it has taken over the VIP

[root@lvs-dr2 ~]# ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

       valid_lft forever preferred_lft forever

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

    link/ether 00:0c:29:dd:53:4e brd ff:ff:ff:ff:ff:ff

    inet 192.168.76.159/24 brd 10.0.1.255 scope global ens32

       valid_lft forever preferred_lft forever

    inet 192.168.76.158/32 scope global ens32

       valid_lft forever preferred_lft forever

    inet6 fe80::c388:e67a:4ac3:6566/64 scope link 

       valid_lft forever preferred_lft forever

 

// Test from 192.168.76.150

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

I am Lvs-backend1!!!

[root@localhost ~]# curl 192.168.76.158

I am lvs-backend2!!!

[root@localhost ~]# curl 192.168.76.158

// The backend sites remain reachable, which verifies keepalived's failover behavior

 

// Start keepalived on the master again

[root@lvs-dr1 ~]# systemctl start keepalived.service

[root@lvs-dr1 ~]# ip add

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

       valid_lft forever preferred_lft forever

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

    link/ether 00:0c:29:85:24:8c brd ff:ff:ff:ff:ff:ff

    inet 192.168.76.155/24 brd 10.0.1.255 scope global ens32

       valid_lft forever preferred_lft forever

    inet 192.168.76.158/32 scope global ens32

       valid_lft forever preferred_lft forever

    inet6 fe80::20c:29ff:fe85:248c/64 scope link 

       valid_lft forever preferred_lft forever
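
The VIP moves back to dr1 because it is configured with state MASTER and the higher priority, so it preempts as soon as its keepalived returns. If that extra failback is not wanted, both nodes can run as BACKUP with the nopreempt option; a sketch of how the vrrp_instance block might then look (other values as above):

vrrp_instance VI_1 {

    state BACKUP            # nopreempt requires state BACKUP on both nodes

    nopreempt               # keep the VIP on the current holder after the peer recovers

    interface ens32

    virtual_router_id 51

    priority 100            # the higher-priority node still wins the initial election

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass aminglinux

    }

    virtual_ipaddress {

        192.168.76.158

    }

}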

 

10. Further Reading

Comparison of heartbeat and keepalived

http://blog.csdn.net/yunhua_lee/article/details/9788433

How DRBD works and how to configure it

http://502245466.blog.51cto.com/7559397/1298945

mysql+keepalived

http://lizhenliang.blog.51cto.com/7876557/1362313

The three LVS modes explained

http://www.it165.net/admin/html/201401/2248.html

LVS scheduling algorithms

http://www.aminglinux.com/bbs/thread-7407-1-1.html

About arp_ignore and arp_announce

http://www.cnblogs.com/lgfeng/archive/2012/10/16/2726308.html

How LVS works

http://blog.csdn.net/pi9nc/article/details/23380589

haproxy+keepalived

http://blog.csdn.net/xrt95050/article/details/40926255

Comparison of nginx, LVS, and haproxy

http://www.csdn.net/article/2014-07-24/2820837

Custom scripts in keepalived (vrrp_script)

http://my.oschina.net/hncscwc/blog/158746

Running LVS DR mode with a single public IP

http://storysky.blog.51cto.com/628458/338726

 

Reposted from: https://www.cnblogs.com/linuxzhang/p/9809487.html
