redis+memcache集群1

1 篇文章 0 订阅
1 篇文章 0 订阅

1.安装配置redis的cluster集群

没有学完,待补充

2.安装配置memcached高可用

1.架构图

[架构图:外链图片已失效(原图ID: F6803AB5D0984E00BC5465BAE43FA077),建议补充本地图片]

2.环境准备

节点        应用                    IP             备注
Node12      haproxy+keepalived      172.16.62.12
Node13      haproxy+keepalived      172.16.62.13
Memcached   Memcached               172.16.62.15   VIP 172.16.62.251,端口11211
Memcached   Memcached               172.16.62.16   VIP 172.16.62.251,端口11211
DNS         DNS                     172.16.62.24
DNS         memcache.haostack.com   172.16.62.251  域名解析到VIP

3.安装

3.1 haproxy和keepalived 安装

请详见

  • https://blog.csdn.net/weixin_45082683/article/details/106996652
3.2 Memcached 安装
#下载安装包repcache 
http://repcached.sourceforge.net/
[root@web15 html]# yum install libevent libevent-devel

wget https://sourceforge.net/projects/repcached/files/repcached/2.2.1-1.2.8/memcached-1.2.8-repcached-2.2.1.tar.gz
#编译
[root@web15 apps]# tar -xf memcached-1.2.8-repcached-2.2.1.tar.gz -C /usr/local/src/
[root@web15 apps]# cd /usr/local/src/
[root@web15 src]# ls
apache-tomcat-8.5.43  apache-tomcat-8.5.43.tar.gz  memcached-1.2.8-repcached-2.2.1  openssh-7.4p1
[root@web15 src]# cd memcached-1.2.8-repcached-2.2.1/
[root@web15 memcached-1.2.8-repcached-2.2.1]#
#指定目录,开启集群功能
./configure --prefix=/usr/local/repcached --enable-replication 
checking build system type... x86_64-unknown-linux-gnu
checking host system type... x86_64-unknown-linux-gnu
checking target system type... x86_64-unknown-linux-gnu
checking for a BSD-compatible install... /usr/bin/install -c
checking whether build environment is sane... yes
checking for a thread-safe mkdir -p... /usr/bin/mkdir -p
checking for gawk... gawk
checking whether make sets $(MAKE)... yes
checking for gcc... gcc
checking for C compiler default output file name... a.out
checking whether the C compiler works... yes
checking whether we are cross compiling... no
checking for suffix of executables... 
checking for suffix of object files... o
checking whether we are using the GNU C compiler... yes
checking whether gcc accepts -g... yes
checking for gcc option to accept ISO C89... none needed
checking for style of include used by make... GNU
checking dependency style of gcc... gcc3
checking whether gcc and cc understand -c and -o together... yes
checking for a BSD-compatible install... /usr/bin/install -c
checking for libevent directory... (system)
checking for library containing socket... none required
checking for library containing gethostbyname... none required
checking for library containing mallinfo... none required
checking for daemon... yes
checking how to run the C preprocessor... gcc -E
checking for grep that handles long lines and -e... /usr/bin/grep
checking for egrep... /usr/bin/grep -E
checking for ANSI C header files... yes
checking for sys/types.h... yes
checking for sys/stat.h... yes
checking for stdlib.h... yes
checking for string.h... yes
checking for memory.h... yes
checking for strings.h... yes
checking for inttypes.h... yes
checking for stdint.h... yes
checking for unistd.h... yes
checking for stdbool.h that conforms to C99... yes
checking for _Bool... yes
checking for an ANSI C-conforming const... yes
checking malloc.h usability... yes
checking malloc.h presence... yes
checking for malloc.h... yes
checking for struct mallinfo.arena... yes
checking for socklen_t... yes
checking for endianness... little
checking for mlockall... yes
checking for getpagesizes... no
checking for memcntl... no
configure: creating ./config.status
config.status: creating Makefile
config.status: creating doc/Makefile
config.status: creating config.h
config.status: executing depfiles commands
[root@web15 memcached-1.2.8-repcached-2.2.1]#



#make 有报错
[root@web16 memcached-1.2.8-repcached-2.2.1]# make
make  all-recursive
make[1]: Entering directory `/usr/local/src/memcached-1.2.8-repcached-2.2.1'
Making all in doc
make[2]: Entering directory `/usr/local/src/memcached-1.2.8-repcached-2.2.1/doc'
make[2]: Nothing to be done for `all'.
make[2]: Leaving directory `/usr/local/src/memcached-1.2.8-repcached-2.2.1/doc'
make[2]: Entering directory `/usr/local/src/memcached-1.2.8-repcached-2.2.1'
gcc -DHAVE_CONFIG_H -I.  -DNDEBUG   -g -O2 -MT memcached-memcached.o -MD -MP -MF .deps/memcached-memcached.Tpo -c -o memcached-memcached.o `test -f 'memcached.c' || echo './'`memcached.c
memcached.c: In function ‘add_iov’:
memcached.c:697:30: error: ‘IOV_MAX’ undeclared (first use in this function)
         if (m->msg_iovlen == IOV_MAX ||
                              ^
memcached.c:697:30: note: each undeclared identifier is reported only once for each function it appears in
make[2]: *** [memcached-memcached.o] Error 1
make[2]: Leaving directory `/usr/local/src/memcached-1.2.8-repcached-2.2.1'
make[1]: *** [all-recursive] Error 1
make[1]: Leaving directory `/usr/local/src/memcached-1.2.8-repcached-2.2.1'
make: *** [all] Error 2

#解决办法:编辑 memcached.c(约第55-60行),原内容如下
 56 #ifndef IOV_MAX
  57 #if defined(__FreeBSD__) || defined(__APPLE__)
  58 # define IOV_MAX 1024
  59 #endif
  60 #endif
#更改为如下内容(去掉平台判断,使Linux下也定义IOV_MAX),然后重新make
55 /* FreeBSD 4.x doesn't have IOV_MAX exposed. */
  56 #ifndef IOV_MAX
  57 # define IOV_MAX 1024
  58 #endif


#验证安装是否可行
[root@web16 repcached]# /usr/local/repcached/bin/memcached -h
memcached 1.2.8
repcached 2.2.1
-p <num>      TCP port number to listen on (default: 11211)
-U <num>      UDP port number to listen on (default: 11211, 0 is off)
-s <file>     unix socket path to listen on (disables network support)
-a <mask>     access mask for unix socket, in octal (default 0700)
-l <ip_addr>  interface to listen on, default is INDRR_ANY
-d            run as a daemon
-r            maximize core file limit
-u <username> assume identity of <username> (only when run as root)
-m <num>      max memory to use for items in megabytes, default is 64 MB
-M            return error on memory exhausted (rather than removing items)
-c <num>      max simultaneous connections, default is 1024
-k            lock down all paged memory.  Note that there is a
              limit on how much memory you may lock.  Trying to
              allocate more than that would fail, so be sure you
              set the limit correctly for the user you started
              the daemon with (not for -u <username> user;
              under sh this is done with 'ulimit -S -l NUM_KB').
-v            verbose (print errors/warnings while in event loop)
-vv           very verbose (also print client commands/reponses)
-h            print this help and exit
-i            print memcached and libevent license
-P <file>     save PID in <file>, only used with -d option
-f <factor>   chunk size growth factor, default 1.25
-n <bytes>    minimum space allocated for key+value+flags, default 48
-R            Maximum number of requests per event
              limits the number of requests process for a given con nection
              to prevent starvation.  default 20
-b            Set the backlog queue limit (default 1024)
-x <ip_addr>  hostname or IP address of peer repcached
-X <num:num>  TCP port number for replication. <listen:connect> (default: 11212)
[root@web16 repcached]#

#实现原理
在 master上可以通过 -X指定 replication port,在 slave上通过 -x/-X找到 master并
connect上去,事实上,如果同时指定了 -x/-X,
repcached一定会尝试连接,但如果连接失败,它就会用
-X参数来自己 listen(成为 master);如果
master坏掉, slave侦测到连接断了,它会自动
listen而成为 master;而如果 slave坏掉,
master也会侦测到连接断,它就会重新 listen等待新的
slave加入。
从这方案的技术实现来看,其实它是一个单 master单
slave的方案,但它的 master/slave都是可读写的,而且可以相互同步,所以从功能上看,也可以认为它是双机 master-master方案。



#启动服务
#memcache15上,配置对方的IP和端口
#验证端口启动成功。16000是复制检测端口:当一方检测到对端已监听16000时会关闭自己的该端口,只有检测到对端未启动时自己才会继续监听:

#配置memcache服务并设置从什么地方同步数据
[root@web15 apps]# /usr/local/repcached/bin/memcached -d -m 2048 -p 11211 -u memcached -c 2048 -x 172.16.62.16 -X 16000 
[root@web15 apps]# ss -tnl
State       Recv-Q Send-Q                                                           Local Address:Port                                                                          Peer Address:Port              
LISTEN      0      128                                                                          *:11211                                                                                    *:*                  
LISTEN      0      128                                                                          *:11212                                                                                    *:*                  
LISTEN      0      128                                                                          *:2222                                                                                     *:*                  
LISTEN      0      128                                                                          *:80                                                                                       *:*                  
LISTEN      0      128                                                                          *:81                                                                                       *:*                  
LISTEN      0      128                                                                          *:16000                                                                                    *:*                  
LISTEN      0      100                                                                         :::8009                                                                                    :::*                  
LISTEN      0      128                                                                         :::11211                                                                                   :::*                  
LISTEN      0      128                                                                         :::2222                                                                                    :::*                  
LISTEN      0      100                                                                         :::8080                                                                                    :::*                  
LISTEN      0      1                                                             ::ffff:127.0.0.1:8005                                                                                    :::*                  
[root@web15 apps]# 



#memcache16上 11211和16000 都已经监听
#和node15的memcache保持一致即互为主备,配置对端IP(node15的地址172.16.62.15)
[root@web16 apps]# /usr/local/repcached/bin/memcached -d -m 2048 -p 11211 -u memcached -c 2048 -x 172.16.62.15 -X 16000
[root@web16 apps]# ss -tnl
State       Recv-Q Send-Q                                                           Local Address:Port                                                                          Peer Address:Port              
LISTEN      0      128                                                                  127.0.0.1:9000                                                                                     *:*                  
LISTEN      0      128                                                                          *:11211                                                                                    *:*                  
LISTEN      0      128                                                                          *:11212                                                                                    *:*                  
LISTEN      0      128                                                                          *:2222                                                                                     *:*                  
LISTEN      0      128                                                                          *:111                                                                                      *:*                  
LISTEN      0      128                                                                          *:80                                                                                       *:*                  
LISTEN      0      128                                                                          *:81                                                                                       *:*                  
LISTEN      0      128                                                                          *:443                                                                                      *:*                  
LISTEN      0      128                                                                          *:16000                                                                                    *:*                  
LISTEN      0      80                                                                          :::3306                                                                                    :::*                  
LISTEN      0      128                                                                         :::11211                                                                                   :::*                  
LISTEN      0      128                                                                         :::2222                                                                                    :::*                  
LISTEN      0      128                                                                         :::111                                                                                     :::*                  
[root@web16 apps]# 

4.配置

#pip安装
yum install python-pip
安装memcached
[root@web15 apps]# pip install memcached
Collecting memcached
  Could not find a version that satisfies the requirement memcached (from versions: )
No matching distribution found for memcached
You are using pip version 8.1.2, however version 20.1.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
[root@web15 apps]# pip install --upgrade
You must give at least one requirement to install (see "pip help install")
You are using pip version 8.1.2, however version 20.1.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
#升级pip版本
[root@web15 apps]# pip install --upgrade pip
Collecting pip
  Downloading https://files.pythonhosted.org/packages/43/84/23ed6a1796480a6f1a2d38f2802901d078266bda38388954d01d3f2e821d/pip-20.1.1-py2.py3-none-any.whl (1.5MB)
    100% |████████████████████████████████| 1.5MB 18kB/s 
Installing collected packages: pip
  Found existing installation: pip 8.1.2
    Uninstalling pip-8.1.2:
      Successfully uninstalled pip-8.1.2
Successfully installed pip-20.1.1


#安装python-memcached包
[root@web15 apps]# pip install python-memcached
DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support
Collecting python-memcached
  Downloading python_memcached-1.59-py2.py3-none-any.whl (16 kB)
Requirement already satisfied: six>=1.4.0 in /usr/lib/python2.7/site-packages (from python-memcached) (1.9.0)
Installing collected packages: python-memcached
Successfully installed python-memcached-1.59
[root@web15 apps]# python
Python 2.7.5 (default, Apr  2 2020, 13:16:51) 
[GCC 4.8.5 20150623 (Red Hat 4.8.5-39)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import memcache #导入memcache模块
>>>


#Python连接脚本
[root@web15 apps]# more memcached-cli.py 
#!/usr/bin/env python
#coding:utf-8
#Author:jack
import memcache
m = memcache.Client(['172.16.62.15:11211'], debug=True)
for i in range(100):
 	m.set("key%d" % i,"v%d" % i)
 	ret = m.get('key%d' % i)
	print ret
[root@web15 apps]#
在node15上生成数据
[root@web15 apps]# python memcached-cli.py 
webmemcache0
webmemcache1
webmemcache2
webmemcache3
webmemcache4
webmemcache5
webmemcache6
webmemcache7
webmemcache8
webmemcache9
webmemcache10
............
webmemcache99
#在node16上生成数据
[root@web16 apps]# python memcache-cli.py 
v0
v1
v2
v3
v4
v5
v6
v7
v8
v9
v10
............
v99

5.haproxy和keepalived配置

5.1 haproxy配置
#node12节点haproxy 配置
[root@node12 haproxy]# more haproxy.cfg 
#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------


listen apachecluster
bind 0.0.0.0:81 #监听IP和端口
mode http  #代理协议
log global   #日志
balance roundrobin #负载均衡策略
server apache1 172.16.62.12:81  weight 1 check inter 3000 fall 2 rise 5
server apache2 172.16.62.15:81  weight 1 check inter 3000 fall 2 rise 5

listen nginxcluster
bind 0.0.0.0:80 #监听IP和端口
mode http #代理协议
log global #日志
balance roundrobin #负载均衡策略
server nginx1 172.16.62.15:80 weight 1 check inter 3000 fall 2 rise 5
server nginx2 172.16.62.16:80 weight 1 check inter 3000 fall 2 rise 5

listen memcachedcluster
bind 0.0.0.0:11211  #监听IP和端口
mode tcp #代理协议
log global #日志
balance roundrobin #负载均衡策略
server memcached1 172.16.62.15:11211 weight 1 check inter 3000 fall 2 rise 5
server memcached2 172.16.62.16:11211 weight 1 check inter 3000 fall 2 rise 5

#重启服务,查看监听端口11211
 
[root@node12 haproxy]# systemctl restart haproxy
[root@node12 haproxy]# ss -tnl
State       Recv-Q Send-Q                                                           Local Address:Port                                                                          Peer Address:Port              
LISTEN      0      128                                                                          *:80                                                                                       *:*                  
LISTEN      0      128                                                                          *:81                                                                                       *:*                  
LISTEN      0      128                                                                          *:22                                                                                       *:*                  
LISTEN      0      128                                                                          *:11211                                                                                    *:*                  
LISTEN      0      128                                                                       [::]:22                                                                                    [::]:*                  
[root@node12 haproxy]#



#node13节点haproxy 配置

[root@node13 haproxy]# more haproxy.cfg 
#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------

listen apachecluster
bind 0.0.0.0:81 #监听IP和端口
mode http  #代理协议
log global   #日志
balance roundrobin #负载均衡策略
server apache1 172.16.62.12:81  weight 1 check inter 3000 fall 2 rise 5
server apache2 172.16.62.15:81  weight 1 check inter 3000 fall 2 rise 5

listen nginxcluster
bind 0.0.0.0:80 #监听IP和端口
mode http #代理协议
log global #日志
balance roundrobin #负载均衡策略
server nginx1 172.16.62.15:80 weight 1 check inter 3000 fall 2 rise 5
server nginx2 172.16.62.16:80 weight 1 check inter 3000 fall 2 rise 5

listen memcachedcluster
bind 0.0.0.0:11211  #监听IP和端口
mode tcp #代理协议
log global #日志
balance roundrobin #负载均衡策略
server memcached1 172.16.62.15:11211 weight 1 check inter 3000 fall 2 rise 5
server memcached2 172.16.62.16:11211 weight 1 check inter 3000 fall 2 rise 5


#重启服务,查看监听端口11211

[root@node13 ~]# systemctl restart haproxy
[root@node13 ~]# ss -tnl
State       Recv-Q Send-Q                                                           Local Address:Port                                                                          Peer Address:Port              
LISTEN      0      128                                                                          *:11211                                                                                    *:*                  
LISTEN      0      128                                                                          *:80                                                                                       *:*                  
LISTEN      0      128                                                                          *:81                                                                                       *:*                  
LISTEN      0      128                                                                          *:22                                                                                       *:*                  
LISTEN      0      128                                                                       [::]:22                                                                                    [::]:*  
5.2 keepalived配置
#node12 keepalived配置,使用vip 172.16.62.249
[root@node12 keepalived]# more keepalived.conf
! Configuration File for keepalived

global_defs {       #全局定义块
   notification_email {    #邮件通知配置,用于服务有故障时发送邮件报警,可选项
     test@qq.com     #邮件接收者
   }
   notification_email_from root@qq.com #邮件发送者
   smtp_server 127.0.0.1    #smtp邮件服务器
   smtp_connect_timeout 30  #超时时间
   router_id node12.haostack.com    # 标识本节点的字符串,通常为hostname,但不一定非得是hostname。故障发生时,邮件通知会用到
   vrrp_skip_check_adv_addr  #所有报文都检查比较消耗性能,此配置为如果收到的报文和上一个报文是同一个路由器则跳过检查报文中的源地址
   #vrrp_strict      #严格遵守VRRP协议,不允许状况:1,没有VIP地址,2.配置了单播邻居,3.在VRRP版本2中有IPv6地址.
   vrrp_garp_interval 0  #ARP报文发送延迟 
   vrrp_gna_interval 0  #消息发送延迟
   vrrp_iptables #需要关闭vrrp_iptables规则,否则vip ping不通
}
#HAProxy健康检查
vrrp_script chk_haproxy {
   script "/etc/keepalived/chk_haproxy.sh"  #脚本目录
   interval 1   #脚本运行周期
   weight -80   #检测失败时降低的权重
   fall 3     #连续失败3次判定为故障
   rise 5     #连续成功5次判定为恢复
   timeout 2  #脚本执行超时时间
   }

vrrp_instance VI_200 {  #VRRP实例定义块
    state MASTER    #标识当前节点的状态,可以是MASTER或BACKUP,当其他节点keepalived启动时会将priority比较大的节点选举为MASTER
    interface eth0  #节点固有IP(非VIP)的网卡,用来发VRRP包
    virtual_router_id 200 #值在0-255之间,用来区分多个instance的VRRP组播。同一网段中virtual_router_id的值不能重复,否则会出错
    priority 100    #用来选举master的,要成为master,那么这个选项的值最好高于其他机器50个点,该项取值范围是[1-254](在此范围之外会被识别成默认值100)
    advert_int 2  ##发VRRP包的时间间隔,即多久进行一次master选举(可以认为是健康查检时间间隔)
    #nopreempt  #非抢占模式,不会回来
    #preempt_delay 60s  #抢占模式延迟
    unicast_src_ip 172.16.62.12 #配置单播本机地址,需要禁用vrrp_strict
    unicast_peer {             #配置对端单播地址
       172.16.62.13
     }
    authentication {   #认证配置,MASTER和BACKUP两端必须一致
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.16.62.248
        172.16.62.249
        172.16.62.250
        172.16.62.251
        172.16.62.252
    }
    #以下三行为调用通知脚本
    #notify_master "/etc/keepalived/notify.sh master"   
    #当前节点成为主节点时触发的脚本 
    ##notify_backup "/etc/keepalived/notify.sh backup"
    #当前节点转为备节点时触发的脚本
    ##notify_fault "/etc/keepalived/notify.sh fault"
    #当前节点转为失败状态时触发的脚本
track_script {
  chk_haproxy   #调用chk_haproxy脚本名称
  }
}
[root@node12 keepalived]#

#node13 keepalived配置,使用vip 172.16.62.251

[root@node13 keepalived]# more keepalived.conf 
! Configuration File for keepalived

global_defs {       #全局定义块
   notification_email {    #邮件通知配置,用于服务有故障时发送邮件报警,可选项
     test@qq.com     #邮件接收者
   }
   notification_email_from root@qq.com #邮件发送者
   smtp_server 127.0.0.1    #smtp邮件服务器
   smtp_connect_timeout 30  #超时时间
   router_id node13.haostack.com    # 标识本节点的字符串,通常为hostname,但不一定非得是hostname。故障发生时,邮件通知会用到
   vrrp_skip_check_adv_addr  #所有报文都检查比较消耗性能,此配置为如果收到的报文和上一个报文是同一个路由器则跳过检查报文中的源地址
   #vrrp_strict      #严格遵守VRRP协议,不允许状况:1,没有VIP地址,2.配置了单播邻居,3.在VRRP版本2中有IPv6地址.
   vrrp_garp_interval 0  #ARP报文发送延迟 
   vrrp_gna_interval 0  #消息发送延迟
   vrrp_iptables #需要关闭vrrp_iptables规则,否则vip ping不通
}

vrrp_instance VI_200 {  #VRRP实例定义块
    state BACKUP   #标识当前节点的状态,可以是MASTER或BACKUP,当其他节点keepalived启动时会将priority比较大的节点选举为MASTER
    interface eth0  #节点固有IP(非VIP)的网卡,用来发VRRP包
    virtual_router_id 200 #值在0-255之间,用来区分多个instance的VRRP组播。同一网段中virtual_router_id的值不能重复,否则会出错
    priority 80    #用来选举master的,要成为master,那么这个选项的值最好高于其他机器50个点,该项取值范围是[1-254](在此范围之外会被识别成默认值100)
    advert_int 2  ##发VRRP包的时间间隔,即多久进行一次master选举(可以认为是健康查检时间间隔)
    #nopreempt  #非抢占模式,不会回来
    #preempt_delay 60s  #抢占模式延迟
    unicast_src_ip 172.16.62.13 #配置单播本机地址,需要禁用vrrp_strict
    unicast_peer {             #配置对端单播地址
       172.16.62.12
     }
    authentication {   #认证配置,MASTER和BACKUP两端必须一致
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.16.62.248
        172.16.62.249
        172.16.62.250
        172.16.62.251
        172.16.62.252
    }
    #以下三行为调用通知脚本
    #notify_master "/etc/keepalived/notify.sh master"   
    #当前节点成为主节点时触发的脚本 
    #notify_backup "/etc/keepalived/notify.sh backup"
    #当前节点转为备节点时触发的脚本
    #notify_fault "/etc/keepalived/notify.sh fault"
    #当前节点转为失败状态时触发的脚本
}
[root@node13 keepalived]# 
5.3 memcache客户端配置VIP
#node15上配置VIP 172.16.62.251
[root@web15 tmp]# bash  lvs_dr_251.sh start
lvs_dr_251.sh: line 19: dev/null: No such file or directory
RealServer Start OK
[root@web15 tmp]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet 172.16.62.251/32 brd 172.16.62.251 scope global lo:0
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:a0:cf:97 brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.15/24 brd 172.16.62.255 scope global eth0
       valid_lft forever preferred_lft forever
[root@web15 tmp]# 

#node16上配置VIP 172.16.62.251
[root@web16 tmp]# bash lvs_dr_251.sh start
lvs_dr_251.sh: line 19: dev/null: No such file or directory
RealServer Start OK
[root@web16 tmp]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet 172.16.62.251/32 brd 172.16.62.251 scope global lo:0
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:a0:18:39 brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.16/24 brd 172.16.62.255 scope global eth0
       valid_lft forever preferred_lft forever
[root@web16 tmp]#

6.测试

6.1 单节点测试
#node15上测试
[root@node14 tmp]# telnet 172.16.62.15 11211
Trying 172.16.62.15...
Connected to 172.16.62.15.
Escape character is '^]'.
get key1
VALUE key1 0 9
memcache1
END
get key99
VALUE key99 0 10
memcache99
END
get key1
VALUE key1 0 9
memcache1
END
get key99
VALUE key99 0 10
memcache99
END
#node16上测试
[root@node14 tmp]# telnet 172.16.62.16 11211
Trying 172.16.62.16...
Connected to 172.16.62.16.
Escape character is '^]'.
get key1
VALUE key1 0 2
v1
END
get key88
VALUE key88 0 3
v88
END

6.2 高可用测试
6.2.1 结合haproxy测试memcached
#在DNS上做好解析
[root@node12 haproxy]# telnet memcache.haostack.com  11211
Trying 172.16.62.251...
Connected to memcache.haostack.com.
Escape character is '^]'.
get key1
VALUE key1 0 2
v1
END
get key99
VALUE key99 0 3
v99
END

6.2.2 关闭memcache1节点172.16.62.15,然后测试
[root@node12 haproxy]# ping 172.16.62.15
PING 172.16.62.15 (172.16.62.15) 56(84) bytes of data.



^C
--- 172.16.62.15 ping statistics ---
6 packets transmitted, 0 received, 100% packet loss, time 4999ms

[root@node12 haproxy]# telnet memcache.haostack.com  11211
Trying 172.16.62.251...
Connected to memcache.haostack.com.
Escape character is '^]'.
get key1
VALUE key1 0 2
v1
END
get key99
VALUE key99 0 3
v99
END

6.2.3 关闭haproxy节点 172.16.62.12,然后测试
#现在VIP在node12上
[root@node12 haproxy]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:a4:f5 brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.12/24 brd 172.16.62.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.248/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.249/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.250/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.251/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.252/32 scope global eth0
       valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:dd:03 brd ff:ff:ff:ff:ff:ff
    inet 192.168.100.12/24 brd 192.168.100.255 scope global noprefixroute eth1
       valid_lft forever preferred_lft forever
[root@node12 haproxy]# 

#关闭node12,VIP在node13上
[root@node12 haproxy]# systemctl stop  keepalived
[root@node12 haproxy]#
#VIP飘到Node13上
[root@node13 haproxy]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:98:7e brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.13/24 brd 172.16.62.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.248/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.249/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.250/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.251/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.252/32 scope global eth0
       valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:cd:85 brd ff:ff:ff:ff:ff:ff
    inet 192.168.100.13/24 brd 192.168.100.255 scope global noprefixroute eth1
       valid_lft forever preferred_lft forever
[root@node13 haproxy]

#测试memcache 
[root@node14 ~]# telnet memcache.haostack.com  11211
Trying 172.16.62.251...
Connected to memcache.haostack.com.
Escape character is '^]'.
get key1
VALUE key1 0 2
v1
END
get key90
VALUE key90 0 3
v90
END

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值