Installing SaltStack and using it to build a Keepalived + HAProxy + Nginx high-availability stack

Installing SaltStack

#Introduction to SaltStack

Salt is a fresh approach to infrastructure management: it is easy to deploy and can be up and running within minutes; it scales well, comfortably managing tens of thousands of servers; and it is fast, with second-level communication between servers.

Main features:
remote execution
configuration management

See the SaltStack official documentation for details.

[root@node1 ~]# vim /etc/hosts
192.168.10.110 master.sls.com
192.168.10.111 web01.sls.com
192.168.10.112 web02.sls.com

[root@node1 ~]# scp /etc/hosts 192.168.10.111:/etc/hosts

[root@node1 ~]# scp /etc/hosts 192.168.10.112:/etc/hosts

Configure the yum repositories

[root@node1 ~]# vim /etc/yum.repos.d/local.repo 
[local]
name=local repo
baseurl=file:///media/cdrom
enabled=1
gpgcheck=0

[sls]
name=sls repo
baseurl=http://192.168.10.114/sls
enabled=1
gpgcheck=0

[nginx]
name=nginx repo
baseurl=http://192.168.10.114/nginx_fpm_redis
enabled=1
gpgcheck=0

#the repos above point at my own yum server, reachable only on the internal network
[root@node1 ~]# scp /etc/yum.repos.d/local.repo 192.168.10.111:/etc/yum.repos.d/local.repo 
root@192.168.10.111's password: 
local.repo                                100%  247     0.2KB/s   00:00    
[root@node1 ~]# scp /etc/yum.repos.d/local.repo 192.168.10.112:/etc/yum.repos.d/local.repo 
root@192.168.10.112's password: 
local.repo                                100%  247     0.2KB/s   00:00    
[root@node1 ~]# date
Tue Mar 16 11:05:37 CST 2021
#confirm the time matches on every node; if not, set up time synchronization first
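
If the clocks do drift, one minimal way to synchronize them on CentOS 7 is chrony (a sketch, assuming the nodes can reach an NTP source):

[root@node1 ~]# yum -y install chrony            #CentOS 7's default NTP client
[root@node1 ~]# systemctl enable --now chronyd
[root@node1 ~]# chronyc sources -v               #verify an upstream source is selected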

Set the hostname

[root@node1 ~]# hostnamectl set-hostname master.sls.com
[root@node1 ~]# bash

Install the master

[root@master ~]# yum -y install salt-master.noarch 

Install the minion

[root@web01 ~]# yum -y install salt-minion
[root@master ~]# vim /etc/salt/master 
15 interface: 192.168.10.110
#listen address; for security, always bind to an internal address
215 auto_accept: False
#whether the master automatically accepts a minion's key as soon as the minion (configured with the master's address) starts its service; False means keys must be accepted manually
406 file_roots:
407   base:
408     - /srv/salt/
#SaltStack file root location; just uncomment these lines
552 pillar_opts: True
#enable the pillar options
529 pillar_roots:
530   base:
531     - /srv/pillar
#pillar root directory; just uncomment these lines

Start the service once the edits are done

[root@master ~]# systemctl enable --now salt-master.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/salt-master.service to /usr/lib/systemd/system/salt-master.service.
[root@master ~]# ss -tnl
State      Recv-Q Send-Q Local Address:Port               Peer Address:Port              
LISTEN     0      128        *:22                     *:*                  
LISTEN     0      100    192.168.10.110:4505                   *:*                  
LISTEN     0      100    192.168.10.110:4506                   *:*                  
LISTEN     0      128       :::22                    :::*                  
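
The master publishes jobs on port 4505 and receives results on port 4506. They are reachable here; if firewalld happened to be running, both ports would need to be opened explicitly (hedged example, firewalld is not part of this setup):

[root@master ~]# firewall-cmd --permanent --add-port=4505-4506/tcp
[root@master ~]# firewall-cmd --reload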

Create the salt file root and the pillar directory

[root@master ~]# mkdir -pv /srv/{salt,pillar}
mkdir: created directory '/srv/salt'
mkdir: created directory '/srv/pillar'

Edit the minion-side configuration file

[root@web01 ~]# vim /etc/salt/minion
16 master: master.sls.com
#point the minion at the master (the hostname resolves via /etc/hosts)
78 id: web01.sls.com
#set this minion's ID
[root@web01 ~]# scp  /etc/salt/minion 192.168.10.112:/etc/salt/minion 
[root@web02 ~]# vim /etc/salt/minion 
78 id: web02.sls.com
[root@web01 ~]# systemctl --now enable salt-minion.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/salt-minion.service to /usr/lib/systemd/system/salt-minion.service.
#enable and start salt-minion on web02 the same way
[root@master ~]# salt-key -L
Accepted Keys:   #keys already accepted
Denied Keys:     #keys denied
Unaccepted Keys: #keys pending acceptance
web01.sls.com
web02.sls.com
Rejected Keys:   #keys rejected
View the minions' registration requests
[root@master ~]# salt-key -A
The following keys are going to be accepted:
Unaccepted Keys:
web01.sls.com
web02.sls.com
Proceed? [n/Y] Y
Key for minion web01.sls.com accepted.
Key for minion web02.sls.com accepted.
Accept all pending registrations
[root@master ~]# salt-key -L
Accepted Keys:
web01.sls.com
web02.sls.com
Denied Keys:
Unaccepted Keys:
Rejected Keys:

#common salt-key options (usage examples below)
-L  #list keys and their states
-A  #accept all pending keys
-D  #delete all keys
-a  #accept the specified key
-d  #delete the specified key
-r  #reject the specified key (it must still be unaccepted)
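
For example, instead of accepting everything with -A, keys can be handled one at a time using this lab's minion IDs:

[root@master ~]# salt-key -a web01.sls.com    #accept only web01's key
[root@master ~]# salt-key -r web02.sls.com    #reject web02's pending key
[root@master ~]# salt-key -d web02.sls.com    #delete web02's key entirely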
Verify that the connections are established
[root@master ~]# netstat -anput | grep ES
tcp        0      0 192.168.10.110:4506     192.168.10.111:39870    ESTABLISHED 12093/python        
tcp        0      0 192.168.10.110:4505     192.168.10.112:45764    ESTABLISHED 12081/python        
tcp        0      0 192.168.10.110:4506     192.168.10.112:38962    ESTABLISHED 12093/python        
tcp        0      0 192.168.10.110:4505     192.168.10.111:33670    ESTABLISHED 12081/python        
tcp        0     36 192.168.10.110:22       192.168.10.1:65502      ESTABLISHED 2743/sshd: root@pts 
udp        0      0 192.168.10.110:36195    124.108.20.1:123        ESTABLISHED 635/chronyd

[root@master ~]# tree /etc/salt/pki/
/etc/salt/pki/
└── master
    ├── master.pem
    ├── master.pub
    ├── minions
    │   ├── web01.sls.com
    │   └── web02.sls.com
    ├── minions_autosign
    ├── minions_denied
    ├── minions_pre
    └── minions_rejected

6 directories, 4 files

With that, the SaltStack deployment is complete.
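
Before moving on, a quick connectivity check with Salt's built-in test module; each minion ID should come back True (output sketched):

[root@master ~]# salt '*' test.ping
web01.sls.com:
    True
web02.sls.com:
    True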

Deploying Keepalived

Compiling software from source usually needs base build packages such as gcc and make, plus a number of dependency packages, and many minions may not have such a build environment. All three open-source components here depend on the same set of packages, so use SaltStack to install the dependencies first.

To keep management uniform, create separate directories for each component.

[root@master ~]# mkdir -p /srv/salt/pkg
[root@master ~]# vim /srv/salt/pkg/make.sls
make-pkg: 
  pkg.installed: 
    - pkgs: 
      - make 
      - gcc 
      - gcc-c++ 
      - autoconf 
      - openssl 
      - openssl-devel 
      - pcre 
      - pcre-devel 
      - zlib 
      - zlib-devel
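
Individual state files can be applied, or dry-run with test=True, before wiring them into top.sls; for example, to push just the build dependencies:

[root@master ~]# salt 'web01.sls.com' state.sls pkg.make test=True   #dry run on one node
[root@master ~]# salt 'web0*' state.sls pkg.make                     #apply on both web nodes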

Create a directory for the Keepalived source tarball and its related configuration files

[root@master ~]# mkdir -p /srv/salt/keepalived/files
[root@master ~]# tar xf keepalived-1.2.13.tar.gz 
[root@master ~]# cp keepalived-1.2.13.tar.gz /srv/salt/keepalived/files
[root@master ~]# cp keepalived-1.2.13/keepalived/etc/init.d/keepalived.init /srv/salt/keepalived/files/keepalived
[root@master ~]# cp keepalived-1.2.13/keepalived/etc/keepalived/keepalived.conf /srv/salt/keepalived/files
[root@master ~]# cp keepalived-1.2.13/keepalived/etc/init.d/keepalived.sysconfig /srv/salt/keepalived/files/keepalived.sys
[root@master ~]# ls /srv/salt/keepalived/files/
keepalived                keepalived.conf
keepalived-1.2.13.tar.gz  keepalived.sys

Edit the keepalived.conf configuration file

[root@master ~]# vim /srv/salt/keepalived/files/keepalived.conf 
! Configuration File for keepalived

global_defs { 
    router_id LVS_DEVEL 
} 

vrrp_instance VI_1 { 
    {% if grains['fqdn'] == 'web01.sls.com' %} {# choose MASTER or BACKUP by hostname #}
    state MASTER 
    priority 100 
    {% elif grains['fqdn'] == 'web02.sls.com' %} 
    state BACKUP 
    priority 90 
    {% endif %} 
    interface eth0 
    virtual_router_id 51
    advert_int 1 
    authentication { 
        auth_type PASS 
        auth_pass 1111 
    } 
    virtual_ipaddress { 
        192.168.11.110  #VIP
    }
}
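
The template branches on the fqdn grain, so it is worth confirming what each minion actually reports before rendering it:

[root@master ~]# salt '*' grains.item fqdn   #each minion should return its own fqdn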

Write the installation state install.sls

[root@master ~]# vim /srv/salt/keepalived/install.sls
include:
  - pkg.make                 #pull in the base build environment (make.sls)
keepalived-install:          #copy the source tarball
  file.managed:
    - name: /usr/local/src/keepalived-1.2.13.tar.gz
    - source: salt://keepalived/files/keepalived-1.2.13.tar.gz
  cmd.run:                   #compile and install keepalived
    - name: cd /usr/local/src && tar zxf keepalived-1.2.13.tar.gz && cd keepalived-1.2.13 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV && make && make install
    - unless: test -d /usr/local/keepalived   #skip the command above if the directory already exists
    - require:               #depends on the source tarball
      - file: keepalived-install

/usr/sbin/keepalived:        #create a symlink
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived

/etc/keepalived:             #create the config directory
  file.directory:
    - mode: 755

/etc/sysconfig/keepalived:   #copy the sysconfig file
  file.managed:
    - source: salt://keepalived/files/keepalived.sys
    - mode: 644
    - user: root

/etc/init.d/keepalived:
  file.managed:
    - source: salt://keepalived/files/keepalived
    - mode: 755
    - user: root
  cmd.run:
    - name: chkconfig --add keepalived
    - unless: chkconfig --list |grep keepalived

/etc/keepalived/keepalived.conf:
  file.managed:
    - source: salt://keepalived/files/keepalived.conf
    - mode: 644
    - template: jinja
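
The install state can be exercised on its own before wiring up the service, which makes compile errors easier to spot than in a full highstate:

[root@master ~]# salt 'web01.sls.com' state.sls keepalived.install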

Write the service startup file service.sls with the following content.

[root@master ~]# vim /srv/salt/keepalived/service.sls
include:
  - keepalived.install   #pull in the install.sls created above
keepalived-service:      #start the service
  service.running:
    - name: keepalived
    - enable: True
    - reload: True
    - watch:
      - file: /etc/keepalived/keepalived.conf

Write the unified entry file top.sls

[root@master ~]# vim /srv/salt/top.sls
base: 
  'web0[1-2].sls.com': 
    - keepalived.service

Push the states from the master so both minions install Keepalived. The first run takes quite a while; once it finishes you can see the IDs of the tasks that executed successfully on each minion.

[root@master ~]# salt '*' state.highstate
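
A rough verification pass (assuming eth0 and the VIP configured above): keepalived should be running on both nodes, and only the MASTER should hold the VIP:

[root@master ~]# salt '*' cmd.run 'service keepalived status'
[root@master ~]# salt '*' cmd.run 'ip a show eth0 | grep 192.168.11.110'   #only web01 should match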

Deploying HAProxy

Create the haproxy directory; the layout mirrors the keepalived one

[root@master ~]# mkdir -pv /srv/salt/haproxy/files
mkdir: created directory '/srv/salt/haproxy'
mkdir: created directory '/srv/salt/haproxy/files'
[root@master ~]# tar xf haproxy-1.5.19.tar.gz 
[root@master ~]# cp /root/haproxy-1.5.19.tar.gz /srv/salt/haproxy/files/
[root@master ~]# cp /root/haproxy-1.5.19/examples/haproxy.cfg /srv/salt/haproxy/files
[root@master ~]# cp /root/haproxy-1.5.19/examples/haproxy.init /srv/salt/haproxy/files/haproxy
[root@master ~]# ls /srv/salt/haproxy/files
haproxy  haproxy-1.5.19.tar.gz  haproxy.cfg

Edit the HAProxy configuration file

[root@master ~]# vim /srv/salt/haproxy/files/haproxy.cfg
global
    log 127.0.0.1 local0
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid 
    maxconn 10000 
    user haproxy 
    group haproxy 
    daemon 
defaults 
    mode http 
    log global 
    option httplog 
    option dontlognull 
    option http-server-close 
    option redispatch 
    retries 3 
    timeout http-request 10s 
    timeout queue 1m 
    timeout connect 10s 
    timeout client 1m 
    timeout server 1m 
    timeout http-keep-alive 10s 
    timeout check 10s 
    maxconn 5000
listen stats *:10000   #haproxy status page
    mode http 
    stats enable 
    stats uri / 
    stats refresh 5s 
    stats show-node 
    stats show-legends 
    stats hide-version 
listen test 0.0.0.0:80   #listens on port 80, which is why nginx is moved to port 8888
    mode tcp 
    option tcplog
    timeout client 10800s 
    timeout server 10800s 
    balance leastconn 
    option tcp-check 
    default-server port 8888 inter 2s downinter 5s rise 3 fall 2 slowstart 60s maxconn 5000 maxqueue 250 weight 100 
    server test-node1 web01.sls.com:8888 check 
    server test-node2 web02.sls.com:8888 check
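
Once HAProxy is installed on the minions (next steps), the deployed file can be syntax-checked with HAProxy's own -c flag:

[root@master ~]# salt '*' cmd.run 'haproxy -c -f /etc/haproxy/haproxy.cfg'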

The HAProxy configuration shows the service runs as the haproxy user. Compiling from source does not create that user, so it must exist before the service starts. Write a haproxy.sls file for it:

[root@master ~]# mkdir /srv/salt/user
[root@master ~]# vim /srv/salt/user/haproxy.sls
haproxy: 
  group.present: 
    - gid: 300 
  user.present: 
    - uid: 300 
    - gid: 300 
    - shell: /sbin/nologin 
    - home: /var/lib/haproxy
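
After the state is applied, the account can be verified on both nodes:

[root@master ~]# salt 'web0*' state.sls user.haproxy
[root@master ~]# salt 'web0*' cmd.run 'id haproxy'   #expect uid=300(haproxy) gid=300(haproxy)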

Write HAProxy's install.sls file

[root@master ~]# vim /srv/salt/haproxy/install.sls
include: 
  - user.haproxy 
haproxy-install: 
  file.managed: 
    - name: /usr/local/src/haproxy-1.5.19.tar.gz 
    - source: salt://haproxy/files/haproxy-1.5.19.tar.gz 
  cmd.run: 
    - name: cd /usr/local/src && tar xf haproxy-1.5.19.tar.gz && cd haproxy-1.5.19 && make TARGET=linux31 PREFIX=/usr/local/haproxy && make install PREFIX=/usr/local/haproxy 
    - unless: test -d /usr/local/haproxy   #skip the command above if /usr/local/haproxy already exists
    - require:   #depends on the source tarball
      - file: haproxy-install 
/usr/sbin/haproxy: 
  file.symlink: 
    - target: /usr/local/haproxy/sbin/haproxy 
/etc/haproxy: 
  file.directory: 
    - mode: 755 
/etc/haproxy/haproxy.cfg: 
  file.managed: 
    - source: salt://haproxy/files/haproxy.cfg 
    - mode: 644 
/etc/init.d/haproxy: 
  file.managed: 
    - source: salt://haproxy/files/haproxy 
    - mode: 755 
    - user: root 
  cmd.run: 
    - name: chkconfig --add haproxy 
    - unless: chkconfig --list |grep haproxy

Write the service.sls file

[root@master ~]# vim /srv/salt/haproxy/service.sls
include: 
  - haproxy.install
haproxy-service: 
  service.running:
    - name: haproxy 
    - enable: True 
    - reload: True 
    - watch: 
      - file: /etc/haproxy/haproxy.cfg

Add the HAProxy entry to the top.sls entry file

[root@master ~]# vim /srv/salt/top.sls
base: 
  'web0[1-2].sls.com': 
    - keepalived.service
    - haproxy.service

Push from the master again so both minions install HAProxy. As before, the first run takes a while, and afterwards you can see the successfully executed task IDs on each minion.

[root@master ~]# salt '*' state.highstate
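
To confirm HAProxy came up, check its listening sockets (ports 80 and 10000 from haproxy.cfg) and pull the stats page from one node:

[root@master ~]# salt '*' cmd.run 'ss -tnlp | grep haproxy'
[root@master ~]# curl -s http://192.168.10.111:10000/ | head -5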

Deploying Nginx

Create the nginx directory, again with the same layout

[root@master ~]# mkdir -pv /srv/salt/nginx/files

The Nginx service likewise needs to run as an nginx user, so create it as well

[root@master ~]# vim /srv/salt/user/nginx.sls
nginx: 
  group.present: 
    - gid: 400 
  user.present: 
    - uid: 400 
    - gid: 400 
    - shell: /sbin/nologin 
    - home: /home/nginx

Download the source tarball from the Nginx website, upload it to /root, and extract it

[root@master ~]# tar xf nginx-1.12.0.tar.gz

[root@master ~]# cp nginx-1.12.0.tar.gz /srv/salt/nginx/files

[root@master ~]# cp nginx-1.12.0/conf/nginx.conf /srv/salt/nginx/files

The source tarball contains no startup script for the Nginx service, and the CentOS 7.3 system used here manages services with systemd (systemctl), so a nginx.service unit file has to be created by hand

[root@master ~]# vim /srv/salt/nginx/files/nginx.service
[Unit] 
Description=nginx service 
After=network.target 
[Service]
Type=forking 
ExecStart=/usr/local/nginx/sbin/nginx 
ExecReload=/usr/local/nginx/sbin/nginx -s reload 
ExecStop=/usr/local/nginx/sbin/nginx -s quit 
PrivateTmp=true 
[Install] 
WantedBy=multi-user.target

To tell the two Nginx nodes apart by their home page, create an index.html template

[root@master ~]# vim /srv/salt/nginx/files/index.html
{% if grains['fqdn'] == 'web01.sls.com' %} 
This is 192.168.10.111 node! 
{% elif grains['fqdn'] == 'web02.sls.com' %} 
This is 192.168.10.112 node! 
{% endif %}

Check the files under the /srv/salt/nginx/files/ directory

[root@master ~]# ls /srv/salt/nginx/files/
index.html  nginx-1.12.0.tar.gz  nginx.conf  nginx.service

Write Nginx's install.sls file

[root@master ~]# vim /srv/salt/nginx/install.sls
include:
  - user.nginx 
nginx-install: 
  file.managed: 
    - name: /usr/local/src/nginx-1.12.0.tar.gz 
    - source: salt://nginx/files/nginx-1.12.0.tar.gz 
  cmd.run:
    - name: cd /usr/local/src && tar zxf nginx-1.12.0.tar.gz && cd nginx-1.12.0 && ./configure --prefix=/usr/local/nginx && make && make install 
    - unless: test -d /usr/local/nginx 
    - require: 
      - file: nginx-install 
    
/usr/local/nginx/conf/nginx.conf: 
  file.managed: 
    - source: salt://nginx/files/nginx.conf 
    - mode: 644 
  cmd.run: 
    - name: sed -i 's/#user[[:space:]]*nobody/user nginx/g' /usr/local/nginx/conf/nginx.conf && sed -i '0,/80/s/80/8888/' /usr/local/nginx/conf/nginx.conf 
    
/usr/local/nginx/html/index.html: 
  file.managed: 
    - source: salt://nginx/files/index.html 
    - mode: 644 
    - template: jinja

nginx-init: 
  file.managed: 
    - name: /usr/lib/systemd/system/nginx.service 
    - source: salt://nginx/files/nginx.service 
    - mode: 644 
    - user: root 
  cmd.run: 
    - name: systemctl enable nginx 
    - unless: systemctl is-enabled nginx
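
Once this state has been applied (via the highstate below), a quick grep shows whether both sed edits landed, i.e. the worker user became nginx and the listen port became 8888:

[root@master ~]# salt 'web0*' cmd.run "grep -E 'user nginx|8888' /usr/local/nginx/conf/nginx.conf"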

Write Nginx's service startup file service.sls

[root@master ~]# vim  /srv/salt/nginx/service.sls
include:
  - nginx.install 
nginx-service: 
  service.running: 
    - name: nginx 
    - enable: True 
    - reload: True 
    - watch: 
      - file: /usr/local/nginx/conf/nginx.conf

Update the top-level entry file top.sls with the Nginx entry

[root@master ~]# vim /srv/salt/top.sls
base: 
  'web0[1-2].sls.com': 
    - keepalived.service
    - haproxy.service
    - nginx.service

Finally, push top.sls from the salt-master to the salt-minions, so that each minion automatically installs and starts Keepalived, HAProxy, and Nginx as required. The first run of the command below takes quite a while, so be patient. Each minion ends up successfully executing 30 task IDs.

[root@master ~]# salt '*' state.highstate
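
With everything deployed, each backend should serve its own page on port 8888, and requests to the VIP on port 80 should be spread across both nodes by HAProxy:

[root@master ~]# curl http://192.168.10.111:8888
This is 192.168.10.111 node! 
[root@master ~]# curl http://192.168.10.112:8888
This is 192.168.10.112 node! 
[root@master ~]# curl http://192.168.11.110   #repeat a few times; both pages should appear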

Review the full salt directory tree

[root@master ~]# tree /srv/salt/
/srv/salt/
├── haproxy
│   ├── files
│   │   ├── haproxy
│   │   ├── haproxy-1.5.19.tar.gz
│   │   └── haproxy.cfg
│   ├── install.sls
│   └── service.sls
├── keepalived
│   ├── files
│   │   ├── keepalived
│   │   ├── keepalived-1.2.13.tar.gz
│   │   ├── keepalived.conf
│   │   └── keepalived.sys
│   ├── install.sls
│   └── service.sls
├── nginx
│   ├── files
│   │   ├── index.html
│   │   ├── nginx-1.12.0.tar.gz
│   │   ├── nginx.conf
│   │   └── nginx.service
│   ├── install.sls
│   └── service.sls
├── pkg
│   └── make.sls
├── top.sls
└── user
    ├── haproxy.sls
    └── nginx.sls

8 directories, 21 files

Verifying the deployment

Log in to both minion nodes and check the status of each service.

ps -ef | grep keepalived   #run on both nodes
ip a                       #check which node holds the VIP
ps -ef | grep haproxy      #run on both nodes
ps -ef | grep nginx        #run on both nodes

Check HAProxy's status page through the VIP address

From the master control host, access the VIP address and check that requests rotate across the backends

Access the VIP again from the control host: with one backend down, the remaining Nginx node keeps serving normally, confirming the high-availability architecture works

[root@web01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:af:19:93 brd ff:ff:ff:ff:ff:ff
    inet 192.168.10.111/24 brd 192.168.10.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 192.168.11.110/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:feaf:1993/64 scope link 
       valid_lft forever preferred_lft forever
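
To exercise the failover path (a sketch, using the SYSV service installed earlier): stop Keepalived on the MASTER and confirm the VIP moves to the BACKUP while traffic keeps flowing:

[root@web01 ~]# service keepalived stop
[root@web02 ~]# ip a show eth0 | grep 192.168.11.110   #the VIP should now be here
[root@master ~]# curl http://192.168.11.110            #requests are still answered
[root@web01 ~]# service keepalived start               #restore the original MASTER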