One-click deployment of keepalived and haproxy based load balancing and high availability, plus storing returned data

1. Install keepalived from source
[root@server5 salt]# cd keepalived/
[root@server5 keepalived]# ls
install.sls
[root@server5 keepalived]# vim install.sls 
[root@server5 keepalived]# ls
install.sls
[root@server5 keepalived]# mkdir files
[root@server5 keepalived]# cd files/
[root@server5 files]# pwd
/srv/salt/keepalived/files
[root@server5 files]# ls
keepalived-2.0.6.tar.gz

[root@server5 keepalived]# cat install.sls
include:
  - nginx.make

kp-install:
  file.managed:
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make &> /dev/null && make install &> /dev/null
    - creates: /usr/local/keepalived

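The include pulls in nginx.make, which provides the build toolchain (it appears as the pkg-init state in the highstate output later). A minimal sketch of what nginx/make.sls might contain, assuming gcc and the usual build dependencies:

pkg-init:
  pkg.installed:
    - pkgs:
      - gcc
      - pcre-devel
      - openssl-devel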
Verify that the source build completed:

[root@server8 ~]# cd /mnt/
[root@server8 mnt]# ls
keepalived-2.0.6  keepalived-2.0.6.tar.gz
[root@server8 mnt]# cd /usr/local/keepalived/
[root@server8 keepalived]# ls
bin  etc  sbin  share
2. Add the directory and symlinks to install.sls
/etc/keepalived:       ## config directory with permissions
  file.directory:
    - mode: 755


/etc/sysconfig/keepalived:    ## symlink
  file.symlink:
    - target: /usr/local/keepalived/etc/sysconfig/keepalived

/sbin/keepalived:     ## symlink
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived
[root@server5 keepalived]# salt server8 state.sls keepalived.install

Test:
Server8:

[root@server8 keepalived]# ll /etc/sysconfig/keepalived 
lrwxrwxrwx 1 root root 46 Aug 18 10:23 /etc/sysconfig/keepalived -> /usr/local/keepalived/etc/sysconfig/keepalived


[root@server8 keepalived]# ll /sbin/keepalived 
lrwxrwxrwx 1 root root 37 Aug 18 10:23 /sbin/keepalived -> /usr/local/keepalived/sbin/keepalived
3. Start keepalived
[root@server8 keepalived]# scp keepalived.conf 
[root@server8 init.d]# pwd
/usr/local/keepalived/etc/rc.d/init.d
[root@server8 init.d]# scp keepalived root@172.25.24.5:/srv/salt/keepalived/files

Server5:

[root@server5 keepalived]# ls
files  install.sls  service.sls
[root@server5 keepalived]# cat service.sls 
include:
  - keepalived.install

/etc/keepalived/keepalived.conf:
  file.managed:
    - source: salt://keepalived/files/keepalived.conf

kp-service:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/files/keepalived
    - mode: 755
[root@server5 keepalived]# salt server8 state.sls keepalived.service
Set up pillar
[root@server5 pillar]# ls
keepalived-install.sls  top.sls  web-install.sls
[root@server5 pillar]# vim web-install.sls 
[root@server5 pillar]# vim keepalived-install.sls 
[root@server5 pillar]# pwd
/srv/pillar

[root@server5 pillar]# cat keepalived-install.sls 
{% if grains['fqdn'] == 'server5' %}
state: MASTER
vrid: 24
priority: 100
{% elif grains['fqdn'] == 'server8' %}
state: BACKUP
vrid: 24
priority: 50
{% endif %}



[root@server5 pillar]# cat top.sls 
base:
  '*':
    - web-install
    - keepalived-install
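After the pillar files are in place, refresh and check the values on the minions (standard Salt commands):

[root@server5 pillar]# salt '*' saltutil.refresh_pillar
[root@server5 pillar]# salt '*' pillar.item state vrid priority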
Configure the keepalived.conf to be pushed
[root@server5 files]# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
    root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state {{ STATE }}
    interface eth0
    virtual_router_id {{ VRID }}
    priority {{ PRIORITY }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    172.25.24.100
    }
}

Configure the service state

[root@server5 keepalived]# vim service.sls 
[root@server5 keepalived]# cat service.sls 
include:
  - keepalived.install

/etc/keepalived/keepalived.conf:
  file.managed:
    - source: salt://keepalived/files/keepalived.conf
    - template: jinja
    - context:
        STATE: {{ pillar['state'] }}
        VRID: {{ pillar['vrid'] }}
        PRIORITY: {{ pillar['priority'] }}

kp-service:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/files/keepalived
    - mode: 755
  service.running:
    - name: keepalived
    - reload: True
    - watch:
      - file: /etc/keepalived/keepalived.conf
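To preview how the Jinja template renders with the pillar values before pushing, state.show_sls can be used (a standard Salt function):

[root@server5 keepalived]# salt server8 state.show_sls keepalived.service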
Configure the top file for the HA cluster
[root@server5 salt]# vim top.sls 
[root@server5 salt]# pwd
/srv/salt
[root@server5 salt]# cat top.sls 
base:
  'server5':
    - haproxy.haproxy-install          
    - keepalived.service
  'server8':
    - haproxy.haproxy-install
    - keepalived.service
  'roles:apache':
    - match: grain
    - httpd.install
  'roles:nginx':
    - match: grain
    - nginx.nginx-server
Test:
[root@server5 files]# salt '*' state.highstate
server7:
----------
          ID: nginx-group
    Function: group.present
        Name: nginx
      Result: True
     Comment: Group nginx is present and up to date
     Started: 11:32:52.476321
    Duration: 2.285 ms
     Changes:   
----------
          ID: nginx-user
    Function: user.present
        Name: nginx
      Result: True
     Comment: User nginx is present and up to date
     Started: 11:32:52.482834
    Duration: 66.642 ms
     Changes:   
----------
          ID: /usr/local/nginx/conf/nginx.conf
    Function: file.managed
      Result: True
     Comment: File /usr/local/nginx/conf/nginx.conf is in the correct state
     Started: 11:32:52.551951
    Duration: 251.387 ms
     Changes:   
----------
          ID: nginx-server
    Function: file.managed
        Name: /etc/init.d/nginx
      Result: True
     Comment: File /etc/init.d/nginx is in the correct state
     Started: 11:32:52.803573
    Duration: 137.583 ms
     Changes:   
----------
          ID: nginx-server
    Function: service.running
        Name: nginx
      Result: True
     Comment: The service nginx is already running
     Started: 11:32:52.960008
    Duration: 89.001 ms
     Changes:   

Summary for server7
------------
Succeeded: 5
Failed:    0
------------
Total states run:     5
Total run time: 546.898 ms
server8:
----------
          ID: haproxy-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 11:32:54.641219
    Duration: 470.856 ms
     Changes:   
----------
          ID: haproxy-install
    Function: file.managed
        Name: /etc/haproxy/haproxy.cfg
      Result: True
     Comment: File /etc/haproxy/haproxy.cfg is in the correct state
     Started: 11:32:55.114824
    Duration: 46.851 ms
     Changes:   
----------
          ID: haproxy-install
    Function: service.running
        Name: haproxy
      Result: True
     Comment: The service haproxy is already running
     Started: 11:32:55.162786
    Duration: 43.08 ms
     Changes:   
----------
          ID: pkg-init
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 11:32:55.206174
    Duration: 1.016 ms
     Changes:   
----------
          ID: kp-install
    Function: file.managed
        Name: /mnt/keepalived-2.0.6.tar.gz
      Result: True
     Comment: File /mnt/keepalived-2.0.6.tar.gz is in the correct state
     Started: 11:32:55.207323
    Duration: 90.963 ms
     Changes:   
----------
          ID: kp-install
    Function: cmd.run
        Name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make &> /dev/null && make install &> /dev/null
      Result: True
     Comment: /usr/local/keepalived exists
     Started: 11:32:55.299995
    Duration: 0.549 ms
     Changes:   
----------
          ID: /etc/keepalived
    Function: file.directory
      Result: True
     Comment: Directory /etc/keepalived is in the correct state
     Started: 11:32:55.300677
    Duration: 0.667 ms
     Changes:   
----------
          ID: /etc/sysconfig/keepalived
    Function: file.symlink
      Result: True
     Comment: Symlink /etc/sysconfig/keepalived is present and owned by root:root
     Started: 11:32:55.301449
    Duration: 1.721 ms
     Changes:   
----------
          ID: /sbin/keepalived
    Function: file.symlink
      Result: True
     Comment: Symlink /sbin/keepalived is present and owned by root:root
     Started: 11:32:55.303283
    Duration: 1.439 ms
     Changes:   
----------
          ID: /etc/keepalived/keepalived.conf
    Function: file.managed
      Result: True
     Comment: File /etc/keepalived/keepalived.conf is in the correct state
     Started: 11:32:55.304835
    Duration: 39.478 ms
     Changes:   
----------
          ID: kp-service
    Function: file.managed
        Name: /etc/init.d/keepalived
      Result: True
     Comment: File /etc/init.d/keepalived updated
     Started: 11:32:55.344461
    Duration: 81.44 ms
     Changes:   
              ----------
              diff:
                  Replace binary file with text file
----------
          ID: kp-service
    Function: service.running
        Name: keepalived
      Result: True
     Comment: The service keepalived is already running
     Started: 11:32:55.426566
    Duration: 37.631 ms
     Changes:   

Summary for server8
-------------
Succeeded: 12 (changed=1)
Failed:     0
-------------
Total states run:     12
Total run time:  815.691 ms
server6:
----------
          ID: apache-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 11:32:54.910411
    Duration: 634.703 ms
     Changes:   
----------
          ID: /etc/httpd/conf/httpd.conf
    Function: file.managed
      Result: True
     Comment: File /etc/httpd/conf/httpd.conf is in the correct state
     Started: 11:32:55.548923
    Duration: 62.172 ms
     Changes:   
----------
          ID: apache
    Function: service.running
        Name: httpd
      Result: True
     Comment: The service httpd is already running
     Started: 11:32:55.611307
    Duration: 32.768 ms
     Changes:   

Summary for server6
------------
Succeeded: 3
Failed:    0
------------
Total states run:     3
Total run time: 729.643 ms
server5:
----------
          ID: haproxy-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 11:32:55.232093
    Duration: 561.12 ms
     Changes:   
----------
          ID: haproxy-install
    Function: file.managed
        Name: /etc/haproxy/haproxy.cfg
      Result: True
     Comment: File /etc/haproxy/haproxy.cfg is in the correct state
     Started: 11:32:55.795728
    Duration: 45.38 ms
     Changes:   
----------
          ID: haproxy-install
    Function: service.running
        Name: haproxy
      Result: True
     Comment: The service haproxy is already running
     Started: 11:32:55.842050
    Duration: 36.106 ms
     Changes:   
----------
          ID: pkg-init
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 11:32:55.878423
    Duration: 0.883 ms
     Changes:   
----------
          ID: kp-install
    Function: file.managed
        Name: /mnt/keepalived-2.0.6.tar.gz
      Result: True
     Comment: File /mnt/keepalived-2.0.6.tar.gz is in the correct state
     Started: 11:32:55.879419
    Duration: 90.263 ms
     Changes:   
----------
          ID: kp-install
    Function: cmd.run
        Name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make &> /dev/null && make install &> /dev/null
      Result: True
     Comment: /usr/local/keepalived exists
     Started: 11:32:55.971076
    Duration: 0.41 ms
     Changes:   
----------
          ID: /etc/keepalived
    Function: file.directory
      Result: True
     Comment: Directory /etc/keepalived is in the correct state
     Started: 11:32:55.971582
    Duration: 0.567 ms
     Changes:   
----------
          ID: /etc/sysconfig/keepalived
    Function: file.symlink
      Result: True
     Comment: Symlink /etc/sysconfig/keepalived is present and owned by root:root
     Started: 11:32:55.972246
    Duration: 1.6 ms
     Changes:   
----------
          ID: /sbin/keepalived
    Function: file.symlink
      Result: True
     Comment: Symlink /sbin/keepalived is present and owned by root:root
     Started: 11:32:55.973956
    Duration: 1.317 ms
     Changes:   
----------
          ID: /etc/keepalived/keepalived.conf
    Function: file.managed
      Result: True
     Comment: File /etc/keepalived/keepalived.conf is in the correct state
     Started: 11:32:55.975374
    Duration: 37.431 ms
     Changes:   
----------
          ID: kp-service
    Function: file.managed
        Name: /etc/init.d/keepalived
      Result: True
     Comment: File /etc/init.d/keepalived updated
     Started: 11:32:56.012940
    Duration: 79.408 ms
     Changes:   
              ----------
              diff:
                  Replace binary file with text file
----------
          ID: kp-service
    Function: service.running
        Name: keepalived
      Result: True
     Comment: The service keepalived is already running
     Started: 11:32:56.092951
    Duration: 41.429 ms
     Changes:   

Summary for server5
-------------
Succeeded: 12 (changed=1)
Failed:     0
-------------
Total states run:     12
Total run time:  895.914 ms
[root@server5 salt]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:14:9a:8d brd ff:ff:ff:ff:ff:ff
    inet 172.25.24.5/24 brd 172.25.24.255 scope global eth0
    inet 172.25.24.100/32 scope global eth0
    inet6 fe80::5054:ff:fe14:9a8d/64 scope link 
       valid_lft forever preferred_lft forever


[root@server5 files]# /etc/init.d/keepalived stop
Stopping keepalived:                                       [  OK  ]
 ## with keepalived stopped on server5

Load balancing is still available.

High availability for haproxy

First write a health-check script for haproxy on the haproxy nodes.

[root@server5 opt]# vim check_haproxy.sh

[root@server5 opt]# cat check_haproxy.sh 
#!/bin/bash
# If haproxy is not running, try to restart it.
/etc/init.d/haproxy status &> /dev/null || /etc/init.d/haproxy restart &> /dev/null

# If the restart also failed, stop keepalived so the VIP fails over to the backup node.
if [ $? -ne 0 ]; then
    /etc/init.d/keepalived stop &> /dev/null
fi



[root@server5 opt]# pwd
/opt

[root@server5 opt]# chmod +x check_haproxy.sh



[root@server5 files]# scp /opt/check_haproxy.sh  root@172.25.24.8:/opt/

[root@server5 files]# pwd
/srv/salt/keepalived/files
[root@server5 files]# vim keepalived.conf   

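The edit adds a vrrp_script that runs the check script and tracks it inside the VRRP instance. A minimal sketch of the addition to keepalived.conf (the interval value is an assumption):

vrrp_script check_haproxy {
    script "/opt/check_haproxy.sh"
    interval 2
}

vrrp_instance VI_1 {
    ! ... existing settings stay unchanged ...
    track_script {
        check_haproxy
    }
}

After editing, push keepalived.service again so both nodes receive the updated configuration.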

[root@server5 salt]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:14:9a:8d brd ff:ff:ff:ff:ff:ff
    inet 172.25.24.5/24 brd 172.25.24.255 scope global eth0
    inet 172.25.24.100/32 scope global eth0
    inet6 fe80::5054:ff:fe14:9a8d/64 scope link 
       valid_lft forever preferred_lft forever
[root@server5 files]# cd /etc/init.d/
[root@server5 init.d]# ls
auditd            haproxy       keepalived    netfs        rhnsd        sandbox
blk-availability  htcacheclean  killall       network      rhsmcertd    saslauthd
crond             httpd         lvm2-lvmetad  postfix      rsyslog      single
functions         ip6tables     lvm2-monitor  rdisc        salt-master  sshd
halt              iptables      netconsole    restorecond  salt-minion  udev-post
[root@server5 init.d]# chmod -x haproxy 

Server8:

[root@server8 init.d]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:b7:76:eb brd ff:ff:ff:ff:ff:ff
    inet 172.25.24.8/24 brd 172.25.24.255 scope global eth0
    inet 172.25.24.100/32 scope global eth0
    inet6 fe80::5054:ff:feb7:76eb/64 scope link 
       valid_lft forever preferred_lft forever
You have mail in /var/spool/mail/root
[root@server8 init.d]# /etc/init.d/haproxy status
haproxy (pid  15363) is running...

Refreshing the browser shows the site is still reachable through the VIP, now served by haproxy on server8.

Storing returned data

[root@server5 opt]# yum install -y mysql-server  ## install the database server on the master

Method 1:
The minion writes the return data itself; this requires MySQL-python.x86_64 on the minion. Only server6 is used for the demonstration.

server6:

[root@server6 html]# yum install MySQL-python.x86_64 -y
[root@server6 html]# vim /etc/salt/minion

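The minion edit points the MySQL returner at the database on the master. A sketch of the settings appended to /etc/salt/minion (values assumed to match the grant below; restart salt-minion afterwards):

mysql.host: '172.25.24.5'
mysql.user: 'salt'
mysql.pass: 'westos'
mysql.db: 'salt'
mysql.port: 3306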

Grant database privileges and import the salt schema

server5:

[root@server5 opt]# /etc/init.d/mysqld start


mysql> grant all on salt.* to salt@'172.25.24.%' identified by 'westos';   ## grant privileges
Query OK, 0 rows affected (0.00 sec)

[root@server5 ~]# cat test.sql 
CREATE DATABASE  `salt`
  DEFAULT CHARACTER SET utf8
  DEFAULT COLLATE utf8_general_ci;

USE `salt`;

--
-- Table structure for table `jids`
--

DROP TABLE IF EXISTS `jids`;
CREATE TABLE `jids` (
  `jid` varchar(255) NOT NULL,
  `load` mediumtext NOT NULL,
  UNIQUE KEY `jid` (`jid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
#CREATE INDEX jid ON jids(jid) USING BTREE;

--
-- Table structure for table `salt_returns`
--

DROP TABLE IF EXISTS `salt_returns`;
CREATE TABLE `salt_returns` (
  `fun` varchar(50) NOT NULL,
  `jid` varchar(255) NOT NULL,
  `return` mediumtext NOT NULL,
  `id` varchar(255) NOT NULL,
  `success` varchar(10) NOT NULL,
  `full_ret` mediumtext NOT NULL,
  `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  KEY `id` (`id`),
  KEY `jid` (`jid`),
  KEY `fun` (`fun`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

--
-- Table structure for table `salt_events`
--

DROP TABLE IF EXISTS `salt_events`;
CREATE TABLE `salt_events` (
`id` BIGINT NOT NULL AUTO_INCREMENT,
`tag` varchar(255) NOT NULL,
`data` mediumtext NOT NULL,
`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
`master_id` varchar(255) NOT NULL,
PRIMARY KEY (`id`),
KEY `tag` (`tag`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
[root@server5 ~]# mysql < test.sql    ## import the salt schema


mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| salt               |
| test               |
+--------------------+
4 rows in set (0.00 sec)

mysql> use salt
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
mysql> show tables;
+----------------+
| Tables_in_salt |
+----------------+
| jids           |
| salt_events    |
| salt_returns   |
+----------------+
3 rows in set (0.00 sec)

Test:

[root@server5 ~]# salt 'server6' test.ping --return mysql
server6:
    True


Method 2: the master stores the returns itself; only the master needs to be configured.

[root@server5 ~]# vim /etc/salt/master

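The master edit enables the MySQL job cache so every return is stored by the master itself. A sketch of the lines added to /etc/salt/master (values assumed to match the local grant below; restart salt-master afterwards):

master_job_cache: mysql
mysql.host: 'localhost'
mysql.user: 'salt'
mysql.pass: 'westos'
mysql.db: 'salt'
mysql.port: 3306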

Grant privileges (this time for local access from the master):

[root@server5 ~]#  yum install MySQL-python.x86_64 -y



mysql> grant all on salt.* to salt@localhost identified by 'westos';
Query OK, 0 rows affected (0.00 sec)

Test:
Push something with a salt command.

mysql> select * from salt_returns;


Writing and loading custom modules

[root@server5 init.d]# mkdir /srv/salt/_modules

[root@server5 salt]# cd  _modules/
[root@server5 _modules]# vim my_disk.py
[root@server5 _modules]# cat my_disk.py 
#!/usr/bin/env python

def df():
    return __salt__['cmd.run']('df -h')


[root@server5 _modules]# salt '*' saltutil.sync_modules  ## sync the custom module to all minions

server6:
    - modules.my_disk
server8:
    - modules.my_disk
server5:
    - modules.my_disk
server7:
    - modules.my_disk


[root@server5 _modules]# salt '*' my_disk.df
[WARNING ] Returner unavailable: MySQL returner could not connect to database: (1045, "Access denied for user 'salt'@'localhost' (using password: YES)")
server7:
    Filesystem                    Size  Used Avail Use% Mounted on
    /dev/mapper/VolGroup-lv_root   19G  1.1G   17G   7% /
    tmpfs                         499M   32K  499M   1% /dev/shm
    /dev/vda1                     485M   33M  427M   8% /boot
server6:
    Filesystem                    Size  Used Avail Use% Mounted on
    /dev/mapper/VolGroup-lv_root   19G  993M   17G   6% /
    tmpfs                         499M   32K  499M   1% /dev/shm
    /dev/vda1                     485M   33M  427M   8% /boot
server8:
    Filesystem                    Size  Used Avail Use% Mounted on
    /dev/mapper/VolGroup-lv_root   19G  1.1G   17G   7% /
    tmpfs                         499M   16K  499M   1% /dev/shm
    /dev/vda1                     485M   33M  427M   8% /boot
server5:
    Filesystem                    Size  Used Avail Use% Mounted on
    /dev/mapper/VolGroup-lv_root   19G  1.2G   17G   7% /
    tmpfs                         499M   36K  499M   1% /dev/shm
    /dev/vda1                     485M   33M  427M   8% /boot

Topmaster:

The topmaster manages the masters below it; there can be several masters, but only one topmaster.

server9:topmaster

[root@server9 ~]# yum install -y salt-master
[root@server9 ~]# vim /etc/salt/master   ## enable the top-master role

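The change marks server9 as a master of masters. A sketch of the line enabled in /etc/salt/master on server9 (the standard option for this role):

order_masters: True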

[root@server9 ~]# /etc/init.d/salt-master start
Starting salt-master daemon:                               [  OK  ]

server5:master

[root@server5 _modules]# yum install -y salt-syndic

[root@server5 _modules]# vim /etc/salt/master

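On server5 the syndic must point at its upper master. A sketch of the line added to /etc/salt/master (assuming server9 resolves to 172.25.24.9):

syndic_master: 172.25.24.9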

[root@server5 ~]# /etc/init.d/salt-master status
salt-master (pid  1005) is running...
[root@server5 ~]# /etc/init.d/salt-master start
Starting salt-master daemon: 
[root@server5 ~]# /etc/init.d/salt-syndic start
Starting salt-syndic daemon:                               [  OK  ]

Test:

[root@server5 ~]# salt-key -L
Accepted Keys:
server5
server6
server7
server8
Denied Keys:
Unaccepted Keys:
Rejected Keys:




[root@server9 ~]# salt-key -A
The following keys are going to be accepted:
Unaccepted Keys:
server5
Proceed? [n/Y] Y
Key for minion server5 accepted.

[root@server9 ~]# salt-key -L
Accepted Keys:
server5
Denied Keys:
Unaccepted Keys:
Rejected Keys:

[root@server9 salt]# salt '*' test.ping
server7:
    True
server5:
    True
server6:
    True
server8:
    True

Remote deployment without a minion (salt-ssh)

Server6:

[root@server6 html]# /etc/init.d/salt-minion stop
Stopping salt-minion:root:server6 daemon: OK

Server5:

[root@server5 ~]# yum install -y salt-ssh

[root@server5 ~]# vim /etc/salt/roster

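A sketch of the roster entry for server6 (host address and password are assumptions):

server6:
  host: 172.25.24.6
  user: root
  passwd: westos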

[root@server5 ~]# vim /etc/salt/master  ## comment out the earlier database return settings so they do not interfere

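For example, assuming the job-cache lines added earlier, they are simply commented out:

#master_job_cache: mysql
#mysql.host: 'localhost'
#mysql.user: 'salt'
#mysql.pass: 'westos'
#mysql.db: 'salt'
#mysql.port: 3306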

[root@server5 ~]# salt-ssh 'server6' test.ping
server6:
    True
[root@server5 ~]# salt-ssh 'server6' my_disk.df
server6:
    Filesystem                    Size  Used Avail Use% Mounted on
    /dev/mapper/VolGroup-lv_root   19G 1019M   17G   6% /
    tmpfs                         499M   64K  499M   1% /dev/shm
    /dev/vda1                     485M   33M  427M   8% /boot
[root@server5 ~]# salt-ssh 'server6' test.ping -i
server6:
    True

salt-api operations

Install salt-api

[root@server5 ~]# yum install -y salt-api

Generate localhost.key

[root@server5 ~]# cd /etc/pki/
[root@server5 pki]# ls
CA  ca-trust  entitlement  java  nssdb  product  rpm-gpg  rsyslog  tls
[root@server5 pki]# cd tls/
[root@server5 tls]# cd private/
[root@server5 private]# ls
[root@server5 private]# openssl genrsa 1024
[root@server5 private]# openssl genrsa 1024 >localhost.key
Generating RSA private key, 1024 bit long modulus
..................................++++++
.............++++++
e is 65537 (0x10001)

Generate localhost.crt

[root@server5 private]# cd ..
[root@server5 tls]# cd certs/
[root@server5 certs]# ls
ca-bundle.crt  ca-bundle.trust.crt  make-dummy-cert  Makefile  renew-dummy-cert
[root@server5 certs]# make testcert
umask 77 ; \
    /usr/bin/openssl req -utf8 -new -key /etc/pki/tls/private/localhost.key -x509 -days 365 -out /etc/pki/tls/certs/localhost.crt -set_serial 0
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [XX]:cn
State or Province Name (full name) []:shaanxi
Locality Name (eg, city) [Default City]:xi'an
Organization Name (eg, company) [Default Company Ltd]:westos
Organizational Unit Name (eg, section) []:linux
Common Name (eg, your name or your server's hostname) []:server5
Email Address []:root@localhost


[root@server5 certs]# ls
ca-bundle.crt  ca-bundle.trust.crt  localhost.crt  make-dummy-cert  Makefile  renew-dummy-cert

Write the api.conf file

[root@server5 master.d]# vim api.conf
rest_cherrypy:
  port: 8000
  ssl_crt: /etc/pki/tls/certs/localhost.crt
  ssl_key: /etc/pki/tls/private/localhost.key
[root@server5 master.d]# ll /etc/pki/tls/certs/localhost.crt
-rw------- 1 root root 1029 Aug 18 17:20 /etc/pki/tls/certs/localhost.crt

[root@server5 master.d]# ll /etc/pki/tls/private/localhost.key
-rw-r--r-- 1 root root 887 Aug 18 17:18 /etc/pki/tls/private/localhost.key

Write the auth.conf file

[root@server5 master.d]# ls
api.conf  auth.conf
[root@server5 master.d]# pwd
/etc/salt/master.d
[root@server5 master.d]#  cat auth.conf 
external_auth:
  pam:
    saltapi:
      - '.*'
      - '@wheel'
      - '@runner'
      - '@jobs'

Create the saltapi user

[root@server5 ~]# useradd saltapi
[root@server5 ~]# passwd saltapi
Changing password for user saltapi.
New password: 
BAD PASSWORD: it is based on a dictionary word
BAD PASSWORD: is too simple
Retype new password: 
passwd: all authentication tokens updated successfully.

Restart the services

[root@server5 ~]# /etc/init.d/salt-master stop
Stopping salt-master daemon:                               [  OK  ]
[root@server5 ~]# /etc/init.d/salt-master status
salt-master is stopped
[root@server5 ~]# /etc/init.d/salt-master start
Starting salt-master daemon:                               [  OK  ]
[root@server5 ~]# /etc/init.d/salt-api start
Starting salt-api daemon:                                  [  OK  ]
[root@server5 ~]# netstat -antlp |grep :8000
tcp        0      0 0.0.0.0:8000                0.0.0.0:*                   LISTEN      3558/salt-api -d    
tcp        0      0 127.0.0.1:33893             127.0.0.1:8000              TIME_WAIT   -                   
[root@server5 ~]# 
[root@server5 ~]# curl -sSk https://localhost:8000/login -H 'Accept: application/x-yaml' -d username=saltapi  -d password=westos -d eauth=pam
return:
- eauth: pam
  expire: 1534630450.5352709
  perms:
  - .*
  - '@wheel'
  - '@runner'
  - '@jobs'
  start: 1534587250.535269
  token: 04aa264ab2c9a0f4fa5b3e39176fc624931789d9
  user: saltapi
[root@server5 ~]# curl -sSk https://localhost:8000 -H 'Accept: application/x-yaml' -H 'X-Auth-Token: 04aa264ab2c9a0f4fa5b3e39176fc624931789d9' \
-d client=local \
-d tgt='*' \
-d fun=test.ping

return:
- server5: true
  server6: true
  server7: true
  server8: true
[root@server5 ~]# cat saltapi.py 
# -*- coding: utf-8 -*-

import urllib2,urllib
import time

try:
    import json
except ImportError:
    import simplejson as json

class SaltAPI(object):
    __token_id = ''
    def __init__(self,url,username,password):
        self.__url = url.rstrip('/')
        self.__user = username
        self.__password = password

    def token_id(self):
        ''' user login and get token id '''
        params = {'eauth': 'pam', 'username': self.__user, 'password': self.__password}
        encode = urllib.urlencode(params)
        obj = urllib.unquote(encode)
        content = self.postRequest(obj,prefix='/login')
        try:
            self.__token_id = content['return'][0]['token']
        except KeyError:
            raise KeyError

    def postRequest(self,obj,prefix='/'):
        url = self.__url + prefix
        headers = {'X-Auth-Token'   : self.__token_id}
        req = urllib2.Request(url, obj, headers)
        opener = urllib2.urlopen(req)
        content = json.loads(opener.read())
        return content

    def list_all_key(self):
        params = {'client': 'wheel', 'fun': 'key.list_all'}
        obj = urllib.urlencode(params)
        self.token_id()
        content = self.postRequest(obj)
        minions = content['return'][0]['data']['return']['minions']
        minions_pre = content['return'][0]['data']['return']['minions_pre']
        return minions,minions_pre

    def delete_key(self,node_name):
        params = {'client': 'wheel', 'fun': 'key.delete', 'match': node_name}
        obj = urllib.urlencode(params)
        self.token_id()
        content = self.postRequest(obj)
        ret = content['return'][0]['data']['success']
        return ret

    def accept_key(self,node_name):
        params = {'client': 'wheel', 'fun': 'key.accept', 'match': node_name}
        obj = urllib.urlencode(params)
        self.token_id()
        content = self.postRequest(obj)
        ret = content['return'][0]['data']['success']
        return ret

    def remote_noarg_execution(self,tgt,fun):
        ''' Execute commands without parameters '''
        params = {'client': 'local', 'tgt': tgt, 'fun': fun}
        obj = urllib.urlencode(params)
        self.token_id()
        content = self.postRequest(obj)
        ret = content['return'][0][tgt]
        return ret

    def remote_execution(self,tgt,fun,arg):
        ''' Command execution with parameters '''        
        params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg}
        obj = urllib.urlencode(params)
        self.token_id()
        content = self.postRequest(obj)
        ret = content['return'][0][tgt]
        return ret

    def target_remote_execution(self,tgt,fun,arg):
        ''' Use targeting for remote execution '''
        params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg, 'expr_form': 'nodegroup'}
        obj = urllib.urlencode(params)
        self.token_id()
        content = self.postRequest(obj)
        jid = content['return'][0]['jid']
        return jid

    def deploy(self,tgt,arg):
        ''' Module deployment '''
        params = {'client': 'local', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg}
        obj = urllib.urlencode(params)
        self.token_id()
        content = self.postRequest(obj)
        return content

    def async_deploy(self,tgt,arg):
        ''' Asynchronously send a command to connected minions '''
        params = {'client': 'local_async', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg}
        obj = urllib.urlencode(params)
        self.token_id()
        content = self.postRequest(obj)
        jid = content['return'][0]['jid']
        return jid

    def target_deploy(self,tgt,arg):
        ''' Based on the node group forms deployment '''
        params = {'client': 'local_async', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg, 'expr_form': 'nodegroup'}
        obj = urllib.urlencode(params)
        self.token_id()
        content = self.postRequest(obj)
        jid = content['return'][0]['jid']
        return jid

def main():
    sapi = SaltAPI(url='https://172.25.24.5:8000',username='saltapi',password='westos')
   # sapi.token_id()
   # print sapi.list_all_key()
    #sapi.delete_key('test-01')
    #sapi.accept_key('test-01')
    sapi.deploy('server7','nginx.nginx-server')
    #print sapi.remote_noarg_execution('test-01','grains.items')

if __name__ == '__main__':
    main()
[root@server5 ~]# chmod 644 saltapi.py


[root@server7 ~]# /etc/init.d/nginx stop
Stopping nginx:                                            [  OK  ]
[root@server5 ~]# python saltapi.py 
[root@server7 ~]# /etc/init.d/nginx status
nginx (pid 4815 4814 4812) is running...