一.自动部署keepalived
创建sls文件
cd /srv/salt
mkdir keepalived/
cd keepalived/
vim init.sls
# Install keepalived, render its config from a Jinja template using
# per-minion pillar values (state/vrid/pri), and keep the service running.
# (Indentation reconstructed — SLS is whitespace-significant.)
kp-install:
  pkg.installed:
    - name: keepalived
  file.managed:
    - name: /etc/keepalived/keepalived.conf
    - source: salt://keepalived/keepalived.conf
    - template: jinja
    - context:
        # Values come from the kp.sls pillar (MASTER/BACKUP role per minion)
        STATE: {{ pillar['state'] }}
        VRID: {{ pillar['vrid'] }}
        PRI: {{ pillar['pri'] }}
  service.running:
    - name: keepalived
    - enable: true
    - reload: true
    - watch:
      # reload keepalived whenever the managed config file changes
      - file: kp-install
修改jinja模板
在server6上下载keepalived
yum install -y keepalived
cd /etc/keepalived
scp keepalived.conf server5:/srv/salt/keepalived/
接着把传到server5的配置文件修改一下:
vim keepalived.conf
! Configuration File for keepalived
! Jinja template: STATE / VRID / PRI are substituted by the kp-install state.
global_defs {
notification_email {
root@localhost
}
notification_email_from keepalived@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
#vrrp_strict # must stay commented out: vrrp_strict installs keepalived's own firewall rules, and with it enabled the VIP returns 404 / is unreachable
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state {{ STATE }}
interface eth0
virtual_router_id {{ VRID }}
priority {{ PRI }}
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
! NOTE(review): later tests curl 172.25.76.100 — confirm this VIP matches your lab subnet
172.25.0.100
}
}
定义pillar值
server5:
cd /srv/pillar/
vim pkgs.sls
{# Pillar: per-minion web package and port, selected by the FQDN grain. #}
{# NOTE(review): later sections treat server7 as an nginx node — verify
   whether 'package: httpd' is intended here for server7. #}
{% if grains['fqdn'] == 'server6' %}
package: httpd
port: 80
{% elif grains['fqdn'] == 'server7' %}
package: httpd
port: 8080
{% endif %}
cp pkgs.sls kp.sls
vim kp.sls
{# Pillar: keepalived role per minion — server6 is MASTER (priority 100),
   server7 is BACKUP (priority 50); both share virtual router id 1. #}
{% if grains['fqdn'] == 'server6' %}
state: MASTER
vrid: 1
pri: 100
{% elif grains['fqdn'] == 'server7' %}
state: BACKUP
vrid: 1
pri: 50
{% endif %}
vim top.sls
# Pillar top file: apply the pkgs and kp pillar data to every minion.
# (Indentation reconstructed — SLS is whitespace-significant.)
base:
  '*':
    - pkgs
    - kp
执行:
salt '*' state.sls keepalived
准备Top文件
cd /srv/salt/
vim top.sls
# State top file: target minions by the 'roles' grain; each role gets
# its web server state plus keepalived.
# (Indentation reconstructed — SLS is whitespace-significant.)
base:
  'roles:apache':
    - match: grain
    - apache
    - keepalived
  'roles:nginx':
    - match: grain
    - nginx
    - keepalived
批量执行:
salt '*' state.highstate
执行结果如下:
在server6上发现vip已经出现
我们修改一下server6的默认发布页面,为了方便测试
cd /var/www/html/
echo server6 > index.html
测试:
在真机上访问vip,查看返回值
curl 172.25.76.100 #自己的IP
此时若是把server6的keepalived停掉,会发现vip飘到了server7上:
systemctl stop keepalived
在server7上查看IP
ip addr
再次在真机访问一下vip
curl 172.25.76.100
会发现变为server7的nginx的发布页面
salt '*' state.highstate
此时重新推送一下,会发现server6的keepalived又重新恢复了,vip飘到了server6上了
再次在真机上curl一下vip,返回值变为了server6
二.JOB管理
Job简介
master在下发指令任务时,会附带上产生的jid。
minion在接收到指令开始执行时,会在本地的/var/cache/salt/minion/proc目录下产生该jid命名的文件,用于在执行过程中master查看当前任务的执行情况。
指令执行完毕将结果传送给master后,删除该临时文件。
JOB CACHE
Job缓存默认保存24小时
Job存储到数据库
在server5上操作
yum install mariadb-server.x86_64 -y
systemctl start mariadb.service
yum install -y MySQL-python.x86_64
安全初始化一下数据库
mysql_secure_installation
(一)老版本需要从minion端传到master
cd
vim job.sql
-- Schema for Salt's MySQL job cache / returner ("salt" database).
CREATE DATABASE `salt`
DEFAULT CHARACTER SET utf8
DEFAULT COLLATE utf8_general_ci;
USE `salt`;
--
-- Table structure for table `jids`
-- One row per job id; `load` holds the serialized job payload.
--
DROP TABLE IF EXISTS `jids`;
CREATE TABLE `jids` (
`jid` varchar(255) NOT NULL,
`load` mediumtext NOT NULL,
UNIQUE KEY `jid` (`jid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `salt_returns`
-- One row per minion return: function, job id, minion id, result.
--
DROP TABLE IF EXISTS `salt_returns`;
CREATE TABLE `salt_returns` (
`fun` varchar(50) NOT NULL,
`jid` varchar(255) NOT NULL,
`return` mediumtext NOT NULL,
`id` varchar(255) NOT NULL,
`success` varchar(10) NOT NULL,
`full_ret` mediumtext NOT NULL,
`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
KEY `id` (`id`),
KEY `jid` (`jid`),
KEY `fun` (`fun`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Table structure for table `salt_events`
-- Event bus records tagged by `tag`, JSON payload in `data`.
--
DROP TABLE IF EXISTS `salt_events`;
CREATE TABLE `salt_events` (
`id` BIGINT NOT NULL AUTO_INCREMENT,
`tag` varchar(255) NOT NULL,
`data` mediumtext NOT NULL,
`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
`master_id` varchar(255) NOT NULL,
PRIMARY KEY (`id`),
KEY `tag` (`tag`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
把它导入数据库
mysql -pwestos < job.sql
查看一下数据库的表和库
修改server6的配置文件:
接着修改minion的配置文件
vim /etc/salt/minion
# MySQL connection settings used by `--return mysql` on this minion.
# NOTE(review): master_job_cache is normally a *master* option — confirm
# it is intended in the minion config for this (old-style) setup.
master_job_cache: mysql
mysql.host: '172.25.76.5'
mysql.user: 'salt'
mysql.pass: 'salt'
mysql.db: 'salt'
mysql.port: 3306
systemctl restart salt-minion.service
在server5上添加用户认证
mysql -pwestos salt
MariaDB [(none)]> grant all on salt.* to salt@'%' identified by 'salt';
测试一下
salt server6 my_disk.df --return mysql
然后在mysql 查看一下数据:
mysql -pwestos salt
MariaDB [(none)]> use salt;
MariaDB [salt]> select * from salt_returns\G;
(二)新版本,直接从master端传到数据库
server5:
yum install -y MySQL-python.x86_64
vim /etc/salt/master
# Master-side job cache: every job return is written straight into the
# local MySQL 'salt' database (no per-minion returner needed).
master_job_cache: mysql
mysql.host: 'localhost'
mysql.user: 'salt'
mysql.pass: 'salt'
mysql.db: 'salt'
mysql.port: 3306
systemctl restart salt-master
先在server5上ping 一下server67然后在mysql里面查看记录
salt '*' test.ping
mysql -usalt -psalt salt
MariaDB [(none)]> use salt;
MariaDB [salt]> select * from salt_returns\G;
看到server67的记录如下即可
若是只有一个,看看server6、server7哪个的minion服务没有开启
三.salt-ssh
1.简介
salt-ssh可以独立运行的,不需要minion端。
salt-ssh 用的是sshpass进行密码交互的。
以串行模式工作,性能下降。
2.安装配置
yum install salt-ssh.noarch -y
cd /etc/salt/
vim roster
# salt-ssh roster: targets reached over SSH/sshpass, no minion daemon.
# host/user/passwd must be nested under the target id (indentation
# reconstructed).
server6:
  host: 172.25.76.6
  user: root
  passwd: westos
我们需要停掉server6的minion服务进行测试:
systemctl stop salt-minion.service
测试:
salt-ssh '*' test.ping
salt-ssh '*' my_disk.df
四.salt-syndic
1.salt-syndic简介
syndic其实就是个代理,隔离master与minion。
Syndic必须要运行在master上,再连接到另一个topmaster上。
Topmaster 下发的状态需要通过syndic来传递给下级master,minion传递给master的数据也是由syndic传递给topmaster。
topmaster并不知道有多少个minion。
syndic与topmaster的file_roots和pillar_roots的目录要保持一致。
从下面的结构图中很好理解:
这里我们需要配置一台新的虚拟机:server4作为顶级master
yum install -y salt-master
vim /etc/salt/master
order_masters: True
systemctl enable --now salt-master.service
server5安装配置
yum install -y salt-syndic.noarch
systemctl enable --now salt-syndic.service
vim /etc/salt/master
syndic_master: 172.25.76.4
systemctl restart salt-master.service
systemctl start salt-syndic
在server4上获得密钥
salt-key -L
salt-key -A
因为前面把server6的minion服务停止了,现在重新打开它!
systemctl start salt-minion
测试:
salt '*' state.sls keepalived
我们发现在server4上执行,其实他是调度server5上的master去执行的
五.salt-api配置
1.下载api
server5:
yum -y install salt-api.noarch
2.salt-api配置
2.1生成证书
cd /etc/pki/tls/private/
openssl genrsa 1024
openssl genrsa 1024 > localhost.key
cd ..
cd certs/
make testcert
3.激活rest_cherrypy
将localhost.key,localhost.crt的绝对路径写进去
cd /etc/salt/master.d/
vim api.conf
# Enable the CherryPy REST interface for salt-api on HTTPS port 8000,
# using the self-signed cert generated with `make testcert`.
# (Indentation reconstructed — options must nest under rest_cherrypy.)
rest_cherrypy:
  port: 8000
  ssl_crt: /etc/pki/tls/certs/localhost.crt
  ssl_key: /etc/pki/tls/private/localhost.key
4.创建用户认证
vim auth.conf
# PAM external auth: the saltapi system user may run every execution
# module (.*) plus the wheel, runner, and jobs clients.
# (Indentation reconstructed — SLS/YAML is whitespace-significant.)
external_auth:
  pam:
    saltapi:
      - .*
      - '@wheel'
      - '@runner'
      - '@jobs'
创建saltapi用户
useradd saltapi
passwd saltapi
修改之后重启服务:
systemctl restart salt-master.service
systemctl enable --now salt-api.service
netstat -antlp | grep :8000
5.salt-api使用
5.1获取认证token
curl -sSk https://172.25.76.5:8000/login -H 'Accept: application/x-yaml' -d username=saltapi -d password=westos -d eauth=pam
会生成token
6.推送任务
curl -sSk https://172.25.76.5:8000 -H 'Accept: application/x-yaml' -H 'X-Auth-Token: ****************' -d username=saltapi -d password=westos -d client=local -d tgt='*' -d fun=test.ping
六.结合python脚本直接推送任务
在server5上
cd
vim saltapi.py
# -*- coding: utf-8 -*-
import urllib2,urllib
import time
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
try:
import json
except ImportError:
import simplejson as json
class SaltAPI(object):
__token_id = ''
def __init__(self,url,username,password):
self.__url = url.rstrip('/')
self.__user = username
self.__password = password
def token_id(self):
''' user login and get token id '''
params = {'eauth': 'pam', 'username': self.__user, 'password': self.__password}
encode = urllib.urlencode(params)
obj = urllib.unquote(encode)
content = self.postRequest(obj,prefix='/login')
try:
self.__token_id = content['return'][0]['token']
except KeyError:
raise KeyError
def postRequest(self,obj,prefix='/'):
url = self.__url + prefix
headers = {'X-Auth-Token' : self.__token_id}
req = urllib2.Request(url, obj, headers)
opener = urllib2.urlopen(req)
content = json.loads(opener.read())
return content
def list_all_key(self):
params = {'client': 'wheel', 'fun': 'key.list_all'}
obj = urllib.urlencode(params)
self.token_id()
content = self.postRequest(obj)
minions = content['return'][0]['data']['return']['minions']
minions_pre = content['return'][0]['data']['return']['minions_pre']
return minions,minions_pre
def delete_key(self,node_name):
params = {'client': 'wheel', 'fun': 'key.delete', 'match': node_name}
obj = urllib.urlencode(params)
self.token_id()
content = self.postRequest(obj)
ret = content['return'][0]['data']['success']
return ret
def accept_key(self,node_name):
params = {'client': 'wheel', 'fun': 'key.accept', 'match': node_name}
obj = urllib.urlencode(params)
self.token_id()
content = self.postRequest(obj)
ret = content['return'][0]['data']['success']
return ret
def remote_noarg_execution(self,tgt,fun):
''' Execute commands without parameters '''
params = {'client': 'local', 'tgt': tgt, 'fun': fun}
obj = urllib.urlencode(params)
self.token_id()
content = self.postRequest(obj)
ret = content['return'][0][tgt]
return ret
def remote_execution(self,tgt,fun,arg):
''' Command execution with parameters '''
params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg}
obj = urllib.urlencode(params)
self.token_id()
content = self.postRequest(obj)
ret = content['return'][0][tgt]
return ret
def target_remote_execution(self,tgt,fun,arg):
''' Use targeting for remote execution '''
params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg, 'expr_form': 'nodegroup'}
obj = urllib.urlencode(params)
self.token_id()
content = self.postRequest(obj)
jid = content['return'][0]['jid']
return jid
def deploy(self,tgt,arg):
''' Module deployment '''
params = {'client': 'local', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg}
obj = urllib.urlencode(params)
self.token_id()
content = self.postRequest(obj)
return content
def async_deploy(self,tgt,arg):
''' Asynchronously send a command to connected minions '''
params = {'client': 'local_async', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg}
obj = urllib.urlencode(params)
self.token_id()
content = self.postRequest(obj)
jid = content['return'][0]['jid']
return jid
def target_deploy(self,tgt,arg):
''' Based on the node group forms deployment '''
params = {'client': 'local_async', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg, 'expr_form': 'nodegroup'}
obj = urllib.urlencode(params)
self.token_id()
content = self.postRequest(obj)
jid = content['return'][0]['jid']
return jid
def main():
sapi = SaltAPI(url='https://172.25.76.5:8000',username='saltapi',password='westos')
sapi.token_id()
print sapi.list_all_key()
#sapi.delete_key('test-01')
#sapi.accept_key('test-01')
sapi.deploy('server6','apache')
#print sapi.remote_noarg_execution('test-01','grains.items')
if __name__ == '__main__':
main()
SaltAPI(url=‘https://172.25.76.5:8000’ 要写自己的ip地址
print sapi.list_all_key() 打开会输出推送的提示,注释起来直接推送服务没有提示
我们先打开它,执行结果如下:
当注释起来之后,
先手动停止server6上的httpd服务:
systemctl stop httpd
看到进程没有httpd之后,在server5再次推送一下脚本
server5:
python saltapi.py
在server6上查看httpd
systemctl status httpd
可以看到httpd服务开启
七、自动化部署zabbix
实验环境介绍:
两台rhel7.6的虚拟机server5、server7,其中server5为saltstack的master端,server7为saltstack的minion端,在server7上配置zabbix的所有东西,火墙和selinux全部关掉。
server7端:
配置软件仓库
vim /etc/yum.repos.d/zabbix.repo
# Yum repository for Zabbix 4.0, served from the lab mirror host.
# gpgcheck disabled — acceptable only in this closed lab environment.
[zabbix]
name=zabbix4.0
baseurl=http://172.25.76.250/4.0
gpgcheck=0
分别对zabbix-server,zabbix-db,zabbix-agent,zabbix-web进行使用saltstack进行安装配置。
cd /srv/salt/
mkdir zabbix-server
mkdir zabbix-db
mkdir zabbix-agent
mkdir zabbix-web
mkdir /srv/salt/zabbix-repo
使用saltstack配置zabbix
安装包下载
先安装所有需要的包,包括zabbix-server-mysql,zabbix-agent,zabbix-web-mysql,mariadb-server,MySQL-python
vim /srv/salt/zabbix-repo/init.sls
# Install the whole zabbix stack in one pkg transaction: server, agent,
# web frontend, MariaDB, and MySQL-python (needed by Salt's mysql_* states).
# (Indentation reconstructed — SLS is whitespace-significant.)
all-install:
  pkg.installed:
    - pkgs:
      - zabbix-server-mysql
      - zabbix-agent
      - zabbix-web-mysql
      - mariadb-server
      - MySQL-python
测试,执行:
salt server7 state.sls zabbix-repo
所有包安装完成,查看/usr/share/doc/zabbix-server-mysql-4.0.5中create.sql.gz是否存在,为zabbix-db配置做准备。
cd /usr/share/doc/zabbix-server-mysql-4.0.5/
ls
将数据包传到server5的zabbix-db中为修改后续数据库配置作准备。
scp create.sql.gz server5:/srv/salt/zabbix-db/
数据库配置
创建库,授权用户,导入数据
cd zabbix-db/
vim init.sls
# Start MariaDB, create the zabbix database/user/grants, ship the schema
# dump to the minion, and import it exactly once.
# (Indentation reconstructed — SLS is whitespace-significant.)
db-running:
  service.running:
    - name: mariadb

db-configure:
  mysql_database.present:
    - name: zabbix
    - character_set: utf8
    - collate: utf8_bin
    - connection_user: root
    - connection_pass:
  mysql_user.present:
    - name: zabbix
    - host: localhost
    - password: westos
    - connection_user: root
    - connection_pass:
  mysql_grants.present:
    - grant: all privileges
    - database: zabbix.*
    - user: zabbix
    - connection_user: root
    - connection_pass:
  file.managed:
    - name: /usr/share/doc/zabbix-server-mysql-4.0.5/create.sql.gz
    - source: salt://zabbix-db/create.sql.gz
  # cmd.wait only fires when a watched state reports changes; cmd.run
  # would re-import the schema on every run and fail the second time.
  cmd.wait:
    - name: zcat /usr/share/doc/zabbix-server-mysql-4.0.5/create.sql.gz | mysql -uroot zabbix
    - watch:
      - mysql_database: db-configure
执行
salt server7 state.sls zabbix-db
进入server7数据库中查看导入结果
配置zabbix文件并启动服务
将server7中已经zabbix-server主配置文件传给server5中
cd /etc/zabbix/
scp zabbix_server.conf server5:/srv/salt/zabbix-server/
server5中修改配置文件
cd ..
vim zabbix_server.conf
vim init.sls
# Manage zabbix_server.conf and keep the daemon enabled and running;
# the watch requisite reloads it whenever the config file changes.
# (Indentation reconstructed — SLS is whitespace-significant.)
zabbix-server:
  file.managed:
    - name: /etc/zabbix/zabbix_server.conf
    - source: salt://zabbix-server/zabbix_server.conf
  service.running:
    - name: zabbix-server
    - enable: true
    - reload: true
    - watch:
      - file: zabbix-server
测试执行:
salt server7 state.sls zabbix-server
检查server7服务是否启动
进程显示zabbix-server已经开启,配置zabbix-server成功。
agent端开启服务
vim /srv/salt/zabbix-agent/init.sls
# Ensure the zabbix agent is running and enabled at boot.
# (Indentation reconstructed — SLS is whitespace-significant.)
zabbix-agent:
  service.running:
    - name: zabbix-agent
    - enable: true
执行
salt server7 state.sls zabbix-agent
server7中查看进程ps ax
zabbix-web配置并开启服务
server7中将apache虚拟机主机zabbix文件拷贝到server5中
scp /etc/httpd/conf.d/zabbix.conf server5:/srv/salt/zabbix-web/
修改文件中的时区
vim init.sls
# Manage the zabbix web frontend's Apache config (timezone adjusted)
# and restart httpd whenever it changes.
# (Indentation reconstructed — SLS is whitespace-significant.)
zabbix-web:
  file.managed:
    - name: /etc/httpd/conf.d/zabbix.conf
    - source: salt://zabbix-web/zabbix.conf
  service.running:
    - name: httpd
    - enable: true
    - watch:
      - file: zabbix-web
执行:
salt server7 state.sls zabbix-web
测试:
firefox:
http://172.25.76.7/zabbix/
highstate调用
vim /srv/salt/top.sls
# Top file: server7 receives the full zabbix stack via state.highstate.
# (Indentation reconstructed — SLS is whitespace-significant.)
base:
  'server7':
    - zabbix-repo
    - zabbix-db
    - zabbix-server
    - zabbix-agent
    - zabbix-web
highstate调用top
salt server7 state.highstate
执行成功,进入网页测试:
firefox:172.25.76.7/zabbix/