salt yum 源
[root@server1 ~]# cat /etc/yum.repos.d/rhel-source.repo
[rhel-source]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.254.53/rhel6.5
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
[salt]
name=salt
baseurl=http://172.25.254.53/pub/docs/saltstack/rhel6
gpgcheck=0
enabled=1
主安装
[root@server1 ~]# yum install salt-master -y
辅安装
[root@server2 ~]# yum install salt-minion -y
[root@server3 ~]# yum install salt-minion -y
在主中
[root@server1 ~]# salt-key -A 认证所有客户端
The following keys are going to be accepted:
Unaccepted Keys:
server2
Proceed? [n/Y] Y
Key for minion server2 accepted.
[root@server1 ~]# salt-key -L 查看
Accepted Keys:
server2
Denied Keys:
Unaccepted Keys:
Rejected Keys:
在辅中
[root@server2 ~]# vim /etc/salt/minion
master: 172.25.254.241    # master 端ip（minion 配置要求冒号语法 "master: <ip>"）
[root@server2 ~]# /etc/init.d/salt-minion start
Starting salt-minion:root:server2 daemon: OK
在主中启动
[root@server2 ~]# /etc/init.d/salt-master start
[root@server1 master]# /etc/salt/pki/master
[root@server1 master]# yum install tree -y
[root@server1 master]# tree .
.
|-- master.pem
|-- master.pub
|-- minions
| `-- server2
|-- minions_autosign
|-- minions_denied
|-- minions_pre
`-- minions_rejected
在辅中
[root@server2 pki]# cd /etc/salt/pki
[root@server2 pki]# tree .
.
|-- master
`-- minion
|-- minion_master.pub
|-- minion.pem
`-- minion.pub
在主中
[root@server1 master]# md5sum master.pub 校验
9d5dccce36d17ea0b4ce53ef04cd4622 master.pub
[root@server1 master]# cd minions
[root@server1 minions]# ls
server2
[root@server1 minions]# md5sum server2 校验 server2
a90fe3c310d71fbeef69a8609a0cd70c server2
在辅中
[root@server2 pki]# cd minion/
[root@server2 minion]# md5sum minion.pub
a90fe3c310d71fbeef69a8609a0cd70c minion.pub
推httpd服务
在主中
[root@server1 salt]# vim master
[root@server1 salt]# /etc/init.d/salt-master restart
Stopping salt-master daemon: [ OK ]
Starting salt-master daemon: [ OK ]
[root@server1 salt]# mkdir /srv/salt
[root@server1 salt]# cd /srv/salt/
[root@server1 salt]# cd apache/
[root@server1 apache]# vim web.sls
apache-install:
pkg.installed:
- pkgs:
- httpd
- php
[root@server1 apache]# salt server2 state.sls apache.web test=true 查看
server2:
----------
ID: apache-install
Function: pkg.installed
Result: None
Comment: The following packages would be installed/updated: php
Started: 10:41:45.326004
Duration: 368.109 ms
Changes:
Summary for server2
[root@server1 apache]# salt server2 state.sls apache.web 推送
server2:
----------
ID: apache-install
Function: pkg.installed
Result: True
Comment: The following packages were installed/updated: php
The following packages were already installed: httpd
Started: 10:41:58.334205
Duration: 8849.828 ms
在辅中查看
[root@server2 minion]# rpm -q httpd php 多增加两个包
httpd-2.2.15-29.el6_4.x86_64
php-5.3.3-26.el6.x86_64
在主中执行
# apache/web.sls — install httpd+php and keep the service running.
apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php
  service.running:
    - name: httpd
    - enable: true   # fixed typo "ture" — the misspelling is not a valid boolean
[root@server1 apache]# salt server2 state.sls apache.web
server2:
----------
ID: apache-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 10:48:40.205743
Duration: 356.056 ms
Changes:
----------
ID: apache-install
Function: service.running
Name: httpd
Result: True
Comment: Service httpd failed to start
Started: 10:48:40.562423
Duration: 306.968 ms
Changes:
推送给辅了httpd
在辅中拷贝httpd 文件给server1
[root@server2 conf]# scp /etc/httpd/conf/httpd.conf server1:/srv/salt/apache/files/
root@server1's password:
httpd.conf 100% 15 0.0KB/s 00:00
在主中
[root@server1 apache]# vim web.sls
# apache/web.sls — packages + managed config file + running service.
# The watch requisite reloads httpd whenever the managed file changes.
apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://apache/files/httpd.conf
    - user: root
    - group: root
    - mode: 644
  service.running:
    - name: httpd
    - enable: true    # fixed typo "ture"
    - reload: true    # fixed typo "ture"
    - watch:
      - file: apache-install
[root@server1 files]# salt server2 state.sls apache.web
server2:
----------
ID: apache-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 11:24:46.680700
Duration: 356.927 ms
Changes:
----------
ID: apache-install
Function: file.managed
Name: /etc/httpd/conf/httpd.conf
Result: True
Comment: File /etc/httpd/conf/httpd.conf is in the correct state
Started: 11:24:47.039400
Duration: 40.723 ms
Changes:
----------
ID: apache-install
Function: service.running
Name: httpd
Result: True
Comment: The service httpd is already running
Started: 11:24:47.080860
Duration: 33.448 ms
Changes:
Summary for server2
源码安装 nginx
[root@server1 salt]# mkdir nginx
[root@server1 nginx]# mkdir files
[root@server1 files]# ls
nginx-1.14.0.tar.gz
[root@server1 salt]# vim install.sls
# nginx/install.sls — fetch the tarball and compile nginx from source.
include:
  - pkgs.make

nginx-install:
  file.managed:
    - name: /mnt/nginx-1.14.0.tar.gz
    - source: salt://nginx/files/nginx-1.14.0.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf nginx-1.14.0.tar.gz && cd nginx-1.14.0 && ./configure --prefix=/usr/local/nginx && make &> /dev/null && make install &> /dev/null
    - creates: /usr/local/nginx
    # fixed "- require" (missing colon) — without the colon this is a plain
    # list item, not a requisite, and the pkg dependency is never enforced
    - require:
      - pkg: make
[root@server1 salt]# mkdir pkgs
[root@server1 pkgs]# vim make.sls
make:
pkg.installed:
- pkgs:
- gcc
- zlib-devel
- openssl-devel
- pcre-devel
[root@server1 nginx]# salt server2 state.sls nginx.install
server2:
----------
ID: make
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 14:50:52.832656
Duration: 401.331 ms
Changes:
----------
ID: nginx-install
Function: file.managed
Name: /mnt/nginx-1.14.0.tar.gz
Result: True
Comment: File /mnt/nginx-1.14.0.tar.gz is in the correct state
Started: 14:50:53.235827
Duration: 73.463 ms
Changes:
----------
ID: nginx-install
Function: cmd.run
Name: cd /mnt && tar zxf nginx-1.14.0.tar.gz && cd nginx-1.14.0 && ./configure --prefix=/usr/local/nginx && make &> /dev/null && make install &> /dev/null
Result: True
Comment: Command "cd /mnt && tar zxf nginx-1.14.0.tar.gz && cd nginx-1.14.0 && ./configure --prefix=/usr/local/nginx && make &> /dev/null && make install &> /dev/null" run
Started: 14:50:53.310236
Duration: 20116.861 ms
Changes:
[root@server1 files]# ls
nginx (启动脚本) nginx-1.14.0.tar.gz nginx.conf(server2 拷贝)
[root@server1 salt]# mkdir users
[root@server1 users]# vim add.sls
nginx:
user.present:
- uid: 800
- shell: /sbin/nologin
[root@server1 nginx]# vim service.sls
include:
- nginx.install
- users.add
/usr/local/nginx/conf/nginx.conf:
file.managed:
- source: salt://nginx/files/nginx.conf
nginx-service:
file.managed:
- name: /etc/init.d/nginx
- source: salt://nginx/files/nginx
- mode: 755
service.running:
- name: nginx
- enable: true
- reload: true
- watch:
- file: /usr/local/nginx/conf/nginx.conf
nginx(启动脚本)
#!/bin/sh
#
# nginx - this script starts and stops the nginx daemon
#
# chkconfig: - 85 15
# description: Nginx is an HTTP(S) server, HTTP(S) reverse \
# proxy and IMAP/POP3 proxy server
# processname: nginx
# config: /usr/local/nginx/conf/nginx.conf
# pidfile: /usr/local/nginx/logs/nginx.pid
# Source function library.
# Provides daemon(), killproc(), status(), success() and failure() used below.
. /etc/rc.d/init.d/functions
# Source networking configuration.
. /etc/sysconfig/network
# Check that networking is up.
[ "$NETWORKING" = "no" ] && exit 0
# Paths for the source-built nginx (./configure --prefix=/usr/local/nginx).
nginx="/usr/local/nginx/sbin/nginx"
prog=$(basename $nginx)
lockfile="/var/lock/subsys/nginx"
pidfile="/usr/local/nginx/logs/${prog}.pid"
NGINX_CONF_FILE="/usr/local/nginx/conf/nginx.conf"
# start: launch the daemon; exit 5 if the binary is missing/not executable,
# 6 if the config file is absent (LSB init-script status codes).
start() {
[ -x $nginx ] || exit 5
[ -f $NGINX_CONF_FILE ] || exit 6
echo -n $"Starting $prog: "
daemon $nginx -c $NGINX_CONF_FILE
retval=$?
echo
# The subsys lockfile tells "service --status-all"/shutdown we are running.
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
# stop: terminate the process named in the pidfile and remove the lockfile.
stop() {
echo -n $"Stopping $prog: "
killproc -p $pidfile $prog
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
# restart: full stop/start cycle; aborts (code 6) if the config is invalid.
restart() {
configtest_q || return 6
stop
start
}
# reload: validate config, then SIGHUP the master so it re-reads the config
# without dropping live connections.
reload() {
configtest_q || return 6
echo -n $"Reloading $prog: "
killproc -p $pidfile $prog -HUP
echo
}
# configtest: verbose syntax check of the config file (nginx -t).
configtest() {
$nginx -t -c $NGINX_CONF_FILE
}
# configtest_q: quiet variant, used as a guard by restart/reload/upgrade.
configtest_q() {
$nginx -t -q -c $NGINX_CONF_FILE
}
# rh_status: report status via the functions-library status() helper.
rh_status() {
status $prog
}
# rh_status_q: silent status probe for use in conditionals.
rh_status_q() {
rh_status >/dev/null 2>&1
}
# Upgrade the binary with no downtime.
# USR2 makes the running master fork a new master from the new binary; if
# both the old (.oldbin) and new pidfiles exist afterwards, the old master
# is sent QUIT to finish the hand-over.
upgrade() {
local oldbin_pidfile="${pidfile}.oldbin"
configtest_q || return 6
echo -n $"Upgrading $prog: "
killproc -p $pidfile $prog -USR2
retval=$?
# Give the new master a moment to write its pidfile before checking.
sleep 1
if [[ -f ${oldbin_pidfile} && -f ${pidfile} ]]; then
killproc -p $oldbin_pidfile $prog -QUIT
success $"$prog online upgrade"
echo
return 0
else
failure $"$prog online upgrade"
echo
return 1
fi
}
# Tell nginx to reopen logs
# USR1 makes nginx reopen its log files (typically after log rotation).
reopen_logs() {
configtest_q || return 6
echo -n $"Reopening $prog logs: "
killproc -p $pidfile $prog -USR1
retval=$?
echo
return $retval
}
# Dispatch on the requested action. For the simple actions, "$1" is also
# the name of the function to invoke.
case "$1" in
start)
# Already running: nothing to do (exit 0 per LSB convention).
rh_status_q && exit 0
$1
;;
stop)
# Not running: nothing to stop.
rh_status_q || exit 0
$1
;;
restart|configtest|reopen_logs)
$1
;;
force-reload|upgrade)
# LSB code 7: program is not running.
rh_status_q || exit 7
upgrade
;;
reload)
rh_status_q || exit 7
$1
;;
status|status_q)
# Maps to rh_status / rh_status_q above.
rh_$1
;;
condrestart|try-restart)
rh_status_q || exit 7
restart
;;
*)
echo $"Usage: $0 {start|stop|reload|configtest|status|force-reload|upgrade|restart|reopen_logs}"
exit 2
esac
[root@server1 salt]# salt -G 'roles:nginx' cmd.run 'ip addr' 查看
server2:
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:3e:37:be brd ff:ff:ff:ff:ff:ff
inet 172.25.254.132/24 brd 172.25.254.255 scope global eth1
inet6 fe80::5054:ff:fe3e:37be/64 scope link
valid_lft forever preferred_lft forever
grains 推送文件
[root@server1 salt]# mkdir _grains
[root@server1 salt]# cd _grains/
[root@server1 _grains]# ls
[root@server1 _grains]# vim my_grains.py
[root@server1 _grains]# salt server3 saltutil.sync_grains
server3:
- grains.my_grains
[root@server1 _grains]# salt server2 grains.item qqq
server2:
----------
qqq:
www
[root@server1 _grains]# salt server2 grains.item eee
server2:
----------
eee:
rrr
通过minion 端写入mysql 数据
在主中
安装
[root@server1 ~]# yum install -y mysql-server
在辅server2
安装
[root@server2 ~]# yum install MySQL-python.x86_64 -y
更改配置文件
[root@server2 ~]# vim /etc/salt/minion 里面的mysql下
mysql.host: '172.25.254.241'
mysql.user: 'salt'
mysql.pass: 'westos'
mysql.db: 'salt'
mysql.port: 3306
重启服务
[root@server2 ~]# /etc/init.d/salt-minion restart
Stopping salt-minion:root:server2 daemon: OK
Starting salt-minion:root:server2 daemon: OK
在主中 启动mysql
[root@server1 web]# /etc/init.d/mysqld start
重置密码
[root@server1 web]# mysql_secure_installation
把test.sql 倒入mysql 中
[root@server1 ~]# mysql -p < test.sql
进入mysql
mysql> grant all on salt.* to salt@'172.25.254.%' identified by 'westos';
退出子执行 命令
[root@server1 ~]# salt 'server2' test.ping --return mysql
server2:
True
查看表
mysql> use salt
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
mysql> show tables;
+----------------+
| Tables_in_salt |
+----------------+
| jids |
| salt_events |
| salt_returns |
+----------------+
3 rows in set (0.00 sec)
mysql> select * from jids;
Empty set (0.00 sec)
mysql> select * from salt_events;
Empty set (0.00 sec)
mysql> select * from salt_returns;
+-----------+----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------+---------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------+
| fun | jid | return | id | success | full_ret
Used Avail Use% Mounted on\n/dev/mapper/VolGroup-lv_root 19G 1012M 17G 6% /\ntmpfs 499M 32K 499M 1% /dev/shm\n/dev/sda1 485M 33M 427M 8% /boot", "retcode": 0, "success": true, "fun": "cmd.run", "id": "server2"} | 2018-05-15 18:35:57 |
| test.ping | 20180515183659252392 | true
通过master 端写入数据
安装python-mysql
[root@server1 ~]# yum install MySQL-python.x86_64 -y
删除或者注释 minion 中写入的mysql
在master中添加
[root@server1 ~]# vim /etc/salt/master
mysql.host: '172.25.254.241'
mysql.user: 'salt'
mysql.pass: 'westos'
mysql.db: 'salt'
mysql.port: 3306
[root@server1 ~]# mysql -h 172.25.254.241 -u salt -pwestos 查看是否成功
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 23
Server version: 5.1.71 Source distribution
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> exit
[root@server1 ~]# /etc/init.d/salt-master restart 重启服务
Stopping salt-master daemon: [ OK ]
Starting salt-master daemon: [ OK ]
执行run
[root@server1 ~]# salt server3 cmd.run 'df -h' --return mysql
server3:
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 19G 972M 17G 6% /
tmpfs 499M 32K 499M 1% /dev/shm
/dev/sda1 485M 33M 427M 8% /boot
查看表
进入mysql
mysql> use salt
mysql> select * from salt_returns;
| server2 | 1 | {"fun_args": [], "jid": "20180515183659252392", "return": true, "retcode": 0, "success": true, "fun": "test.ping", "id":
"server2"}
_modules 模块脚本方式查看
[root@server1 salt]# mkdir /srv/salt/_modules
[root@server1 salt]# cd _modules/
[root@server1 _modules]# ls
[root@server1 _modules]# pwd
/srv/salt/_modules
[root@server1 _modules]# vim my_disk.py
#!/usr/bin/env python
def df():
    """Run ``df -h`` on the minion and return its text output.

    Relies on the ``__salt__`` execution-module dunder that Salt injects
    when the module is loaded, so it only works after being synced to the
    minion (``saltutil.sync_modules``).
    """
    return __salt__['cmd.run']('df -h')
[root@server1 _modules]# yum install tree -y
[root@server1 _modules]# cd ..
[root@server1 salt]# tree
.
├── apache
│ ├── files
│ │ └── httpd.conf
│ └── web.sls
└── _modules
└── my_disk.py
[root@server1 _modules]# salt server2 saltutil.sync_modules 必须刷新
server2:
- modules.my_disk
[root@server1 _modules]# salt server2 my_disk.df (df)是配置文件定义的 def 后面
server2:
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 19G 1012M 17G 6% /
tmpfs 499M 64K 499M 1% /dev/shm
/dev/sda1 485M 33M 427M 8% /boot
jinja 模版
[root@server1 apache]# ls
files web.sls
[root@server1 apache]# pwd
/srv/salt/apache
更改web.sls文件
[root@server1 apache]# cat web.sls
# apache/web.sls — render httpd.conf as a Jinja template with hard-coded
# context values, then keep httpd running and reload it on config change.
apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://apache/files/httpd.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja   # render the source file through the Jinja templater
    - context:
        PORT: 8080
        IP: 172.25.254.243
  service.running:
    - name: httpd
    - enable: true      # fixed typo "ture"
    - reload: true      # fixed typo "ture"
    - watch:
      - file: apache-install
更改http主配置文件
Listen {{ IP }}:{{ PORT }} 在原 Listen 80 端口行中更改
推送过去 80 端口改为8080
[root@server1 files]# salt server3 state.sls apache.web
server3:
----------
ID: apache-install
Function: pkg.installed
Result: True
Comment: All specified packages are already installed
Started: 19:48:26.972073
Duration: 1448.231 ms
Changes:
----------
ID: apache-install
Function: file.managed
Name: /etc/httpd/conf/httpd.conf
Result: True
Comment: File /etc/httpd/conf/httpd.conf updated
Started: 19:48:28.426311
Duration: 296.255 ms
Changes:
----------
diff:
---
+++
@@ -133,7 +133,7 @@
# prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
#
#Listen 12.34.56.78:80
-Listen 80
+Listen 172.25.254.243:8080
#
# Dynamic Shared Object (DSO) Support
----------
ID: apache-install
Function: service.running
Name: httpd
Result: True
Comment: Service reloaded
Started: 19:48:28.830995
Duration: 188.281 ms
Changes:
----------
httpd:
True
Summary for server3
------------
Succeeded: 3 (changed=2)
Failed: 0
------------
Total states run: 3
Total run time: 1.933 s
jinja 模版1
[root@server1 apache]# ls
files web.sls
[root@server1 apache]# pwd
/srv/salt/apache
[root@server1 apache]# cat web.sls
# apache/web.sls — same as before, but IP/PORT now come from pillar
# (srv/pillar/web/server.sls) instead of being hard-coded.
apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://apache/files/httpd.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
    - context:
        PORT: {{ pillar['PORT'] }}   # values resolved per-minion from pillar
        IP: {{ pillar['IP'] }}
  service.running:
    - name: httpd
    - enable: true    # fixed typo "ture"
    - reload: true    # fixed typo "ture"
    - watch:
      - file: apache-install
[root@server1 files]# vim httpd.conf
{% from 'apache/files/lib.sls' import test with context %} 第一行
Listen {{ IP }}:{{ PORT }} 端口行
[root@server1 pillar]# ls
top.sls web
[root@server1 pillar]# pwd
/srv/pillar
[root@server1 pillar]# cat top.sls 检测serversls 里面所有文件
base:
'*':
- web.server
[root@server1 pillar]# cd web/
[root@server1 web]# ls
server.sls
[root@server1 web]# cat server.sls
{% if grains['fqdn'] == 'server2' %}
webserver: nginx
IP: 172.25.254.242
PORT: 80
{% elif grains['fqdn'] == 'server3' %}
webserver: apache
IP: 172.25.254.243
PORT: 8080
{% endif %}
在apache推
salt server2 state.sls apache.web
如果需要更改端口只需要在这个配置文件中修改在推
源码安装keepalived
[root@server1 keepalived]# pwd
/srv/salt/keepalived
[root@server1 keepalived]# ls
files install.sls service.sls
[root@server1 keepalived]# cd files/
[root@server1 files]# ls
keepalived keepalived-1.4.3.tar.gz keepalived.conf
[root@server1 keepalived]# cat install.sls
{% set version = '1.4.3' %}
# keepalived/install.sls — compile keepalived from source and wire up the
# SysV layout. ID renamed from "nginx-install" (copy-paste leftover): a
# duplicate state ID would make highstate fail with "conflicting IDs" if
# nginx.install is ever applied to the same minion.
keepalived-install:
  file.managed:
    - name: /mnt/keepalived-{{version}}.tar.gz
    - source: salt://keepalived/files/keepalived-{{version}}.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf keepalived-{{version}}.tar.gz && cd keepalived-{{version}} && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make &> /dev/null && make install &> /dev/null
    - creates: /usr/local/keepalived

/etc/keepalived:
  file.directory:
    - mode: 755

/etc/sysconfig/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/etc/sysconfig/keepalived

/sbin/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived

/etc/init.d/keepalived:
  file.managed:
    - source: salt://keepalived/files/keepalived
    - mode: 755
[root@server1 keepalived]# cat service.sls
# keepalived/service.sls — render keepalived.conf from pillar values and
# keep the service running; reloads on config change via the watch requisite.
include:
  - keepalived.install

keepalived-service:
  file.managed:
    - name: /etc/keepalived/keepalived.conf
    - source: salt://keepalived/files/keepalived.conf
    - template: jinja
    - context:
        STATE: {{pillar['STATE']}}
        VRID: {{pillar['VRID']}}
        PRIORITY: {{pillar['PRIORITY']}}
  service.running:
    - name: keepalived
    - enable: true
    - reload: true    # fixed typo "ture"
    - watch:
      - file: keepalived-service
更改pillar 配置文件
[root@server1 web]# pwd
/srv/pillar/web
[root@server1 web]# cat server.sls
{# srv/pillar/web/server.sls — per-minion keepalived/VRRP parameters #}
{% if grains['fqdn'] == 'server2' %}
webserver: keepalived
IP: 172.25.254.242
PORT: 80
STATE: MASTER
VRID: 222
PRIORITY: 20
{% elif grains['fqdn'] == 'server3' %}
webserver: keepalived
IP: 172.25.254.243
PORT: 8080
STATE: BACKUP
VRID: 222
PRIORITY: 10    # fixed typo "PRIORITTY" — service.sls reads pillar['PRIORITY']
{% endif %}
推 keepalived
更改keepalived 主配置文件
[root@server1 files]# cat keepalived.conf
! Configuration File for keepalived
global_defs {
   notification_email {
        root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
vrrp_instance VI_1 {
    state {{STATE}}
    interface eth0
    virtual_router_id {{VRID}}
    ! fixed {{ '{{PRIO}}' }}: the Salt context variable is named PRIORITY,
    ! so PRIO was undefined and the priority line rendered empty
    priority {{PRIORITY}}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.25.254.100
    }
}
[root@server1 salt]# cat top.sls
base:
'server3':
- keepalived.service
'server2':
- keepalived.service
[root@server1 salt]# salt '*' state.highstate 执行
代理syndic-master
topmaster > syndic > master > minion
启动一台新的虚拟机
在之前master 安装
[root@server1 salt]# yum install salt-syndic.noarch -y
Loaded plugins: product-id, subscription-manager
This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
Setting up Install Process
Resolving Dependencies
--> Running transaction check
---> Package salt-syndic.noarch 0:2016.11.3-1.el6 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Installing:
salt-syndic noarch 2016.11.3-1.el6 salt 16 k
Transaction Summary
================================================================================
Install 1 Package(s)
Total download size: 16 k
Installed size: 4.4 k
Downloading Packages:
salt-syndic-2016.11.3-1.el6.noarch.rpm | 16 kB 00:00
Running rpm_check_debug
Running Transaction Test
Transaction Test Succeeded
Running Transaction
Installing : salt-syndic-2016.11.3-1.el6.noarch 1/1
Verifying : salt-syndic-2016.11.3-1.el6.noarch 1/1
Installed:
salt-syndic.noarch 0:2016.11.3-1.el6
Complete!
[root@server1 salt]# salt-key -L
Accepted Keys:
server1
server2
server3
Denied Keys:
Unaccepted Keys:
Rejected Keys:
[root@server1 salt]# salt-key -d server1
The following keys are going to be deleted:
Accepted Keys:
server1
Proceed? [N/y] y
Key for minion server1 deleteed.
[root@server1 salt]# salt-key -L
Accepted Keys:
server2
server3
Denied Keys:
Unaccepted Keys:
Rejected Keys:
更改配置为文件
[root@server1 salt]# vim /etc/salt/master
#syndic_master: masterofmasters 862行
syndic_master: 172.25.254.244
[root@server1 salt]# /etc/init.d/salt-syndic start
Starting salt-syndic daemon: [ OK ]
[root@server1 salt]# /etc/init.d/salt-master restart
Stopping salt-master daemon:
[ OK ]
Starting salt-master daemon: [ OK ]
在server4安装 master
[root@server4 ~]# yum install salt-master -y
[root@server4 ~]# vim /etc/salt/master
857 #order_masters: False
858 order_masters: true
[root@server4 ~]# /etc/init.d/salt-master start
Starting salt-master daemon: [ OK ]
[root@server4 ~]# salt-key -L 没有刷新出来等等 如果有问题重新启动多次在刷新
Accepted Keys:
Denied Keys:
Unaccepted Keys:
Rejected Keys:
[root@server4 ~]# salt-key -L
Accepted Keys:
Denied Keys:
Unaccepted Keys:
server1
Rejected Keys:
[root@server4 ~]# salt-key -A
The following keys are going to be accepted:
Unaccepted Keys:
server1
Proceed? [n/Y] y
Key for minion server1 accepted.
[root@server4 ~]# salt-key -L
Accepted Keys:
server1
Denied Keys:
Unaccepted Keys:
Rejected Keys:
在server1 syndic-master中执行
[root@server1 salt]# salt-key -L
Accepted Keys:
server2
server3
Denied Keys:
Unaccepted Keys:
Rejected Keys:
在 top master中执行 查看结果
[root@server4 ~]# salt '*' test.ping
server3:
True
server2:
True
ssh
在syndic-master中安装
[root@server1 salt]# yum install salt-ssh -y
Loaded plugins: product-id, subscription-manager
This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
Setting up Install Process
Resolving Dependencies
--> Running transaction check
---> Package salt-ssh.noarch 0:2016.11.3-1.el6 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
==========================================================================
Package Arch Version Repository Size
==========================================================================
Installing:
salt-ssh noarch 2016.11.3-1.el6 salt 16 k
Transaction Summary
==========================================================================
Install 1 Package(s)
Total download size: 16 k
Installed size: 3.0 k
Downloading Packages:
salt-ssh-2016.11.3-1.el6.noarch.rpm | 16 kB 00:00
Running rpm_check_debug
Running Transaction Test
Transaction Test Succeeded
Running Transaction
Installing : salt-ssh-2016.11.3-1.el6.noarch 1/1
Verifying : salt-ssh-2016.11.3-1.el6.noarch 1/1
Installed:
salt-ssh.noarch 0:2016.11.3-1.el6
Complete!
更改配置文件
[root@server1 salt]# vim /etc/salt/roster 在配置文件中添加 认证 用户 密码 ip
server2:
host: 172.25.254.242
user: root
passwd: redhat
server3:
user: root
host: 172.25.254.243
passwd: redhat
执行ssh 命令查看
[root@server1 salt]# salt-ssh server2 test.ping -i
server2:
True
[root@server1 salt]# salt-ssh server3 test.ping -i
server3:
True
[root@server1 salt]# salt-ssh server2 cmd.run df
server2:
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/mapper/VolGroup-lv_root 19134332 1208020 16954332 7% /
tmpfs 510200 16 510184 1% /dev/shm
/dev/sda1 495844 33469 436775 8% /boot
ssh 密钥地址 /etc/salt/pki/master/ssh
[root@server1 ssh]# ls
salt-ssh.rsa salt-ssh.rsa.pub
[root@server1 ssh]# pwd
/etc/salt/pki/master/ssh
api 使用
安装
[root@server1 ssh]# yum install salt-api -y
[root@server1 ssh]# cd /etc/pki/tls/private/
[root@server1 private]# ls
添加key认证
[root@server1 private]# openssl genrsa 2048 > localhost.key key
Generating RSA private key, 2048 bit long modulus
...................+++
...........................................+++
e is 65537 (0x10001)
添加钥匙
[root@server1 private]# cd ..
[root@server1 tls]# cd certs/
[root@server1 certs]# ls
ca-bundle.crt ca-bundle.trust.crt make-dummy-cert Makefile renew-dummy-cert
[root@server1 certs]# make testcert
umask 77 ; \
/usr/bin/openssl req -utf8 -new -key /etc/pki/tls/private/localhost.key -x509 -days 365 -out /etc/pki/tls/certs/localhost.crt -set_serial 0
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [XX]:CN
State or Province Name (full name) []:Shaanxi
Locality Name (eg, city) [Default City]:xi'an
Organization Name (eg, company) [Default Company Ltd]:westos
Organizational Unit Name (eg, section) []:linux
Common Name (eg, your name or your server's hostname) []:server1
Email Address []:root@localhost
[root@server1 certs]# ls
ca-bundle.crt localhost.crt Makefile
ca-bundle.trust.crt make-dummy-cert renew-dummy-cert
进入主配置文件更改
[root@server1 certs]# cd /etc/salt/
[root@server1 salt]# vim master
[root@server1 salt]# useradd saltapi 添加用户添加密码
[root@server1 salt]# id saltapi
uid=500(saltapi) gid=500(saltapi) groups=500(saltapi)
[root@server1 salt]# passwd saltapi
Changing password for user saltapi.
New password:
BAD PASSWORD: it is based on a dictionary word
BAD PASSWORD: is too simple
Retype new password:
passwd: all authentication tokens updated successfully.
[root@server1 salt]# cd master.d/
[root@server1 master.d]# ls
[root@server1 master.d]# vim eauth.conf
# /etc/salt/master.d/eauth.conf — PAM external auth for the saltapi user.
external_auth:
  pam:
    saltapi:
      - .*
      - '@wheel'
      - '@runner'   # fixed typo "@rnner" — Salt's runner-module permission
      - '@jobs'
[root@server1 master.d]# vim api.conf
# /etc/salt/master.d/api.conf — rest_cherrypy netapi settings.
# Inline CJK annotations moved into comments: trailing text after a YAML
# scalar would otherwise become part of the value.
rest_cherrypy:
  host: 172.25.254.241                          # bind address (master host)
  port: 8000                                    # API listen port
  ssl_crt: /etc/pki/tls/certs/localhost.crt     # SSL certificate
  ssl_key: /etc/pki/tls/private/localhost.key   # SSL private key
[root@server1 master.d]# /etc/init.d/salt-master restart
Stopping salt-master daemon: [ OK ]
Starting salt-master daemon: [ OK ]
[root@server1 master.d]# /etc/init.d/salt-api start
Starting salt-api daemon: [ OK ]
[root@server1 master.d]# netstat -antlp | grep 8000
tcp 0 0 172.25.254.241:8000 0.0.0.0:* LISTEN 9740/python2.6
tcp 0 0 172.25.254.241:46068 172.25.254.241:8000 TIME_WAIT -
[root@server1 salt]# curl -ssk https://172.25.254.241:8000/login -H 'Accept: application/x-yaml' -d username=saltapi -d password=westos -d eauth=pam
return:
- eauth: pam
expire: 1526511767.085124
perms:
- .*
- '@wheel'
- '@rnner'
- '@jobs'
start: 1526468567.0851231
token: 730cfad5cad55905883abcdb0b6eb2e01e876fb5
user: saltapi
执行ping 查看
[root@server1 salt]# curl -sSk https://172.25.254.241:8000 -H 'Accept: application/x-yaml' -H 'X-Auth-Token: 730cfad5cad55905883abcdb0b6eb2e01e876fb5' -d client=local -d tgt='*' -d fun=test.ping   （执行命令走根路径 / ，/login 仅用于取 token）