How to Deploy a TiDB Database Cluster on CentOS 7.5 Virtual Machines

Host: Windows 10 64-bit with VMware® Workstation 12 Pro

Intel(R) Core(TM) i5-4750 CPU @ 3.20GHz, 4 cores
Kingston HyperX FURY DDR3-1866, 32 GB
GLOWAY STK512GS3-S7 SSD (512 GB), used for VMware® Workstation 12 Pro
Tigo SSD (120 GB), used for Windows 10 64-bit

The TiDB cluster consists of 10 servers; each is allocated 3 GB of memory and 40 GB of disk space for the CentOS 7.5 installation.
192.168.10.111, 192.168.10.112 and 192.168.10.113 are the nodes that store the cluster data, so each of them has an additional 40 GB disk attached.
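
For reference, the ten machines are used as follows later in this article (the roles come from the inventory.ini shown further below; the contosoNNN hostnames match the shell prompts in the listings):

192.168.10.101  contoso101  TiDB server
192.168.10.102  contoso102  TiDB server
192.168.10.111  contoso111  TiKV server (extra 40 GB data disk mounted at /data1)
192.168.10.112  contoso112  TiKV server (extra 40 GB data disk mounted at /data1)
192.168.10.113  contoso113  TiKV server (extra 40 GB data disk mounted at /data1)
192.168.10.121  contoso121  PD server
192.168.10.122  contoso122  PD server
192.168.10.123  contoso123  PD server
192.168.10.131  contoso131  monitoring (Prometheus, Grafana, Alertmanager)
192.168.10.200  contoso200  control machine (tidb-ansible)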

[root@contoso101 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.5G   29G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0


[root@contoso102 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.6G   29G   6% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0


[root@contoso111 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.3G   30G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
/dev/sdb1        40G  515M   37G   2% /data1
tmpfs           279M     0  279M   0% /run/user/0


[root@contoso112 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.3G   29G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
/dev/sdb1        40G  515M   37G   2% /data1
tmpfs           279M     0  279M   0% /run/user/0


[root@contoso113 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.3G   30G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
/dev/sdb1        40G  515M   37G   2% /data1
tmpfs           279M     0  279M   0% /run/user/0


[root@contoso121 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.5G   29G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0


[root@contoso122 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.5G   29G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0
[root@contoso122 ~]# 

[root@contoso123 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.5G   29G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0
 

[root@contoso131 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  2.4G   28G   8% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0
 

[tidb@contoso200 tidb-ansible]$ df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        13G  2.6G  9.1G  22% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0
 

[root@contoso200 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        13G  2.6G  9.1G  22% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0

Run the commands below on each of the following CentOS 7.5 systems:

192.168.10.101, 192.168.10.102, 192.168.10.111, 192.168.10.112, 192.168.10.113, 192.168.10.121, 192.168.10.122, 192.168.10.123, 192.168.10.131, 192.168.10.200. Note that "cat > /etc/ntp.conf" overwrites the file with the content that follows it; finish that input with Ctrl-D.

# sed -i -- 's/^SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
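
The sed command above only takes effect after a reboot. To turn SELinux off for the running system as well (a common companion step, not part of the original transcript), you can additionally run:

# setenforce 0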

# systemctl disable firewalld && systemctl stop firewalld

# yum -y install ntp

# cat > /etc/ntp.conf
# For more information about this file, see the man pages
# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).

driftfile /var/lib/ntp/drift

# Permit time synchronization with our time source, but do not
# permit the source to query or modify the service on this system.
restrict default nomodify notrap nopeer noquery

# Permit all access over the loopback interface.  This could
# be tightened as well, but to do so would effect some of
# the administrative functions.
restrict 127.0.0.1
restrict ::1

# Hosts on local network are less restricted.
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap

# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server 0.cn.pool.ntp.org iburst
server 1.cn.pool.ntp.org iburst
server 2.cn.pool.ntp.org iburst
server 3.cn.pool.ntp.org iburst

#broadcast 192.168.1.255 autokey        # broadcast server
#broadcastclient                        # broadcast client
#broadcast 224.0.1.1 autokey            # multicast server
#multicastclient 224.0.1.1              # multicast client
#manycastserver 239.255.254.254         # manycast server
#manycastclient 239.255.254.254 autokey # manycast client

# Enable public key cryptography.
#crypto

includefile /etc/ntp/crypto/pw

# Key file containing the keys and key identifiers used when operating
# with symmetric key cryptography.
keys /etc/ntp/keys

# Specify the key identifiers which are trusted.
#trustedkey 4 8 42

# Specify the key identifier to use with the ntpdc utility.
#requestkey 8

# Specify the key identifier to use with the ntpq utility.
#controlkey 8

# Enable writing of statistics records.
#statistics clockstats cryptostats loopstats peerstats

# Disable the monitoring facility to prevent amplification attacks using ntpdc
# monlist command when default restrict does not include the noquery flag. See
# CVE-2013-5211 for more details.
# Note: Monitoring will not be disabled with the limited restriction flag.

disable monitor

 

# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done

# systemctl enable ntpd.service && systemctl start ntpd.service && systemctl status ntpd.service
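
Since the same preparation has to be repeated on every machine, it can also be driven from a single host with a small loop once /etc/ntp.conf has been prepared locally. This is only a sketch and assumes you can SSH to each node as root (you will be prompted for each password unless keys are already in place):

# for host in 192.168.10.101 192.168.10.102 192.168.10.111 192.168.10.112 192.168.10.113 192.168.10.121 192.168.10.122 192.168.10.123 192.168.10.131 192.168.10.200
> do
>   ssh root@$host "sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config && systemctl disable firewalld && systemctl stop firewalld && yum -y install ntp"
>   scp /etc/ntp.conf root@$host:/etc/ntp.conf
> done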

Next, attach an additional virtual disk to each of 192.168.10.111, 192.168.10.112 and 192.168.10.113.

[root@contoso112 ~]# df -h

Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  2.6G   28G   9% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0

[root@contoso112 ~]# fdisk -l

Disk /dev/sda: 42.9 GB, 42949672960 bytes, 83886080 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x0009d233

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048    68261887    34129920   83  Linux
/dev/sda2        68261888    83886079     7812096   82  Linux swap / Solaris

Disk /dev/sdb: 42.9 GB, 42949672960 bytes, 83886080 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

[root@contoso112 ~]# fdisk /dev/sdb

Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0x139141f6.

Command (m for help): m
Command action
   a   toggle a bootable flag
   b   edit bsd disklabel
   c   toggle the dos compatibility flag
   d   delete a partition
   g   create a new empty GPT partition table
   G   create an IRIX (SGI) partition table
   l   list known partition types
   m   print this menu
   n   add a new partition
   o   create a new empty DOS partition table
   p   print the partition table
   q   quit without saving changes
   s   create a new empty Sun disklabel
   t   change a partition's system id
   u   change display/entry units
   v   verify the partition table
   w   write table to disk and exit
   x   extra functionality (experts only)

Command (m for help): p

Disk /dev/sdb: 42.9 GB, 42949672960 bytes, 83886080 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x139141f6

   Device Boot      Start         End      Blocks   Id  System

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): p
Partition number (1-4, default 1): 1
First sector (2048-83886079, default 2048): 
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-83886079, default 83886079): 
Using default value 83886079
Partition 1 of type Linux and of size 40 GiB is set

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.

[root@contoso112 ~]# mkfs.ext4 /dev/sdb1

mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
2621440 inodes, 10485504 blocks
524275 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2157969408
320 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks: 
        32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208, 
        4096000, 7962624

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done 

[root@contoso112 ~]# mkdir /data1

[root@contoso112 ~]# cat >> /etc/fstab
/dev/sdb1 /data1 ext4 defaults,nodelalloc,noatime 0 2

[root@contoso112 ~]# df -h

/dev/sda1        32G  1.3G   29G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
tmpfs           279M     0  279M   0% /run/user/0

[root@contoso112 ~]# reboot

[root@contoso112 ~]# df -h

Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.3G   29G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
/dev/sdb1        40G   49M   38G   1% /data1
tmpfs           279M     0  279M   0% /run/user/0
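
The same partitioning has to be done on contoso111 and contoso113 as well. For reference, a non-interactive equivalent of the whole procedure (a sketch that uses parted instead of the interactive fdisk session, and mount -a instead of a reboot) is:

# parted -s /dev/sdb mklabel msdos mkpart primary ext4 1MiB 100%
# mkfs.ext4 /dev/sdb1
# mkdir -p /data1
# echo '/dev/sdb1 /data1 ext4 defaults,nodelalloc,noatime 0 2' >> /etc/fstab
# mount -a
# df -h /data1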

Checkpoint 1: confirm the data-disk mount (/dev/sdb1    40G   49M   38G   1%   /data1) on each TiKV node. The mounted data directories are as follows:

[root@contoso111 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.3G   30G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
/dev/sdb1        40G   49M   38G   1% /data1
tmpfs           279M     0  279M   0% /run/user/0

[root@contoso112 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.3G   29G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
/dev/sdb1        40G   49M   38G   1% /data1
tmpfs           279M     0  279M   0% /run/user/0

[root@contoso113 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        32G  1.3G   30G   5% /
devtmpfs        1.4G     0  1.4G   0% /dev
tmpfs           1.4G     0  1.4G   0% /dev/shm
tmpfs           1.4G   12M  1.4G   1% /run
tmpfs           1.4G     0  1.4G   0% /sys/fs/cgroup
/dev/sdb1        40G   49M   38G   1% /data1
tmpfs           279M     0  279M   0% /run/user/0

 

Checkpoint 2: the ntpstat command must report successful NTP time synchronization on all 10 servers; otherwise TiDB cannot be installed.

 

# systemctl stop ntpd.service   // stop the automatically started NTP daemon first

# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done    // manually step the clock to the correct time

# systemctl restart ntpd.service && systemctl status ntpd.service

[root@contoso101 ~]# systemctl stop ntpd
[root@contoso101 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:17:01 ntpdate[1629]: adjust time server 85.199.214.101 offset -0.005376 sec
11 Jun 08:17:11 ntpdate[1631]: adjust time server 85.199.214.101 offset -0.001583 sec
11 Jun 08:17:21 ntpdate[1633]: adjust time server 85.199.214.101 offset 0.003341 sec
11 Jun 08:17:31 ntpdate[1635]: adjust time server 85.199.214.101 offset 0.004153 sec
[root@contoso101 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:44:19 CST; 7ms ago
  Process: 1646 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1647 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─1647 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:44:19 contoso101 ntpd[1647]: proto: precision = 0.034 usec
Jun 11 08:44:19 contoso101 ntpd[1647]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:44:19 contoso101 ntpd[1647]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:44:19 contoso101 ntpd[1647]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:44:19 contoso101 ntpd[1647]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:44:19 contoso101 ntpd[1647]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:44:19 contoso101 ntpd[1647]: Listen normally on 3 ens33 192.168.10.101 UDP 123
Jun 11 08:44:19 contoso101 ntpd[1647]: Listen normally on 4 lo ::1 UDP 123
Jun 11 08:44:19 contoso101 ntpd[1647]: Listen normally on 5 ens33 fe80::20c:29ff:fec6:77fd UDP 123
Jun 11 08:44:19 contoso101 ntpd[1647]: Listening on routing socket on fd #22 for interface updates
[root@contoso101 ~]# ntpstat
synchronised to NTP server (85.199.214.101) at stratum 2 
   time correct to within 174 ms
   polling server every 64 s
[root@contoso101 ~]#
[root@contoso102 ~]# systemctl stop ntpd
[root@contoso102 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:17:49 ntpdate[1559]: adjust time server 85.199.214.101 offset -0.002106 sec
11 Jun 08:17:58 ntpdate[1562]: adjust time server 85.199.214.101 offset 0.001024 sec
11 Jun 08:18:08 ntpdate[1564]: adjust time server 85.199.214.101 offset 0.001237 sec
11 Jun 08:18:17 ntpdate[1566]: adjust time server 85.199.214.101 offset -0.000387 sec
[root@contoso102 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:44:30 CST; 7ms ago
  Process: 1575 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1576 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─1576 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:44:30 contoso102 systemd[1]: Started Network Time Service.
Jun 11 08:44:30 contoso102 ntpd[1576]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:44:30 contoso102 ntpd[1576]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:44:30 contoso102 ntpd[1576]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:44:30 contoso102 ntpd[1576]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:44:30 contoso102 ntpd[1576]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:44:30 contoso102 ntpd[1576]: Listen normally on 3 ens33 192.168.10.102 UDP 123
Jun 11 08:44:30 contoso102 ntpd[1576]: Listen normally on 4 lo ::1 UDP 123
Jun 11 08:44:30 contoso102 ntpd[1576]: Listen normally on 5 ens33 fe80::20c:29ff:fe1b:93b4 UDP 123
Jun 11 08:44:30 contoso102 ntpd[1576]: Listening on routing socket on fd #22 for interface updates
[root@contoso102 ~]# ntpstat
synchronised to NTP server (5.103.139.163) at stratum 2 
   time correct to within 1182 ms
   polling server every 64 s
[root@contoso102 ~]# 
[root@contoso111 ~]# systemctl stop ntpd
[root@contoso111 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:18:36 ntpdate[1514]: adjust time server 85.199.214.101 offset -0.002519 sec
11 Jun 08:18:45 ntpdate[1516]: adjust time server 85.199.214.101 offset -0.003120 sec
11 Jun 08:18:55 ntpdate[1518]: adjust time server 85.199.214.101 offset 0.001369 sec
11 Jun 08:19:07 ntpdate[1520]: adjust time server 85.199.214.101 offset 0.004017 sec
[root@contoso111 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:44:35 CST; 5ms ago
  Process: 1528 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1529 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─1529 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:44:35 contoso111 systemd[1]: Started Network Time Service.
Jun 11 08:44:35 contoso111 ntpd[1529]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:44:35 contoso111 ntpd[1529]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:44:35 contoso111 ntpd[1529]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:44:35 contoso111 ntpd[1529]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:44:35 contoso111 ntpd[1529]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:44:35 contoso111 ntpd[1529]: Listen normally on 3 ens33 192.168.10.111 UDP 123
Jun 11 08:44:35 contoso111 ntpd[1529]: Listen normally on 4 lo ::1 UDP 123
Jun 11 08:44:35 contoso111 ntpd[1529]: Listen normally on 5 ens33 fe80::20c:29ff:fe47:d62 UDP 123
Jun 11 08:44:35 contoso111 ntpd[1529]: Listening on routing socket on fd #22 for interface updates
[root@contoso111 ~]# ntpstat
synchronised to NTP server (85.199.214.101) at stratum 2 
   time correct to within 1046 ms
   polling server every 64 s
[root@contoso111 ~]# 
[root@contoso112 ~]# systemctl stop ntpd
[root@contoso112 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:19:23 ntpdate[1536]: adjust time server 5.103.139.163 offset -0.040965 sec
11 Jun 08:19:33 ntpdate[1538]: adjust time server 85.199.214.101 offset -0.000816 sec
11 Jun 08:19:43 ntpdate[1540]: adjust time server 5.103.139.163 offset -0.035313 sec
11 Jun 08:19:53 ntpdate[1542]: adjust time server 85.199.214.101 offset 0.016087 sec
[root@contoso112 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:28:09 ntpdate[1545]: adjust time server 5.103.139.163 offset -0.059305 sec
11 Jun 08:28:19 ntpdate[1547]: adjust time server 85.199.214.101 offset -0.016337 sec
11 Jun 08:28:29 ntpdate[1549]: adjust time server 85.199.214.101 offset -0.009542 sec
11 Jun 08:28:38 ntpdate[1551]: adjust time server 5.79.108.34 offset 0.001133 sec
[root@contoso112 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:44:38 CST; 4ms ago
  Process: 1559 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1560 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─1560 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:44:38 contoso112 ntpd[1560]: proto: precision = 0.033 usec
Jun 11 08:44:38 contoso112 ntpd[1560]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:44:38 contoso112 ntpd[1560]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:44:38 contoso112 ntpd[1560]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:44:38 contoso112 ntpd[1560]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:44:38 contoso112 ntpd[1560]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:44:38 contoso112 ntpd[1560]: Listen normally on 3 ens33 192.168.10.112 UDP 123
Jun 11 08:44:38 contoso112 ntpd[1560]: Listen normally on 4 lo ::1 UDP 123
Jun 11 08:44:38 contoso112 ntpd[1560]: Listen normally on 5 ens33 fe80::20c:29ff:fe8d:a8c9 UDP 123
Jun 11 08:44:38 contoso112 ntpd[1560]: Listening on routing socket on fd #22 for interface updates
[root@contoso112 ~]# ntpstat
synchronised to NTP server (5.103.139.163) at stratum 2 
   time correct to within 440 ms
   polling server every 64 s
[root@contoso112 ~]# 
[root@contoso113 ~]# systemctl stop ntpd
[root@contoso113 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:20:15 ntpdate[1549]: adjust time server 5.103.139.163 offset -0.022746 sec
11 Jun 08:20:25 ntpdate[1551]: adjust time server 85.199.214.101 offset -0.000114 sec
11 Jun 08:20:35 ntpdate[1553]: adjust time server 85.199.214.101 offset 0.005723 sec
11 Jun 08:20:48 ntpdate[1555]: adjust time server 85.199.214.101 offset -0.000552 sec
[root@contoso113 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:27:20 ntpdate[1557]: adjust time server 5.103.139.163 offset -0.057081 sec
11 Jun 08:27:28 ntpdate[1559]: adjust time server 85.199.214.101 offset -0.005391 sec
11 Jun 08:27:38 ntpdate[1561]: adjust time server 85.199.214.101 offset -0.001397 sec
11 Jun 08:27:47 ntpdate[1563]: adjust time server 5.79.108.34 offset 0.012483 sec
[root@contoso113 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:44:41 CST; 8ms ago
  Process: 1572 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1573 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─1573 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:44:41 contoso113 ntpd[1573]: proto: precision = 0.033 usec
Jun 11 08:44:41 contoso113 ntpd[1573]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:44:41 contoso113 ntpd[1573]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:44:41 contoso113 ntpd[1573]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:44:41 contoso113 ntpd[1573]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:44:41 contoso113 ntpd[1573]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:44:41 contoso113 ntpd[1573]: Listen normally on 3 ens33 192.168.10.113 UDP 123
Jun 11 08:44:41 contoso113 ntpd[1573]: Listen normally on 4 lo ::1 UDP 123
Jun 11 08:44:41 contoso113 ntpd[1573]: Listen normally on 5 ens33 fe80::20c:29ff:fee5:5fd9 UDP 123
Jun 11 08:44:41 contoso113 ntpd[1573]: Listening on routing socket on fd #22 for interface updates
[root@contoso113 ~]# ntpstat
synchronised to NTP server (85.199.214.101) at stratum 2 
   time correct to within 304 ms
   polling server every 64 s
[root@contoso113 ~]# 
[root@contoso121 ~]# systemctl stop ntpd
[root@contoso121 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:21:27 ntpdate[1571]: adjust time server 85.199.214.101 offset 0.007672 sec
11 Jun 08:21:40 ntpdate[1573]: adjust time server 85.199.214.101 offset 0.001068 sec
11 Jun 08:21:48 ntpdate[1575]: adjust time server 85.199.214.101 offset -0.004003 sec
11 Jun 08:22:01 ntpdate[1577]: adjust time server 85.199.214.101 offset 0.003320 sec
[root@contoso121 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:44:47 CST; 4ms ago
  Process: 1590 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1591 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─1591 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:44:47 contoso121 systemd[1]: Started Network Time Service.
Jun 11 08:44:47 contoso121 ntpd[1591]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:44:47 contoso121 ntpd[1591]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:44:47 contoso121 ntpd[1591]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:44:47 contoso121 ntpd[1591]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:44:47 contoso121 ntpd[1591]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:44:47 contoso121 ntpd[1591]: Listen normally on 3 ens33 192.168.10.121 UDP 123
Jun 11 08:44:47 contoso121 ntpd[1591]: Listen normally on 4 lo ::1 UDP 123
Jun 11 08:44:47 contoso121 ntpd[1591]: Listen normally on 5 ens33 fe80::20c:29ff:fe6f:91a0 UDP 123
Jun 11 08:44:47 contoso121 ntpd[1591]: Listening on routing socket on fd #22 for interface updates
[root@contoso121 ~]# ntpstat
synchronised to NTP server (5.103.139.163) at stratum 2 
   time correct to within 431 ms
   polling server every 64 s
[root@contoso121 ~]# 

[root@contoso122 ~]# systemctl stop ntpd
[root@contoso122 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:23:03 ntpdate[1453]: adjust time server 85.199.214.101 offset 0.000371 sec
11 Jun 08:23:16 ntpdate[1455]: adjust time server 85.199.214.101 offset -0.001135 sec
11 Jun 08:23:29 ntpdate[1457]: adjust time server 85.199.214.101 offset -0.000338 sec
11 Jun 08:23:39 ntpdate[1459]: adjust time server 85.199.214.101 offset 0.000709 sec
[root@contoso122 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:44:50 CST; 4ms ago
  Process: 1467 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1468 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─1468 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:44:50 contoso122 systemd[1]: Started Network Time Service.
Jun 11 08:44:50 contoso122 ntpd[1468]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:44:50 contoso122 ntpd[1468]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:44:50 contoso122 ntpd[1468]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:44:50 contoso122 ntpd[1468]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:44:50 contoso122 ntpd[1468]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:44:50 contoso122 ntpd[1468]: Listen normally on 3 ens33 192.168.10.122 UDP 123
Jun 11 08:44:50 contoso122 ntpd[1468]: Listen normally on 4 ens33 fe80::20c:29ff:fec2:54fd UDP 123
Jun 11 08:44:50 contoso122 ntpd[1468]: Listen normally on 5 lo ::1 UDP 123
Jun 11 08:44:50 contoso122 ntpd[1468]: Listening on routing socket on fd #22 for interface updates
[root@contoso122 ~]# ntpstat
synchronised to NTP server (85.199.214.101) at stratum 2 
   time correct to within 171 ms
   polling server every 64 s
[root@contoso122 ~]# 
[root@contoso123 ~]# systemctl stop ntpd
[root@contoso123 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:24:01 ntpdate[1518]: adjust time server 85.199.214.101 offset -0.006508 sec
11 Jun 08:24:10 ntpdate[1520]: adjust time server 85.199.214.101 offset -0.003171 sec
11 Jun 08:24:21 ntpdate[1522]: adjust time server 85.199.214.101 offset 0.003098 sec
11 Jun 08:24:29 ntpdate[1524]: adjust time server 85.199.214.101 offset 0.001503 sec
[root@contoso123 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:44:53 CST; 3ms ago
  Process: 1534 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1535 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─1535 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:44:53 contoso123 ntpd[1535]: proto: precision = 0.034 usec
Jun 11 08:44:53 contoso123 ntpd[1535]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:44:53 contoso123 ntpd[1535]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:44:53 contoso123 ntpd[1535]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:44:53 contoso123 ntpd[1535]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:44:53 contoso123 ntpd[1535]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:44:53 contoso123 ntpd[1535]: Listen normally on 3 ens33 192.168.10.123 UDP 123
Jun 11 08:44:53 contoso123 ntpd[1535]: Listen normally on 4 lo ::1 UDP 123
Jun 11 08:44:53 contoso123 ntpd[1535]: Listen normally on 5 ens33 fe80::20c:29ff:fea2:c78d UDP 123
Jun 11 08:44:53 contoso123 ntpd[1535]: Listening on routing socket on fd #22 for interface updates
[root@contoso123 ~]# ntpstat
synchronised to NTP server (5.103.139.163) at stratum 2 
   time correct to within 211 ms
   polling server every 64 s
[root@contoso123 ~]# 
[root@contoso131 ~]# systemctl stop ntpd
[root@contoso131 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:24:46 ntpdate[4275]: adjust time server 85.199.214.101 offset 0.000771 sec
11 Jun 08:24:57 ntpdate[4277]: adjust time server 85.199.214.101 offset -0.000776 sec
11 Jun 08:25:09 ntpdate[4279]: adjust time server 85.199.214.101 offset 0.002585 sec
11 Jun 08:25:19 ntpdate[4281]: adjust time server 85.199.214.101 offset -0.002461 sec
[root@contoso131 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:44:57 CST; 4ms ago
  Process: 4289 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 4290 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─4290 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:44:57 contoso131 ntpd[4290]: proto: precision = 0.033 usec
Jun 11 08:44:57 contoso131 ntpd[4290]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:44:57 contoso131 ntpd[4290]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:44:57 contoso131 ntpd[4290]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:44:57 contoso131 ntpd[4290]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:44:57 contoso131 ntpd[4290]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:44:57 contoso131 ntpd[4290]: Listen normally on 3 ens33 192.168.10.131 UDP 123
Jun 11 08:44:57 contoso131 ntpd[4290]: Listen normally on 4 lo ::1 UDP 123
Jun 11 08:44:57 contoso131 ntpd[4290]: Listen normally on 5 ens33 fe80::20c:29ff:fedd:c76f UDP 123
Jun 11 08:44:57 contoso131 ntpd[4290]: Listening on routing socket on fd #22 for interface updates
[root@contoso131 ~]# ntpstat
synchronised to NTP server (85.199.214.101) at stratum 2 
   time correct to within 547 ms
   polling server every 64 s
[root@contoso131 ~]# 
[root@contoso200 ~]# systemctl stop ntpd
[root@contoso200 ~]# for i in {0..3};do /usr/sbin/ntpdate $i.cn.pool.ntp.org;sleep 1;done
11 Jun 08:25:33 ntpdate[1607]: adjust time server 85.199.214.101 offset 0.079034 sec
11 Jun 08:25:46 ntpdate[1610]: adjust time server 85.199.214.101 offset 0.076672 sec
11 Jun 08:25:58 ntpdate[1612]: adjust time server 85.199.214.101 offset 0.067884 sec
11 Jun 08:26:08 ntpdate[1614]: adjust time server 85.199.214.101 offset 0.061608 sec
[root@contoso200 ~]# systemctl restart ntpd.service && systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2018-06-11 08:45:02 CST; 6ms ago
  Process: 1623 ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1624 (ntpd)
   CGroup: /system.slice/ntpd.service
           └─1624 /usr/sbin/ntpd -u ntp:ntp -g

Jun 11 08:45:02 contoso200 ntpd[1624]: proto: precision = 0.033 usec
Jun 11 08:45:02 contoso200 ntpd[1624]: 0.0.0.0 c01d 0d kern kernel time sync enabled
Jun 11 08:45:02 contoso200 ntpd[1624]: ntp_io: estimated max descriptors: 1024, initial socket boundary: 16
Jun 11 08:45:02 contoso200 ntpd[1624]: Listen and drop on 0 v4wildcard 0.0.0.0 UDP 123
Jun 11 08:45:02 contoso200 ntpd[1624]: Listen and drop on 1 v6wildcard :: UDP 123
Jun 11 08:45:02 contoso200 ntpd[1624]: Listen normally on 2 lo 127.0.0.1 UDP 123
Jun 11 08:45:02 contoso200 ntpd[1624]: Listen normally on 3 ens33 192.168.10.200 UDP 123
Jun 11 08:45:02 contoso200 ntpd[1624]: Listen normally on 4 lo ::1 UDP 123
Jun 11 08:45:02 contoso200 ntpd[1624]: Listen normally on 5 ens33 fe80::20c:29ff:fe92:8ec5 UDP 123
Jun 11 08:45:02 contoso200 ntpd[1624]: Listening on routing socket on fd #22 for interface updates
[root@contoso200 ~]# ntpstat
synchronised to NTP server (5.103.139.163) at stratum 2 
   time correct to within 1218 ms
   polling server every 64 s
[root@contoso200 ~]# 
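
Rather than checking each machine by hand as above, the synchronization state of all ten servers can be verified in one pass. A sketch, again assuming root SSH access from one host:

# for host in 192.168.10.101 192.168.10.102 192.168.10.111 192.168.10.112 192.168.10.113 192.168.10.121 192.168.10.122 192.168.10.123 192.168.10.131 192.168.10.200
> do
>   echo "== $host =="
>   ssh root@$host ntpstat
> done

Every host must report a line beginning with "synchronised to NTP server"; if any host is still unsynchronised, wait a minute or rerun the ntpdate step on that host before continuing.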


How to configure SSH mutual trust and passwordless sudo
Create the tidb user on the control machine and generate its SSH key pair.

[root@contoso200 ~]# useradd tidb
[root@contoso200 ~]# passwd tidb
[root@contoso200 ~]# su - tidb
[tidb@contoso200 ~]$ ssh-keygen -t rsa

 

Generating public/private rsa key pair.
Enter file in which to save the key (/home/tidb/.ssh/id_rsa): 
Created directory '/home/tidb/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/tidb/.ssh/id_rsa.
Your public key has been saved in /home/tidb/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:y5zynm4sbMmiFBi9ce+DU99jZQGwEtjSSOEKdKv7epU tidb@contoso200
The key's randomart image is:
+---[RSA 2048]----+
| . oo*. ...      |
|....= o. . .     |
|o o.o.. .   .    |
| +.= . .     .   |
|..+   + S   o    |
|  .. E + + o     |
| .. +o+o* +      |
| .....*+oo .     |
| .+o o ==        |
+----[SHA256]-----+
[tidb@contoso200 ~]$

[tidb@contoso200 ~]$ sudo yum -y install git

 

We trust you have received the usual lecture from the local System
Administrator. It usually boils down to these three things:

    #1) Respect the privacy of others.
    #2) Think before you type.
    #3) With great power comes great responsibility.

[sudo] password for tidb: 
tidb is not in the sudoers file.  This incident will be reported.

[tidb@contoso200 ~]$ su -      // switch to the root user

 

Password: 123
Last login: Mon Jun 11 08:15:41 CST 2018 from 192.168.10.1 on pts/0

[root@contoso200 ~]# visudo   // used just like editing a file with vi

 

## Sudoers allows particular users to run various commands as
## the root user, without needing the root password.
##
## Examples are provided at the bottom of the file for collections
## of related commands, which can then be delegated out to particular
## users or groups.
##
## This file must be edited with the 'visudo' command.

## Host Aliases
## Groups of machines. You may prefer to use hostnames (perhaps using
## wildcards for entire domains) or IP addresses instead.
# Host_Alias     FILESERVERS = fs1, fs2
# Host_Alias     MAILSERVERS = smtp, smtp2

## User Aliases
## These aren't often necessary, as you can use regular groups
## (ie, from files, LDAP, NIS, etc) in this file - just use %groupname
## rather than USERALIAS
# User_Alias ADMINS = jsmith, mikem


## Command Aliases
## These are groups of related commands...

## Networking
# Cmnd_Alias NETWORKING = /sbin/route, /sbin/ifconfig, /bin/ping, /sbin/dhclient, /usr/bin/net, /sbin/iptables, /usr/bin/rfcomm, /usr/bin/wvdial, /sbin/iwconfig, /sbin/mii-tool

## Installation and management of software
# Cmnd_Alias SOFTWARE = /bin/rpm, /usr/bin/up2date, /usr/bin/yum

## Services
# Cmnd_Alias SERVICES = /sbin/service, /sbin/chkconfig, /usr/bin/systemctl start, /usr/bin/systemctl stop, /usr/bin/systemctl reload, /usr/bin/systemctl restart, /usr/bin/systemctl status, /usr/bin/systemctl enable, /usr/bin/systemctl disable

## Updating the locate database
# Cmnd_Alias LOCATE = /usr/bin/updatedb

## Storage
# Cmnd_Alias STORAGE = /sbin/fdisk, /sbin/sfdisk, /sbin/parted, /sbin/partprobe, /bin/mount, /bin/umount

## Delegating permissions
# Cmnd_Alias DELEGATING = /usr/sbin/visudo, /bin/chown, /bin/chmod, /bin/chgrp

## Processes
# Cmnd_Alias PROCESSES = /bin/nice, /bin/kill, /usr/bin/kill, /usr/bin/killall

## Drivers
# Cmnd_Alias DRIVERS = /sbin/modprobe

# Defaults specification

#
# Refuse to run if unable to disable echo on the tty.
#
Defaults   !visiblepw

#
# Preserving HOME has security implications since many programs
# use it when searching for configuration files. Note that HOME
# is already set when the the env_reset option is enabled, so
# this option is only effective for configurations where either
# env_reset is disabled or HOME is present in the env_keep list.
#
Defaults    always_set_home
Defaults    match_group_by_gid

Defaults    env_reset
Defaults    env_keep =  "COLORS DISPLAY HOSTNAME HISTSIZE KDEDIR LS_COLORS"
Defaults    env_keep += "MAIL PS1 PS2 QTDIR USERNAME LANG LC_ADDRESS LC_CTYPE"
Defaults    env_keep += "LC_COLLATE LC_IDENTIFICATION LC_MEASUREMENT LC_MESSAGES"
Defaults    env_keep += "LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER LC_TELEPHONE"
Defaults    env_keep += "LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY"

#
# Adding HOME to env_keep may enable a user to run unrestricted
# commands via sudo.
#
# Defaults   env_keep += "HOME"

Defaults    secure_path = /sbin:/bin:/usr/sbin:/usr/bin

## Next comes the main part: which users can run what software on
## which machines (the sudoers file can be shared between multiple
## systems).
## Syntax:
##
##      user    MACHINE=COMMANDS
##
## The COMMANDS section may have other options added to it.
##
## Allow root to run any commands anywhere
root    ALL=(ALL)       ALL

## Allows members of the 'sys' group to run networking, software,
## service management apps and more.
# %sys ALL = NETWORKING, SOFTWARE, SERVICES, STORAGE, DELEGATING, PROCESSES, LOCATE, DRIVERS

## Allows people in group wheel to run all commands
%wheel  ALL=(ALL)       ALL

## Same thing without a password
# %wheel        ALL=(ALL)       NOPASSWD: ALL

## Allows members of the users group to mount and unmount the
## cdrom as root
# %users  ALL=/sbin/mount /mnt/cdrom, /sbin/umount /mnt/cdrom

## Allows members of the users group to shutdown this system
# %users  localhost=/sbin/shutdown -h now

## Read drop-in files from /etc/sudoers.d (the # here does not mean a comment)
#includedir /etc/sudoers.d
tidb ALL=(ALL) NOPASSWD: ALL
"/etc/sudoers.tmp" 113L, 3967C written

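An alternative to editing /etc/sudoers directly with visudo (a sketch, not what the transcript above does) is to place the rule in a drop-in file under /etc/sudoers.d, which the default sudoers file already pulls in via its #includedir line, and to validate it before relying on it:

[root@contoso200 ~]# echo 'tidb ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/tidb
[root@contoso200 ~]# chmod 440 /etc/sudoers.d/tidb
[root@contoso200 ~]# visudo -cf /etc/sudoers.d/tidb
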
[root@contoso200 ~]# cat /etc/sudoers   // the "tidb ALL=(ALL) NOPASSWD: ALL" line written via visudo is actually saved in /etc/sudoers
[root@contoso200 ~]# su - tidb  // switch to the tidb user to install tidb-ansible
[tidb@contoso200 ~]$ sudo yum -y install epel-release
[tidb@contoso200 ~]$ sudo yum -y install python-pip curl
[tidb@contoso200 ~]$ sudo yum -y install git
[tidb@contoso200 ~]$ git clone https://github.com/pingcap/tidb-ansible.git
[tidb@contoso200 ~]$ cd tidb-ansible
[tidb@contoso200 tidb-ansible]$ sudo pip install -r ./requirements.txt

[tidb@contoso200 tidb-ansible]$ ansible --version

 

ansible 2.5.4
  config file = /home/tidb/tidb-ansible/ansible.cfg
  configured module search path = [u'/home/tidb/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/lib/python2.7/site-packages/ansible
  executable location = /bin/ansible
  python version = 2.7.5 (default, Apr 11 2018, 07:36:10) [GCC 4.8.5 20150623 (Red Hat 4.8.5-28)]

[tidb@contoso200 tidb-ansible]$ pwd

 

/home/tidb/tidb-ansible 

[tidb@contoso200 tidb-ansible]$ ls

 

ansible.cfg                  filter_plugins         local_prepare.yml           start.yml
bootstrap.yml                get_pd_leader_tls.yml  log                         stop_spark.yml
callback_plugins             get_pd_leader.yml      migrate_monitor.yml         stop.yml
cloud                        get_store_id_tls.yml   README.md                   templates
collect_diagnosis.yml        get_store_id.yml       requirements.txt            unsafe_cleanup_container.yml
conf                         group_vars             roles                       unsafe_cleanup_data.yml
create_grafana_api_keys.yml  hosts.ini              rolling_update_monitor.yml  unsafe_cleanup.yml
create_users.yml             inventory.ini          rolling_update.yml
deploy_ntp.yml               library                scripts
deploy.yml                   LICENSE                start_spark.yml

[tidb@contoso200 tidb-ansible]$ sudo yum -y install sshpass

How to use Ansible to configure SSH mutual trust and passwordless sudo automatically

Following the "Download TiDB-Ansible to the control machine" section of the deployment guide
(https://github.com/pingcap/docs-cn/blob/master/op-guide/ansible-deployment.md#在中控机器上下载-tidb-ansible),
add the IP addresses of your deployment target machines under the [servers] section.
I have already set ntp_server to cn.pool.ntp.org.

[tidb@contoso200 tidb-ansible]$ cat > hosts.ini
[servers]
192.168.10.101
192.168.10.102
192.168.10.111
192.168.10.112
192.168.10.113
192.168.10.121
192.168.10.122
192.168.10.123
192.168.10.131

[all:vars]
username = tidb
ntp_server = cn.pool.ntp.org
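
Before running the playbook in the next step, you can optionally confirm that every target machine is reachable over SSH as root (a sketch; -u root -k makes Ansible connect as root and prompt for the root password, since key-based trust has not been set up yet):

[tidb@contoso200 tidb-ansible]$ ansible -i hosts.ini all -m ping -u root -k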

Allocate the machine resources and specify the role each machine plays by editing the inventory.ini file:
[tidb@contoso200 tidb-ansible]$ cat > inventory.ini
## TiDB Cluster Part
[tidb_servers]
192.168.10.101
192.168.10.102

[tikv_servers]
192.168.10.111
192.168.10.112
192.168.10.113

[pd_servers]
192.168.10.121
192.168.10.122
192.168.10.123

[spark_master]

[spark_slaves]

## Monitoring Part
# prometheus and pushgateway servers
[monitoring_servers]
192.168.10.131

[grafana_servers]
192.168.10.131

# node_exporter and blackbox_exporter servers
[monitored_servers]
192.168.10.101
192.168.10.102
192.168.10.111
192.168.10.112
192.168.10.113
192.168.10.121
192.168.10.122
192.168.10.123
192.168.10.131

[alertmanager_servers]
192.168.10.131

[kafka_exporter_servers]

## Binlog Part
[pump_servers:children]
tidb_servers

[drainer_servers]

## Group variables
[pd_servers:vars]
# location_labels = ["zone","rack","host"]

## Global variables
[all:vars]
deploy_dir = /data1/deploy

## Connection
# ssh via normal user
ansible_user = tidb

cluster_name = test-cluster

tidb_version = latest

# process supervision, [systemd, supervise]
process_supervision = systemd

# timezone of deployment region
timezone = Asia/Shanghai
set_timezone = True

enable_firewalld = False
# check NTP service
enable_ntpd = True
set_hostname = False

## binlog trigger
enable_binlog = False
# zookeeper address of kafka cluster for binlog, example:
# zookeeper_addrs = "192.168.0.11:2181,192.168.0.12:2181,192.168.0.13:2181"
zookeeper_addrs = ""
# kafka cluster address for monitoring, example:
# kafka_addrs = "192.168.0.11:9092,192.168.0.12:9092,192.168.0.13:9092"
kafka_addrs = ""

# store slow query log into seperate file
enable_slow_query_log = False

# enable TLS authentication in the TiDB cluster
enable_tls = False

# KV mode
deploy_without_tidb = False

# Optional: Set if you already have a alertmanager server.
# Format: alertmanager_host:alertmanager_port
alertmanager_target = ""

grafana_admin_user = "admin"
grafana_admin_password = "admin"


### Collect diagnosis
collect_log_recent_hours = 2

enable_bandwidth_limit = True
# default: 10Mb/s, unit: Kbit/s
collect_bandwidth_limit = 10000
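
After editing, it is worth double-checking which hosts ended up in each group. A quick sketch using standard Ansible options:

[tidb@contoso200 tidb-ansible]$ ansible -i inventory.ini tidb_servers --list-hosts
[tidb@contoso200 tidb-ansible]$ ansible -i inventory.ini tikv_servers --list-hosts
[tidb@contoso200 tidb-ansible]$ ansible -i inventory.ini pd_servers --list-hosts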

 

Run the following command and enter the root password of the deployment target machines when prompted.
The playbook appends the line tidb ALL=(ALL) NOPASSWD: ALL to /etc/sudoers on every server,
which completes the Ansible-driven setup of SSH mutual trust and passwordless sudo.
[tidb@contoso200 tidb-ansible]$ ansible-playbook -i hosts.ini create_users.yml -k

SSH password: 123

PLAY [all] ******************************************************************************************************************

TASK [create user] **********************************************************************************************************
changed: [192.168.10.101]
changed: [192.168.10.111]
changed: [192.168.10.113]
changed: [192.168.10.102]
changed: [192.168.10.112]
changed: [192.168.10.122]
changed: [192.168.10.121]
changed: [192.168.10.123]
changed: [192.168.10.131]

TASK [set authorized key] ***************************************************************************************************
changed: [192.168.10.112]
changed: [192.168.10.102]
changed: [192.168.10.101]
changed: [192.168.10.113]
changed: [192.168.10.111]
changed: [192.168.10.122]
changed: [192.168.10.131]
changed: [192.168.10.123]
changed: [192.168.10.121]

TASK [update sudoers file] **************************************************************************************************
changed: [192.168.10.111]
changed: [192.168.10.113]
changed: [192.168.10.102]
changed: [192.168.10.101]
changed: [192.168.10.112]
changed: [192.168.10.121]
changed: [192.168.10.123]
changed: [192.168.10.122]
changed: [192.168.10.131]

PLAY RECAP ******************************************************************************************************************
192.168.10.101             : ok=3    changed=3    unreachable=0    failed=0   
192.168.10.102             : ok=3    changed=3    unreachable=0    failed=0   
192.168.10.111             : ok=3    changed=3    unreachable=0    failed=0   
192.168.10.112             : ok=3    changed=3    unreachable=0    failed=0   
192.168.10.113             : ok=3    changed=3    unreachable=0    failed=0   
192.168.10.121             : ok=3    changed=3    unreachable=0    failed=0   
192.168.10.122             : ok=3    changed=3    unreachable=0    failed=0   
192.168.10.123             : ok=3    changed=3    unreachable=0    failed=0   
192.168.10.131             : ok=3    changed=3    unreachable=0    failed=0   

Congrats! All goes well. :-)
[tidb@contoso200 tidb-ansible]$ 
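
At this point the tidb user on the control machine should be able to reach every target machine without a password. A quick spot check (a sketch; any of the ten IPs will do):

[tidb@contoso200 tidb-ansible]$ ssh 192.168.10.111 'hostname'

This should print contoso111 without prompting for a password.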

Note: the Report button on the Grafana dashboard can generate PDF files. This feature depends on
the fontconfig package; to use it, log in to the grafana_servers machine and install the package
with the following command:
[root@contoso131 ~]# yum -y install fontconfig

1. Confirm that ansible_user = tidb in the tidb-ansible/inventory.ini file. In this example the tidb
user is the user that runs the services; the relevant configuration is:
## Connection
# ssh via normal user
ansible_user = tidb

Run the following command; if every server returns root, passwordless sudo for the tidb user is configured correctly.
ansible -i inventory.ini all -m shell -a 'whoami' -b

2. Run the local_prepare.yml playbook to download the TiDB binaries to the control machine (requires Internet access):
ansible-playbook local_prepare.yml

3. Initialize the system environment and modify the kernel parameters (note: at least an 8-core CPU is expected here):
ansible-playbook bootstrap.yml

4. Deploy the TiDB cluster software:
ansible-playbook deploy.yml

5. Start the TiDB cluster (a quick connection check is sketched right after these steps):
ansible-playbook start.yml
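
Once start.yml completes, the cluster can be checked from any machine that has a MySQL client installed. TiDB speaks the MySQL protocol and listens on port 4000 by default, and the Grafana web interface runs on port 3000 of the monitoring host (a sketch; adjust addresses and ports if you changed the defaults):

$ mysql -u root -h 192.168.10.101 -P 4000
mysql> show databases;

The Grafana dashboards are available at http://192.168.10.131:3000, with the admin/admin credentials configured in inventory.ini.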

[tidb@contoso200 tidb-ansible]$ ansible -i inventory.ini all -m shell -a 'whoami' -b

192.168.10.122 | SUCCESS | rc=0 >>
root
192.168.10.131 | SUCCESS | rc=0 >>
root
192.168.10.123 | SUCCESS | rc=0 >>
root
192.168.10.121 | SUCCESS | rc=0 >>
root
192.168.10.101 | SUCCESS | rc=0 >>
root
192.168.10.102 | SUCCESS | rc=0 >>
root
192.168.10.111 | SUCCESS | rc=0 >>
root
192.168.10.112 | SUCCESS | rc=0 >>
root
192.168.10.113 | SUCCESS | rc=0 >>
root
[tidb@contoso200 tidb-ansible]$ 

[tidb@contoso200 tidb-ansible]$ ansible-playbook local_prepare.yml

PLAY [do local preparation] ************************************************************************************************

TASK [local : Stop if ansible version is too low, make sure that the Ansible version is Ansible 2.4.2 or later, otherwise a compatibility issue occurs.] ***
ok: [localhost] => {
    "changed": false, 
    "msg": "All assertions passed"
}

TASK [local : create downloads and resources directories] ******************************************************************
changed: [localhost] => (item=/home/tidb/tidb-ansible/downloads)
changed: [localhost] => (item=/home/tidb/tidb-ansible/resources)
changed: [localhost] => (item=/home/tidb/tidb-ansible/resources/bin)

TASK [local : create cert directory] ***************************************************************************************

TASK [local : create packages.yml] *****************************************************************************************
changed: [localhost]

TASK [local : create specific deployment method packages.yml] **************************************************************
changed: [localhost]

TASK [local : include_vars] ************************************************************************************************
ok: [localhost]

TASK [local : include_vars] ************************************************************************************************
ok: [localhost]

TASK [local : detect outbound network] *************************************************************************************
ok: [localhost]

TASK [local : set outbound network fact] ***********************************************************************************
ok: [localhost]

TASK [local : fail] ********************************************************************************************************

TASK [local : detect GFW] **************************************************************************************************
ok: [localhost]

TASK [local : set GFW fact] ************************************************************************************************
ok: [localhost]

TASK [local : download tidb binary] ****************************************************************************************
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tidb-latest-linux-amd64-unportable.tar.gz', u'version': u'latest', u'name': u'tidb'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tidb-binlog-latest-linux-amd64.tar.gz', u'version': u'latest', u'name': u'tidb-binlog'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tidb-tools-latest-linux-amd64.tar.gz', u'version': u'latest', u'name': u'tidb-tools'})

TASK [local : download common binary] **************************************************************************************
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/fio-2.16.tar.gz', u'checksum': u'sha256:bb8e2413aaa154e4b738c5f79a346353ae993c93632d93ad316fcfa70eaa4d04', u'version': 2.16, u'name': u'fio'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/grafana_collector-latest-linux-amd64.tar.gz', u'version': u'latest', u'name': u'grafana_collector'})
changed: [localhost] => (item={u'url': u'https://download.pingcap.org/kafka_exporter-1.1.0.linux-amd64.tar.gz', u'version': u'1.1.0', u'name': u'kafka_exporter'})

TASK [local : download cfssl binary] ***************************************************************************************

TASK [local : download cfssljson binary] ***********************************************************************************

TASK [local : include_tasks] ***********************************************************************************************
included: /home/tidb/tidb-ansible/roles/local/tasks/binary_deployment.yml for localhost

TASK [local : download other binary] ***************************************************************************************

TASK [local : download other binary under gfw] *****************************************************************************
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/prometheus-2.2.1.linux-amd64.tar.gz', u'version': u'2.2.1', u'name': u'prometheus'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/alertmanager-0.14.0.linux-amd64.tar.gz', u'version': u'0.14.0', u'name': u'alertmanager'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/node_exporter-0.15.2.linux-amd64.tar.gz', u'version': u'0.15.2', u'name': u'node_exporter'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/pushgateway-0.4.0.linux-amd64.tar.gz', u'version': u'0.4.0', u'name': u'pushgateway'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/grafana-4.6.3.linux-x64.tar.gz', u'version': u'4.6.3', u'name': u'grafana'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/blackbox_exporter-0.12.0.linux-amd64.tar.gz', u'version': u'0.12.0', u'name': u'blackbox_exporter'})

TASK [local : download TiSpark packages] ***********************************************************************************
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/spark-2.1.1-bin-hadoop2.7.tgz', u'checksum': u'sha256:372ac4f73221c07696793101007a4f19e31566d1f0d9bd0e5205b6fb5b45bfc2', u'version': u'2.1.1', u'name': u'spark-2.1.1-bin-hadoop2.7.tgz'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tispark-0.1.0-SNAPSHOT-jar-with-dependencies.jar', u'name': u'tispark-SNAPSHOT-jar-with-dependencies.jar'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tispark-sample-data.tar.gz', u'version': u'0.1.0-beta', u'name': u'tispark-sample-data.tar.gz'})

TASK [local : unarchive third party binary] ********************************************************************************
changed: [localhost] => (item={u'url': u'https://github.com/prometheus/prometheus/releases/download/v2.2.1/prometheus-2.2.1.linux-amd64.tar.gz', u'version': u'2.2.1', u'name': u'prometheus'})
changed: [localhost] => (item={u'url': u'https://github.com/prometheus/alertmanager/releases/download/v0.14.0/alertmanager-0.14.0.linux-amd64.tar.gz', u'version': u'0.14.0', u'name': u'alertmanager'})
changed: [localhost] => (item={u'url': u'https://github.com/prometheus/node_exporter/releases/download/v0.15.2/node_exporter-0.15.2.linux-amd64.tar.gz', u'version': u'0.15.2', u'name': u'node_exporter'})
changed: [localhost] => (item={u'url': u'https://github.com/prometheus/blackbox_exporter/releases/download/v0.12.0/blackbox_exporter-0.12.0.linux-amd64.tar.gz', u'version': u'0.12.0', u'name': u'blackbox_exporter'})
changed: [localhost] => (item={u'url': u'https://github.com/prometheus/pushgateway/releases/download/v0.4.0/pushgateway-0.4.0.linux-amd64.tar.gz', u'version': u'0.4.0', u'name': u'pushgateway'})
changed: [localhost] => (item={u'url': u'https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.3.linux-x64.tar.gz', u'version': u'4.6.3', u'name': u'grafana'})

TASK [local : unarchive tispark-sample-data] *******************************************************************************
changed: [localhost]

TASK [local : cp monitoring binary] ****************************************************************************************
changed: [localhost] => (item=alertmanager)
changed: [localhost] => (item=prometheus)
changed: [localhost] => (item=node_exporter)
changed: [localhost] => (item=pushgateway)
changed: [localhost] => (item=blackbox_exporter)

TASK [local : cp tispark-sample-data] **************************************************************************************
changed: [localhost]

TASK [local : unarchive tidb binary] ***************************************************************************************
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tidb-latest-linux-amd64-unportable.tar.gz', u'version': u'latest', u'name': u'tidb'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tidb-binlog-latest-linux-amd64.tar.gz', u'version': u'latest', u'name': u'tidb-binlog'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tidb-tools-latest-linux-amd64.tar.gz', u'version': u'latest', u'name': u'tidb-tools'})

TASK [local : unarchive common binary] *************************************************************************************
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/fio-2.16.tar.gz', u'checksum': u'sha256:bb8e2413aaa154e4b738c5f79a346353ae993c93632d93ad316fcfa70eaa4d04', u'version': 2.16, u'name': u'fio'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/grafana_collector-latest-linux-amd64.tar.gz', u'version': u'latest', u'name': u'grafana_collector'})
changed: [localhost] => (item={u'url': u'https://download.pingcap.org/kafka_exporter-1.1.0.linux-amd64.tar.gz', u'version': u'1.1.0', u'name': u'kafka_exporter'})

TASK [local : cp tidb binary] **********************************************************************************************
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tidb-latest-linux-amd64-unportable.tar.gz', u'version': u'latest', u'name': u'tidb'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tidb-binlog-latest-linux-amd64.tar.gz', u'version': u'latest', u'name': u'tidb-binlog'})
changed: [localhost] => (item={u'url': u'http://download.pingcap.org/tidb-tools-latest-linux-amd64.tar.gz', u'version': u'latest', u'name': u'tidb-tools'})

TASK [local : cp fio binary] ***********************************************************************************************
changed: [localhost] => (item=fio)

TASK [local : cp grafana_collector binary and fonts] ***********************************************************************
changed: [localhost]

TASK [local : cp kafka_exporter binary] ************************************************************************************
changed: [localhost] => (item=kafka_exporter)

TASK [local : cp daemontools binary] ***************************************************************************************

TASK [local : clean up download dir] ***************************************************************************************
changed: [localhost]

PLAY RECAP *****************************************************************************************************************
localhost                  : ok=26   changed=18   unreachable=0    failed=0   

Congrats! All goes well. :-)
[tidb@contoso200 tidb-ansible]$ 

[tidb@contoso200 tidb-ansible]$ ansible-playbook bootstrap.yml

PLAY [initializing deployment target] ***************************************************************************************

TASK [check_config_static : Ensure only one monitoring host exists] *********************************************************

TASK [check_config_static : Warn if TiDB host not exists] *******************************************************************

TASK [check_config_static : Ensure zookeeper address of kafka cluster is set when enable_binlog] ****************************

TASK [check_config_static : Ensure PD host exists] **************************************************************************

TASK [check_config_static : Ensure TiKV host exists] ************************************************************************

TASK [check_config_static : Ensure enable_tls is disabled when deployment_method is docker] *********************************

TASK [check_config_static : Check ansible_user variable] ********************************************************************

TASK [check_config_static : Close old control master] ***********************************************************************
ok: [localhost]

PLAY [check node config] ****************************************************************************************************

TASK [pre-ansible : disk space check - fail when disk is full] **************************************************************
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [pre-ansible : Get distro name from /etc/os-release] *******************************************************************
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.101]
ok: [192.168.10.113]
ok: [192.168.10.112]
ok: [192.168.10.102]

TASK [pre-ansible : set distro facts] ***************************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.102]
ok: [192.168.10.101]

TASK [pre-ansible : python check] *******************************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.111]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.102]

TASK [pre-ansible : set has_python facts] ***********************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.101]
ok: [192.168.10.113]
ok: [192.168.10.112]
ok: [192.168.10.102]

TASK [pre-ansible : set has_python facts] ***********************************************************************************

TASK [pre-ansible : include_tasks] ******************************************************************************************

TASK [pre-ansible : include_tasks] ******************************************************************************************
included: /home/tidb/tidb-ansible/roles/pre-ansible/tasks/root_tasks.yml for 192.168.10.131, 192.168.10.121, 192.168.10.122, 192.168.10.123, 192.168.10.111, 192.168.10.112, 192.168.10.113, 192.168.10.101, 192.168.10.102

TASK [pre-ansible : Debian/Ubuntu - install python] *************************************************************************

TASK [pre-ansible : Redhat/CentOS - install python] *************************************************************************

TASK [pre-ansible : Redhat/CentOS - Make sure ntp, ntpstat have been installed] *********************************************
ok: [192.168.10.131] => (item=[u'ntp'])
ok: [192.168.10.111] => (item=[u'ntp'])
ok: [192.168.10.123] => (item=[u'ntp'])
ok: [192.168.10.121] => (item=[u'ntp'])
ok: [192.168.10.122] => (item=[u'ntp'])
ok: [192.168.10.112] => (item=[u'ntp'])
ok: [192.168.10.113] => (item=[u'ntp'])
ok: [192.168.10.102] => (item=[u'ntp'])
ok: [192.168.10.101] => (item=[u'ntp'])

TASK [pre-ansible : Debian/Ubuntu - Make sure ntp, ntpstat have been installed] *********************************************

TASK [bootstrap : gather facts] *********************************************************************************************
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.121]
ok: [192.168.10.111]
ok: [192.168.10.113]
ok: [192.168.10.102]
ok: [192.168.10.112]
ok: [192.168.10.101]

TASK [bootstrap : group hosts by distribution] ******************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [bootstrap : Set deploy_user - set ansible_user as default] ************************************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [bootstrap : Set deploy_dir if not presented] **************************************************************************

TASK [bootstrap : include_tasks] ********************************************************************************************
included: /home/tidb/tidb-ansible/roles/bootstrap/tasks/root_tasks.yml for 192.168.10.131, 192.168.10.121, 192.168.10.122, 192.168.10.123, 192.168.10.111, 192.168.10.112, 192.168.10.113, 192.168.10.101, 192.168.10.102

TASK [bootstrap : setting absent kernel params] *****************************************************************************
ok: [192.168.10.131] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.122] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.123] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.121] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.111] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.112] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.101] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.113] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.102] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})

TASK [bootstrap : setting present kernel params] ****************************************************************************
changed: [192.168.10.121] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
changed: [192.168.10.122] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
changed: [192.168.10.131] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
changed: [192.168.10.111] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
changed: [192.168.10.123] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
changed: [192.168.10.121] => (item={u'name': u'vm.swappiness', u'value': 0})
changed: [192.168.10.122] => (item={u'name': u'vm.swappiness', u'value': 0})
changed: [192.168.10.131] => (item={u'name': u'vm.swappiness', u'value': 0})
changed: [192.168.10.123] => (item={u'name': u'vm.swappiness', u'value': 0})
changed: [192.168.10.111] => (item={u'name': u'vm.swappiness', u'value': 0})
changed: [192.168.10.121] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
changed: [192.168.10.111] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
changed: [192.168.10.131] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
changed: [192.168.10.122] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
changed: [192.168.10.123] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
changed: [192.168.10.122] => (item={u'name': u'fs.file-max', u'value': 1000000})
changed: [192.168.10.121] => (item={u'name': u'fs.file-max', u'value': 1000000})
changed: [192.168.10.131] => (item={u'name': u'fs.file-max', u'value': 1000000})
changed: [192.168.10.111] => (item={u'name': u'fs.file-max', u'value': 1000000})
changed: [192.168.10.123] => (item={u'name': u'fs.file-max', u'value': 1000000})
changed: [192.168.10.102] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
changed: [192.168.10.112] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
changed: [192.168.10.101] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
changed: [192.168.10.113] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
changed: [192.168.10.101] => (item={u'name': u'vm.swappiness', u'value': 0})
changed: [192.168.10.112] => (item={u'name': u'vm.swappiness', u'value': 0})
changed: [192.168.10.102] => (item={u'name': u'vm.swappiness', u'value': 0})
changed: [192.168.10.113] => (item={u'name': u'vm.swappiness', u'value': 0})
changed: [192.168.10.102] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
changed: [192.168.10.112] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
changed: [192.168.10.113] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
changed: [192.168.10.101] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
changed: [192.168.10.102] => (item={u'name': u'fs.file-max', u'value': 1000000})
changed: [192.168.10.101] => (item={u'name': u'fs.file-max', u'value': 1000000})
changed: [192.168.10.112] => (item={u'name': u'fs.file-max', u'value': 1000000})
changed: [192.168.10.113] => (item={u'name': u'fs.file-max', u'value': 1000000})

TASK [bootstrap : update /etc/security/limits.conf] *************************************************************************
changed: [192.168.10.121]
changed: [192.168.10.131]
changed: [192.168.10.111]
changed: [192.168.10.122]
changed: [192.168.10.123]
changed: [192.168.10.112]
changed: [192.168.10.102]
changed: [192.168.10.113]
changed: [192.168.10.101]

TASK [bootstrap : swap - disable swap] **************************************************************************************
changed: [192.168.10.131]
changed: [192.168.10.111]
changed: [192.168.10.121]
changed: [192.168.10.123]
changed: [192.168.10.122]
changed: [192.168.10.101]
changed: [192.168.10.113]
changed: [192.168.10.112]
changed: [192.168.10.102]

TASK [bootstrap : create group] *********************************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.112]
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.113]

TASK [bootstrap : create account] *******************************************************************************************
ok: [192.168.10.123]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.111]
ok: [192.168.10.101]
ok: [192.168.10.113]
ok: [192.168.10.112]
ok: [192.168.10.102]

TASK [bootstrap : create top deploy dir when under root] ********************************************************************
changed: [192.168.10.111]
changed: [192.168.10.122]
changed: [192.168.10.121]
changed: [192.168.10.131]
changed: [192.168.10.123]
changed: [192.168.10.112]
changed: [192.168.10.101]
changed: [192.168.10.113]
changed: [192.168.10.102]

TASK [bootstrap : create wal_dir deploy dir when under root] ****************************************************************

TASK [bootstrap : create raftdb_path deploy dir when under root] ************************************************************

TASK [bootstrap : set timezone to Asia/Shanghai] ****************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.111]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.122]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [bootstrap : set hostname if hostname is not distinguishable] **********************************************************

TASK [bootstrap : set hostname in hosts file] *******************************************************************************

TASK [bootstrap : determine if firewalld is running] ************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.121]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.102]
ok: [192.168.10.101]

TASK [bootstrap : disable firewalld] ****************************************************************************************

TASK [bootstrap : or to enable firewalld] ***********************************************************************************

TASK [bootstrap : check centos configuration file exists] *******************************************************************
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.121]
ok: [192.168.10.111]
ok: [192.168.10.122]
ok: [192.168.10.112]
ok: [192.168.10.101]
ok: [192.168.10.113]
ok: [192.168.10.102]

TASK [bootstrap : check debian configuration file exists] *******************************************************************
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [bootstrap : modify centos irqbalance configuration file] **************************************************************
changed: [192.168.10.122]
changed: [192.168.10.123]
changed: [192.168.10.121]
changed: [192.168.10.131]
changed: [192.168.10.111]
changed: [192.168.10.112]
changed: [192.168.10.101]
changed: [192.168.10.113]
changed: [192.168.10.102]

TASK [bootstrap : modify debian irqbalance configuration file] **************************************************************

TASK [bootstrap : start irqbalance service] *********************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.111]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.102]

PLAY [check system] *********************************************************************************************************

TASK [check_system_necessary : Disk space check - Fail task when disk is full] **********************************************
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.113]
ok: [192.168.10.112]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [check_system_necessary : get facts] ***********************************************************************************
ok: [192.168.10.111]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.102]

TASK [check_system_necessary : Preflight check - System version] ************************************************************

TASK [check_system_necessary : Preflight check - Set NTP service status] ****************************************************
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.102]

TASK [check_system_necessary : Preflight check - NTP service] ***************************************************************

TASK [check_system_necessary : Set deploy_user - set ansible_user as default] ***********************************************

TASK [check_system_necessary : Set deploy_dir if not presented] *************************************************************

TASK [check_system_necessary : Determine which mountpoint deploy dir exists on] *********************************************
ok: [192.168.10.121]
ok: [192.168.10.111]
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [check_system_necessary : set_fact] ************************************************************************************

TASK [check_system_necessary : Preflight check - Check bug if file system is xfs] *******************************************

TASK [check_system_necessary : Preflight check - Clean check file for xfs file system] **************************************
ok: [192.168.10.123]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.111]
ok: [192.168.10.121]
ok: [192.168.10.101]
ok: [192.168.10.113]
ok: [192.168.10.112]
ok: [192.168.10.102]

TASK [check_system_necessary : set_fact] ************************************************************************************

TASK [check_system_necessary : Preflight check - Does deploy dir meet ext4 file system requirement] *************************

TASK [check_system_necessary : Preflight check - set fssystem_check_result status] ******************************************
ok: [192.168.10.111] => (item={u'block_used': 540666, u'uuid': u'd5da2ed5-40ad-4260-a2f6-0203234b6ff4', u'size_total': 42140401664, u'block_total': 10288184, u'inode_available': 2621428, u'block_available': 9747518, u'size_available': 39925833728, u'fstype': u'ext4', u'inode_total': 2621440, u'options': u'rw,noatime,nodelalloc,data=ordered', u'device': u'/dev/sdb1', u'inode_used': 12, u'block_size': 4096, u'mount': u'/data1'})
ok: [192.168.10.112] => (item={u'block_used': 540666, u'uuid': u'58669e95-7663-4011-83bd-25af1d69c10b', u'size_total': 42140401664, u'block_total': 10288184, u'inode_available': 2621428, u'block_available': 9747518, u'size_available': 39925833728, u'fstype': u'ext4', u'inode_total': 2621440, u'options': u'rw,noatime,nodelalloc,data=ordered', u'device': u'/dev/sdb1', u'inode_used': 12, u'block_size': 4096, u'mount': u'/data1'})
ok: [192.168.10.113] => (item={u'block_used': 540666, u'uuid': u'18211ead-9963-474e-977d-0cd576b6e833', u'size_total': 42140401664, u'block_total': 10288184, u'inode_available': 2621428, u'block_available': 9747518, u'size_available': 39925833728, u'fstype': u'ext4', u'inode_total': 2621440, u'options': u'rw,noatime,nodelalloc,data=ordered', u'device': u'/dev/sdb1', u'inode_used': 12, u'block_size': 4096, u'mount': u'/data1'})

TASK [check_system_necessary : Preflight check - Does deploy dir meet ext4 or xfs file system requirement] ******************

TASK [check_system_optional : Preflight check - Check TiDB server's CPU] ****************************************************
fatal: [192.168.10.101]: FAILED! => {"changed": false, "msg": "This machine does not have sufficient CPU to run TiDB, at least 8 cores."}
fatal: [192.168.10.102]: FAILED! => {"changed": false, "msg": "This machine does not have sufficient CPU to run TiDB, at least 8 cores."}

NO MORE HOSTS LEFT **********************************************************************************************************
        to retry, use: --limit @/home/tidb/tidb-ansible/retry_files/bootstrap.retry

PLAY RECAP ******************************************************************************************************************
192.168.10.101             : ok=29   changed=5    unreachable=0    failed=1   
192.168.10.102             : ok=29   changed=5    unreachable=0    failed=1   
192.168.10.111             : ok=30   changed=5    unreachable=0    failed=0   
192.168.10.112             : ok=30   changed=5    unreachable=0    failed=0   
192.168.10.113             : ok=30   changed=5    unreachable=0    failed=0   
192.168.10.121             : ok=29   changed=5    unreachable=0    failed=0   
192.168.10.122             : ok=29   changed=5    unreachable=0    failed=0   
192.168.10.123             : ok=29   changed=5    unreachable=0    failed=0   
192.168.10.131             : ok=29   changed=5    unreachable=0    failed=0   
localhost                  : ok=1    changed=0    unreachable=0    failed=0   


ERROR MESSAGE SUMMARY *******************************************************************************************************
[192.168.10.101]: Ansible FAILED! => playbook: bootstrap.yml; TASK: check_system_optional : Preflight check - Check TiDB server's CPU; message: {"changed": false, "msg": "This machine does not have sufficient CPU to run TiDB, at least 8 cores."}
[192.168.10.102]: Ansible FAILED! => playbook: bootstrap.yml; TASK: check_system_optional : Preflight check - Check TiDB server's CPU; message: {"changed": false, "msg": "This machine does not have sufficient CPU to run TiDB, at least 8 cores."}
Ask for help:
Contact us: support@pingcap.com
It seems that you encounter some problems. You can send an email to the above email address, attached with the tidb-ansible/inventory.ini and tidb-ansible/log/ansible.log files and the error message, or new issue on https://github.com/pingcap/tidb-ansible/issues. We'll try our best to help you deploy a TiDB cluster. Thanks. :-)
[tidb@contoso200 tidb-ansible]$ pwd
/home/tidb/tidb-ansible
[tidb@contoso200 tidb-ansible]$ 

Because the TiDB hosts in this lab have fewer than 8 CPU cores, the optional preflight check fails. There are two ways around it: set dev_mode to True in group_vars/all.yml to skip the optional hardware checks, or lower the thresholds such as tidb_min_cpu in roles/check_system_optional/defaults/main.yml. Both changes are applied below.
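As a minimal sketch of the second option (not what the session below does, which rewrites the whole defaults file), a single sed edit could lower just the TiDB CPU threshold; the path is the one named above and the value 4 is only illustrative for these small VMs:

# lower the TiDB CPU requirement from the default 8 cores to 4 (illustrative only)
sed -i 's/^tidb_min_cpu: 8/tidb_min_cpu: 4/' /home/tidb/tidb-ansible/roles/check_system_optional/defaults/main.yml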

 

[tidb@contoso200 tidb-ansible]$ cat group_vars/all.yml
---
# Variables here are applicable to all host groups
status_dir: "{{ deploy_dir }}/status"
backup_dir: "{{ deploy_dir }}/backup"
images_dir: "{{ deploy_dir }}/images"

# Local
downloads_dir: "{{ playbook_dir }}/downloads"
resources_dir: "{{ playbook_dir }}/resources"
fetch_tmp_dir: "{{ playbook_dir }}/collect_diagnosis_data"
fetch_dir:     "{{ playbook_dir }}/collect_diagnosis"
cert_dir:      "{{ playbook_dir }}/conf/ssl"
script_dir:    "{{ playbook_dir }}/scripts"
binary_dir:    "{{ playbook_dir }}/resources/bin"

# default configuration for multiple host groups and roles
node_exporter_port: 9100
blackbox_exporter_port: 9115
kafka_exporter_port: 9308

# docker
docker_bin_dir: "/usr/bin"

# Random shifts for retrying failed ops like downloading
retry_stagger: 5

dev_mode: False

# deployment methods, [binary, docker] docker deployment method is not recommended and deprecated.
deployment_method: binary

[tidb@contoso200 tidb-ansible]$ sed -i -- 's/^dev_mode: False/dev_mode: True/g' /home/tidb/tidb-ansible/group_vars/all.yml
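A quick grep confirms the edit before rerunning the playbook (an extra verification step, not captured in the original session):

# should now print: dev_mode: True
grep '^dev_mode' /home/tidb/tidb-ansible/group_vars/all.yml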

[tidb@contoso200 tidb-ansible]$ cat roles/check_system_optional/defaults/main.yml 
---

# CPU
tidb_min_cpu: 8
tikv_min_cpu: 8
pd_min_cpu: 4
monitor_min_cpu: 4

# Mem
tidb_min_ram: 16000
tikv_min_ram: 16000
pd_min_ram: 8000
monitor_min_ram: 8000

# Disk
tidb_min_disk: 500000000000
tikv_min_disk: 500000000000
pd_min_disk: 200000000000
monitor_min_disk: 500000000000

[tidb@contoso200 tidb-ansible]$

[tidb@contoso200 tidb-ansible]$ cat > /home/tidb/tidb-ansible/roles/check_system_optional/defaults/main.yml
---

# CPU
tidb_min_cpu: 4
tikv_min_cpu: 4
pd_min_cpu: 4
monitor_min_cpu: 4

# Mem
tidb_min_ram: 2700
tikv_min_ram: 2700
pd_min_ram: 2700
monitor_min_ram: 2700

# Disk
tidb_min_disk: 32000000000
tikv_min_disk: 40000000000
pd_min_disk: 20000000000
monitor_min_disk: 3200000000
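Note that cat > reads from the terminal until Ctrl+D ends the input. A here-document does the same overwrite but terminates itself, which is handy if the edit ever needs to be scripted; this sketch reuses exactly the values pasted above (the quoted EOF marker prevents any shell expansion inside the YAML):

cat > /home/tidb/tidb-ansible/roles/check_system_optional/defaults/main.yml <<'EOF'
---
# CPU (lowered to match the 4-core test VMs)
tidb_min_cpu: 4
tikv_min_cpu: 4
pd_min_cpu: 4
monitor_min_cpu: 4

# Mem (lowered to match the 3 GB test VMs; values appear to be MB)
tidb_min_ram: 2700
tikv_min_ram: 2700
pd_min_ram: 2700
monitor_min_ram: 2700

# Disk (lowered to match the 32-40 GB test disks, in bytes)
tidb_min_disk: 32000000000
tikv_min_disk: 40000000000
pd_min_disk: 20000000000
monitor_min_disk: 3200000000
EOF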
[root@contoso200 ~]# su - tidb
[tidb@contoso200 ~]$ cd /home/tidb/tidb-ansible

 

[tidb@contoso200 tidb-ansible]$ ansible-playbook bootstrap.yml      # run the playbook again to check whether the cluster now meets the hardware requirements

PLAY [initializing deployment target] ***************************************************************************************

TASK [check_config_static : Ensure only one monitoring host exists] *********************************************************

TASK [check_config_static : Warn if TiDB host not exists] *******************************************************************

TASK [check_config_static : Ensure zookeeper address of kafka cluster is set when enable_binlog] ****************************

TASK [check_config_static : Ensure PD host exists] **************************************************************************

TASK [check_config_static : Ensure TiKV host exists] ************************************************************************

TASK [check_config_static : Ensure enable_tls is disabled when deployment_method is docker] *********************************

TASK [check_config_static : Check ansible_user variable] ********************************************************************

TASK [check_config_static : Close old control master] ***********************************************************************
ok: [localhost]

PLAY [check node config] ****************************************************************************************************

TASK [pre-ansible : disk space check - fail when disk is full] **************************************************************
ok: [192.168.10.101]
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.102]
ok: [192.168.10.113]
ok: [192.168.10.112]

TASK [pre-ansible : Get distro name from /etc/os-release] *******************************************************************
ok: [192.168.10.121]
ok: [192.168.10.101]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.113]

TASK [pre-ansible : set distro facts] ***************************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.101]
ok: [192.168.10.123]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [pre-ansible : python check] *******************************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.113]

TASK [pre-ansible : set has_python facts] ***********************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [pre-ansible : set has_python facts] ***********************************************************************************

TASK [pre-ansible : include_tasks] ******************************************************************************************

TASK [pre-ansible : include_tasks] ******************************************************************************************
included: /home/tidb/tidb-ansible/roles/pre-ansible/tasks/root_tasks.yml for 192.168.10.131, 192.168.10.121, 192.168.10.122, 192.168.10.123, 192.168.10.101, 192.168.10.102, 192.168.10.111, 192.168.10.112, 192.168.10.113

TASK [pre-ansible : Debian/Ubuntu - install python] *************************************************************************

TASK [pre-ansible : Redhat/CentOS - install python] *************************************************************************

TASK [pre-ansible : Redhat/CentOS - Make sure ntp, ntpstat have been installed] *********************************************
ok: [192.168.10.123] => (item=[u'ntp'])
ok: [192.168.10.101] => (item=[u'ntp'])
ok: [192.168.10.131] => (item=[u'ntp'])
ok: [192.168.10.121] => (item=[u'ntp'])
ok: [192.168.10.122] => (item=[u'ntp'])
ok: [192.168.10.112] => (item=[u'ntp'])
ok: [192.168.10.113] => (item=[u'ntp'])
ok: [192.168.10.102] => (item=[u'ntp'])
ok: [192.168.10.111] => (item=[u'ntp'])

TASK [pre-ansible : Debian/Ubuntu - Make sure ntp, ntpstat have been installed] *********************************************

TASK [bootstrap : gather facts] *********************************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.123]
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [bootstrap : group hosts by distribution] ******************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [bootstrap : Set deploy_user - set ansible_user as default] ************************************************************
ok: [192.168.10.131]
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [bootstrap : Set deploy_dir if not presented] **************************************************************************

TASK [bootstrap : include_tasks] ********************************************************************************************
included: /home/tidb/tidb-ansible/roles/bootstrap/tasks/root_tasks.yml for 192.168.10.131, 192.168.10.101, 192.168.10.102, 192.168.10.121, 192.168.10.122, 192.168.10.123, 192.168.10.111, 192.168.10.112, 192.168.10.113

TASK [bootstrap : setting absent kernel params] *****************************************************************************
ok: [192.168.10.101] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.122] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.121] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.131] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.102] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.112] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.111] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.123] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})
ok: [192.168.10.113] => (item={u'name': u'net.ipv4.tcp_tw_recycle', u'value': 0})

TASK [bootstrap : setting present kernel params] ****************************************************************************
ok: [192.168.10.102] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
ok: [192.168.10.122] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
ok: [192.168.10.131] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
ok: [192.168.10.101] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
ok: [192.168.10.121] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
ok: [192.168.10.102] => (item={u'name': u'vm.swappiness', u'value': 0})
ok: [192.168.10.122] => (item={u'name': u'vm.swappiness', u'value': 0})
ok: [192.168.10.131] => (item={u'name': u'vm.swappiness', u'value': 0})
ok: [192.168.10.121] => (item={u'name': u'vm.swappiness', u'value': 0})
ok: [192.168.10.101] => (item={u'name': u'vm.swappiness', u'value': 0})
ok: [192.168.10.102] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
ok: [192.168.10.121] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
ok: [192.168.10.122] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
ok: [192.168.10.101] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
ok: [192.168.10.131] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
ok: [192.168.10.122] => (item={u'name': u'fs.file-max', u'value': 1000000})
ok: [192.168.10.121] => (item={u'name': u'fs.file-max', u'value': 1000000})
ok: [192.168.10.101] => (item={u'name': u'fs.file-max', u'value': 1000000})
ok: [192.168.10.102] => (item={u'name': u'fs.file-max', u'value': 1000000})
ok: [192.168.10.131] => (item={u'name': u'fs.file-max', u'value': 1000000})
ok: [192.168.10.111] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
ok: [192.168.10.123] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
ok: [192.168.10.113] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
ok: [192.168.10.112] => (item={u'name': u'net.core.somaxconn', u'value': 32768})
ok: [192.168.10.111] => (item={u'name': u'vm.swappiness', u'value': 0})
ok: [192.168.10.123] => (item={u'name': u'vm.swappiness', u'value': 0})
ok: [192.168.10.113] => (item={u'name': u'vm.swappiness', u'value': 0})
ok: [192.168.10.112] => (item={u'name': u'vm.swappiness', u'value': 0})
ok: [192.168.10.111] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
ok: [192.168.10.123] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
ok: [192.168.10.112] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
ok: [192.168.10.113] => (item={u'name': u'net.ipv4.tcp_syncookies', u'value': 0})
ok: [192.168.10.111] => (item={u'name': u'fs.file-max', u'value': 1000000})
ok: [192.168.10.112] => (item={u'name': u'fs.file-max', u'value': 1000000})
ok: [192.168.10.123] => (item={u'name': u'fs.file-max', u'value': 1000000})
ok: [192.168.10.113] => (item={u'name': u'fs.file-max', u'value': 1000000})

TASK [bootstrap : update /etc/security/limits.conf] *************************************************************************
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [bootstrap : swap - disable swap] **************************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.111]
ok: [192.168.10.113]

TASK [bootstrap : create group] *********************************************************************************************
ok: [192.168.10.102]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.101]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [bootstrap : create account] *******************************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [bootstrap : create top deploy dir when under root] ********************************************************************
ok: [192.168.10.102]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.101]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [bootstrap : create wal_dir deploy dir when under root] ****************************************************************

TASK [bootstrap : create raftdb_path deploy dir when under root] ************************************************************

TASK [bootstrap : set timezone to Asia/Shanghai] ****************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.113]
ok: [192.168.10.112]

TASK [bootstrap : set hostname if hostname is not distinguishable] **********************************************************

TASK [bootstrap : set hostname in hosts file] *******************************************************************************

TASK [bootstrap : determine if firewalld is running] ************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [bootstrap : disable firewalld] ****************************************************************************************

TASK [bootstrap : or to enable firewalld] ***********************************************************************************

TASK [bootstrap : check centos configuration file exists] *******************************************************************
ok: [192.168.10.122]
ok: [192.168.10.101]
ok: [192.168.10.121]
ok: [192.168.10.102]
ok: [192.168.10.131]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.113]
ok: [192.168.10.112]

TASK [bootstrap : check debian configuration file exists] *******************************************************************
ok: [192.168.10.102]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.101]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [bootstrap : modify centos irqbalance configuration file] **************************************************************
ok: [192.168.10.101]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.102]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.111]

TASK [bootstrap : modify debian irqbalance configuration file] **************************************************************

TASK [bootstrap : start irqbalance service] *********************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.113]
ok: [192.168.10.111]
ok: [192.168.10.112]

PLAY [check system] *********************************************************************************************************

TASK [check_system_necessary : Disk space check - Fail task when disk is full] **********************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.122]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_system_necessary : get facts] ***********************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.102]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_system_necessary : Preflight check - System version] ************************************************************

TASK [check_system_necessary : Preflight check - Set NTP service status] ****************************************************
ok: [192.168.10.131]
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.112]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.113]

TASK [check_system_necessary : Preflight check - NTP service] ***************************************************************

TASK [check_system_necessary : Set deploy_user - set ansible_user as default] ***********************************************

TASK [check_system_necessary : Set deploy_dir if not presented] *************************************************************

TASK [check_system_necessary : Determine which mountpoint deploy dir exists on] *********************************************
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.123]
ok: [192.168.10.113]

TASK [check_system_necessary : set_fact] ************************************************************************************

TASK [check_system_necessary : Preflight check - Check bug if file system is xfs] *******************************************

TASK [check_system_necessary : Preflight check - Clean check file for xfs file system] **************************************
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_system_necessary : set_fact] ************************************************************************************

TASK [check_system_necessary : Preflight check - Does deploy dir meet ext4 file system requirement] *************************

TASK [check_system_necessary : Preflight check - set fssystem_check_result status] ******************************************
ok: [192.168.10.111] => (item={u'block_used': 540666, u'uuid': u'd5da2ed5-40ad-4260-a2f6-0203234b6ff4', u'size_total': 42140401664, u'block_total': 10288184, u'inode_available': 2621428, u'block_available': 9747518, u'size_available': 39925833728, u'fstype': u'ext4', u'inode_total': 2621440, u'options': u'rw,noatime,nodelalloc,data=ordered', u'device': u'/dev/sdb1', u'inode_used': 12, u'block_size': 4096, u'mount': u'/data1'})
ok: [192.168.10.112] => (item={u'block_used': 540666, u'uuid': u'58669e95-7663-4011-83bd-25af1d69c10b', u'size_total': 42140401664, u'block_total': 10288184, u'inode_available': 2621428, u'block_available': 9747518, u'size_available': 39925833728, u'fstype': u'ext4', u'inode_total': 2621440, u'options': u'rw,noatime,nodelalloc,data=ordered', u'device': u'/dev/sdb1', u'inode_used': 12, u'block_size': 4096, u'mount': u'/data1'})
ok: [192.168.10.113] => (item={u'block_used': 540666, u'uuid': u'18211ead-9963-474e-977d-0cd576b6e833', u'size_total': 42140401664, u'block_total': 10288184, u'inode_available': 2621428, u'block_available': 9747518, u'size_available': 39925833728, u'fstype': u'ext4', u'inode_total': 2621440, u'options': u'rw,noatime,nodelalloc,data=ordered', u'device': u'/dev/sdb1', u'inode_used': 12, u'block_size': 4096, u'mount': u'/data1'})

TASK [check_system_necessary : Preflight check - Does deploy dir meet ext4 or xfs file system requirement] ******************

TASK [check_system_optional : Preflight check - Check TiDB server's CPU] ****************************************************

TASK [check_system_optional : Preflight check - Check TiKV server's CPU] ****************************************************

TASK [check_system_optional : Preflight check - Check PD server's CPU] ******************************************************

TASK [check_system_optional : Preflight check - Check Monitor server's CPU] *************************************************

TASK [check_system_optional : Preflight check - Check TiDB server's RAM] ****************************************************

TASK [check_system_optional : Preflight check - Check TiKV server's RAM] ****************************************************

TASK [check_system_optional : Preflight check - Check PD server's RAM] ******************************************************

TASK [check_system_optional : Preflight check - Check Monitor server's RAM] *************************************************

TASK [check_system_optional : Set deploy_user - set ansible_user as default] ************************************************

TASK [check_system_optional : Set deploy_dir if not presented] **************************************************************

TASK [check_system_optional : Determine which mountpoint deploy dir exists on] **********************************************

TASK [check_system_optional : set_fact] *************************************************************************************

TASK [check_system_optional : Preflight check - Check TiDB server's disk space] *********************************************

TASK [check_system_optional : set_fact] *************************************************************************************

TASK [check_system_optional : Preflight check - Check TiKV server's disk space] *********************************************

TASK [check_system_optional : set_fact] *************************************************************************************

TASK [check_system_optional : Preflight check - Check PD server's disk space] ***********************************************

TASK [check_system_optional : set_fact] *************************************************************************************

TASK [check_system_optional : Preflight check - Check Monitor server's disk space] ******************************************

PLAY [tikv_servers machine benchmark] ***************************************************************************************

TASK [machine_benchmark : deploy fio] ***************************************************************************************

TASK [machine_benchmark : Benchmark randread iops of deploy_dir disk with fio command] **************************************

TASK [machine_benchmark : Clean fio randread iops benchmark temporary file] *************************************************

TASK [machine_benchmark : debug] ********************************************************************************************

TASK [machine_benchmark : Preflight check - Does fio randread iops of deploy_dir disk meet requirement] *********************

TASK [machine_benchmark : Benchmark mixed randread and sequential write iops of deploy_dir disk with fio command] ***********

TASK [machine_benchmark : set fio disk_mix_randread_iops facts] *************************************************************

TASK [machine_benchmark : set fio disk_mix_write_iops facts] ****************************************************************

TASK [machine_benchmark : Clean fio mixed randread and sequential write iops benchmark temporary file] **********************

TASK [machine_benchmark : debug] ********************************************************************************************

TASK [machine_benchmark : Preflight check - Does fio mixed randread and sequential write iops of deploy_dir disk meet requirement - randread] ***

TASK [machine_benchmark : Preflight check - Does fio mixed randread and sequential write iops of deploy_dir disk meet requirement - sequential write] ***

TASK [machine_benchmark : Benchmark mixed randread and sequential write latency of deploy_dir disk with fio command] ********

TASK [machine_benchmark : set fio disk_mix_randread_lat facts] **************************************************************

TASK [machine_benchmark : set fio disk_mix_write_lat facts] *****************************************************************

TASK [machine_benchmark : Clean fio mixed randread and sequential write latency benchmark temporary file] *******************

TASK [machine_benchmark : debug] ********************************************************************************************

TASK [machine_benchmark : Preflight check - Does fio mixed randread and sequential write latency of deploy_dir disk meet requirement - randread] ***

TASK [machine_benchmark : Preflight check - Does fio mixed randread and sequential write latency of deploy_dir disk meet requirement - sequential write] ***

TASK [machine_benchmark : Benchmark the direct write speed of deploy_dir disk with dd command] ******************************

TASK [machine_benchmark : Clean dd benchmark temporary file] ****************************************************************

TASK [machine_benchmark : debug] ********************************************************************************************

TASK [machine_benchmark : Preflight check - Does the write speed of deploy_dir disk meet requirement] ***********************

PLAY [create ops scripts] ***************************************************************************************************

TASK [ops : create check_tikv.sh script] ************************************************************************************
changed: [localhost]

TASK [ops : create pd-ctl.sh script] ****************************************************************************************
changed: [localhost]

PLAY RECAP ******************************************************************************************************************
192.168.10.101             : ok=29   changed=0    unreachable=0    failed=0   
192.168.10.102             : ok=29   changed=0    unreachable=0    failed=0   
192.168.10.111             : ok=30   changed=0    unreachable=0    failed=0   
192.168.10.112             : ok=30   changed=0    unreachable=0    failed=0   
192.168.10.113             : ok=30   changed=0    unreachable=0    failed=0   
192.168.10.121             : ok=29   changed=0    unreachable=0    failed=0   
192.168.10.122             : ok=29   changed=0    unreachable=0    failed=0   
192.168.10.123             : ok=29   changed=0    unreachable=0    failed=0   
192.168.10.131             : ok=29   changed=0    unreachable=0    failed=0   
localhost                  : ok=3    changed=2    unreachable=0    failed=0   

Congrats! All goes well. :-)
[tidb@contoso200 tidb-ansible]$ 

[tidb@contoso200 tidb-ansible]$ ansible-playbook deploy.yml

PLAY [check config locally] ************************************************************************************************

TASK [check_config_static : Ensure only one monitoring host exists] ********************************************************

TASK [check_config_static : Warn if TiDB host not exists] ******************************************************************

TASK [check_config_static : Ensure zookeeper address of kafka cluster is set when enable_binlog] ***************************

TASK [check_config_static : Ensure PD host exists] *************************************************************************

TASK [check_config_static : Ensure TiKV host exists] ***********************************************************************

TASK [check_config_static : Ensure enable_tls is disabled when deployment_method is docker] ********************************

TASK [check_config_static : Check ansible_user variable] *******************************************************************

TASK [check_config_static : Close old control master] **********************************************************************
ok: [localhost]

PLAY [initializing deployment target] **************************************************************************************

TASK [check_config_dynamic : Disk space check - Fail task when disk is full] ***********************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.101]
ok: [192.168.10.123]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_config_dynamic : get facts] ************************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.122]
ok: [192.168.10.102]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.111]

TASK [check_config_dynamic : Preflight check - Set NTP service status] *****************************************************
ok: [192.168.10.123]
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.101]
ok: [192.168.10.121]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.113]
ok: [192.168.10.112]

TASK [check_config_dynamic : Preflight check - NTP service] ****************************************************************

TASK [check_config_dynamic : Set enable_binlog - set default] **************************************************************

TASK [check_config_dynamic : Set enable_binlog - set inverse] **************************************************************

TASK [check_config_dynamic : Set deploy_user - set ansible_user as default] ************************************************
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_config_dynamic : Set deploy_dir if not presented] **************************************************************

TASK [check_config_dynamic : Preflight check - Get umask] ******************************************************************
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.122]
ok: [192.168.10.101]
ok: [192.168.10.131]
ok: [192.168.10.111]
ok: [192.168.10.113]
ok: [192.168.10.112]
ok: [192.168.10.102]

TASK [check_config_dynamic : Preflight check - Get Hard Max FD limit] ******************************************************
ok: [192.168.10.123]
ok: [192.168.10.101]
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_config_dynamic : Preflight check - Does the system have a standard umask?] *************************************

TASK [check_config_dynamic : Preflight check - ulimit -n] ******************************************************************

TASK [check_config_dynamic : Preflight check - Get deploy dir permissions] *************************************************
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.101]
ok: [192.168.10.113]
ok: [192.168.10.112]
ok: [192.168.10.111]
ok: [192.168.10.102]

TASK [check_config_dynamic : Preflight check - Does deploy dir have appropriate permissions?] ******************************

TASK [check_config_dynamic : Check swap] ***********************************************************************************

TASK [check_config_dynamic : environment check (deploy dir)] ***************************************************************
ok: [192.168.10.122]
ok: [192.168.10.101]
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.102]
ok: [192.168.10.113]
ok: [192.168.10.111]

TASK [check_config_dynamic : environment check (supervise)] ****************************************************************

TASK [check_config_dynamic : config skip variables (default)] **************************************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_config_dynamic : config skip variables] ************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.121]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_config_dynamic : config skip variables] ************************************************************************

PLAY [deploying monitoring agent] ******************************************************************************************

TASK [common_dir : create deploy directories] ******************************************************************************
changed: [192.168.10.112] => (item=/data1/deploy/scripts)
changed: [192.168.10.101] => (item=/data1/deploy/scripts)
changed: [192.168.10.102] => (item=/data1/deploy/scripts)
changed: [192.168.10.111] => (item=/data1/deploy/scripts)
changed: [192.168.10.113] => (item=/data1/deploy/scripts)
changed: [192.168.10.111] => (item=/data1/deploy/conf)
changed: [192.168.10.101] => (item=/data1/deploy/conf)
changed: [192.168.10.112] => (item=/data1/deploy/conf)
changed: [192.168.10.102] => (item=/data1/deploy/conf)
changed: [192.168.10.113] => (item=/data1/deploy/conf)
changed: [192.168.10.101] => (item=/data1/deploy/backup)
changed: [192.168.10.102] => (item=/data1/deploy/backup)
changed: [192.168.10.112] => (item=/data1/deploy/backup)
changed: [192.168.10.111] => (item=/data1/deploy/backup)
changed: [192.168.10.113] => (item=/data1/deploy/backup)
changed: [192.168.10.122] => (item=/data1/deploy/scripts)
changed: [192.168.10.121] => (item=/data1/deploy/scripts)
changed: [192.168.10.123] => (item=/data1/deploy/scripts)
changed: [192.168.10.131] => (item=/data1/deploy/scripts)
changed: [192.168.10.122] => (item=/data1/deploy/conf)
changed: [192.168.10.121] => (item=/data1/deploy/conf)
changed: [192.168.10.123] => (item=/data1/deploy/conf)
changed: [192.168.10.131] => (item=/data1/deploy/conf)
changed: [192.168.10.122] => (item=/data1/deploy/backup)
changed: [192.168.10.121] => (item=/data1/deploy/backup)
changed: [192.168.10.123] => (item=/data1/deploy/backup)
changed: [192.168.10.131] => (item=/data1/deploy/backup)

TASK [common_dir : create status directory] ********************************************************************************
changed: [192.168.10.112] => (item=/data1/deploy/status)
changed: [192.168.10.113] => (item=/data1/deploy/status)
changed: [192.168.10.111] => (item=/data1/deploy/status)

TASK [common_dir : create deploy binary directory] *************************************************************************
changed: [192.168.10.112] => (item=/data1/deploy/bin)
changed: [192.168.10.101] => (item=/data1/deploy/bin)
changed: [192.168.10.113] => (item=/data1/deploy/bin)
changed: [192.168.10.102] => (item=/data1/deploy/bin)
changed: [192.168.10.111] => (item=/data1/deploy/bin)
changed: [192.168.10.123] => (item=/data1/deploy/bin)
changed: [192.168.10.121] => (item=/data1/deploy/bin)
changed: [192.168.10.122] => (item=/data1/deploy/bin)
changed: [192.168.10.131] => (item=/data1/deploy/bin)

TASK [common_dir : create docker image directory] **************************************************************************

TASK [node_exporter : include_tasks] ***************************************************************************************
included: /home/tidb/tidb-ansible/roles/node_exporter/tasks/binary_deployment.yml for 192.168.10.101, 192.168.10.102, 192.168.10.111, 192.168.10.112, 192.168.10.113, 192.168.10.121, 192.168.10.122, 192.168.10.123, 192.168.10.131

TASK [node_exporter : create deploy directories] ***************************************************************************
changed: [192.168.10.101] => (item=/data1/deploy/log)
changed: [192.168.10.112] => (item=/data1/deploy/log)
changed: [192.168.10.102] => (item=/data1/deploy/log)
changed: [192.168.10.113] => (item=/data1/deploy/log)
changed: [192.168.10.111] => (item=/data1/deploy/log)
changed: [192.168.10.122] => (item=/data1/deploy/log)
changed: [192.168.10.131] => (item=/data1/deploy/log)
changed: [192.168.10.121] => (item=/data1/deploy/log)
changed: [192.168.10.123] => (item=/data1/deploy/log)

TASK [node_exporter : deploy node_exporter binary] *************************************************************************
changed: [192.168.10.102]
changed: [192.168.10.113]
changed: [192.168.10.101]
changed: [192.168.10.111]
changed: [192.168.10.112]
changed: [192.168.10.123]
changed: [192.168.10.122]
changed: [192.168.10.121]
changed: [192.168.10.131]

TASK [node_exporter : create run script] ***********************************************************************************
changed: [192.168.10.113] => (item=run)
changed: [192.168.10.101] => (item=run)
changed: [192.168.10.112] => (item=run)
changed: [192.168.10.111] => (item=run)
changed: [192.168.10.102] => (item=run)
changed: [192.168.10.123] => (item=run)
changed: [192.168.10.121] => (item=run)
changed: [192.168.10.131] => (item=run)
changed: [192.168.10.122] => (item=run)

TASK [node_exporter : include_tasks] ***************************************************************************************
included: /home/tidb/tidb-ansible/roles/node_exporter/tasks/systemd_deployment.yml for 192.168.10.101, 192.168.10.102, 192.168.10.111, 192.168.10.112, 192.168.10.113, 192.168.10.121, 192.168.10.122, 192.168.10.123, 192.168.10.131

TASK [node_exporter : deploy systemd] **************************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.111]
changed: [192.168.10.102]
changed: [192.168.10.112]
changed: [192.168.10.101]
changed: [192.168.10.113]
changed: [192.168.10.121]
changed: [192.168.10.131]
changed: [192.168.10.122]
changed: [192.168.10.123]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.112] => (item=start)
changed: [192.168.10.111] => (item=start)
changed: [192.168.10.102] => (item=start)
changed: [192.168.10.101] => (item=start)
changed: [192.168.10.113] => (item=start)
changed: [192.168.10.112] => (item=stop)
changed: [192.168.10.113] => (item=stop)
changed: [192.168.10.111] => (item=stop)
changed: [192.168.10.102] => (item=stop)
changed: [192.168.10.101] => (item=stop)
changed: [192.168.10.123] => (item=start)
changed: [192.168.10.122] => (item=start)
changed: [192.168.10.121] => (item=start)
changed: [192.168.10.131] => (item=start)
changed: [192.168.10.123] => (item=stop)
changed: [192.168.10.121] => (item=stop)
changed: [192.168.10.122] => (item=stop)
changed: [192.168.10.131] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.102]
changed: [192.168.10.101]
changed: [192.168.10.112]
changed: [192.168.10.113]
changed: [192.168.10.111]
changed: [192.168.10.122]
changed: [192.168.10.121]
changed: [192.168.10.123]
changed: [192.168.10.131]

TASK [node_exporter : prepare firewalld white list] ************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.131]

TASK [blackbox_exporter : include_tasks] ***********************************************************************************
included: /home/tidb/tidb-ansible/roles/blackbox_exporter/tasks/binary_deployment.yml for 192.168.10.101, 192.168.10.102, 192.168.10.111, 192.168.10.112, 192.168.10.113, 192.168.10.121, 192.168.10.122, 192.168.10.123, 192.168.10.131

TASK [blackbox_exporter : create deploy directories] ***********************************************************************
ok: [192.168.10.112] => (item=/data1/deploy/log)
ok: [192.168.10.101] => (item=/data1/deploy/log)
ok: [192.168.10.113] => (item=/data1/deploy/log)
ok: [192.168.10.111] => (item=/data1/deploy/log)
ok: [192.168.10.102] => (item=/data1/deploy/log)
ok: [192.168.10.121] => (item=/data1/deploy/log)
ok: [192.168.10.122] => (item=/data1/deploy/log)
ok: [192.168.10.123] => (item=/data1/deploy/log)
ok: [192.168.10.131] => (item=/data1/deploy/log)

TASK [blackbox_exporter : deploy blackbox_exporter binary] *****************************************************************
changed: [192.168.10.101]
changed: [192.168.10.111]
changed: [192.168.10.112]
changed: [192.168.10.102]
changed: [192.168.10.113]
changed: [192.168.10.122]
changed: [192.168.10.121]
changed: [192.168.10.123]
changed: [192.168.10.131]

TASK [blackbox_exporter : blackbox_exporter binary add CAP_NET_RAW capability] *********************************************
changed: [192.168.10.102]
changed: [192.168.10.101]
changed: [192.168.10.111]
changed: [192.168.10.113]
changed: [192.168.10.112]
changed: [192.168.10.123]
changed: [192.168.10.121]
changed: [192.168.10.122]
changed: [192.168.10.131]
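
The CAP_NET_RAW step is what allows blackbox_exporter to run ICMP (ping) probes without running as root: raw sockets normally require root privileges, and granting the capability directly to the binary avoids that. If ping probes misbehave later, verify the capability is present (the path assumes the /data1/deploy/bin layout created earlier); it should report something like:

[root@contoso111 ~]# getcap /data1/deploy/bin/blackbox_exporter
/data1/deploy/bin/blackbox_exporter = cap_net_raw+ep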

TASK [blackbox_exporter : create run script] *******************************************************************************
changed: [192.168.10.111] => (item=run)
changed: [192.168.10.101] => (item=run)
changed: [192.168.10.113] => (item=run)
changed: [192.168.10.112] => (item=run)
changed: [192.168.10.102] => (item=run)
changed: [192.168.10.121] => (item=run)
changed: [192.168.10.122] => (item=run)
changed: [192.168.10.123] => (item=run)
changed: [192.168.10.131] => (item=run)

TASK [blackbox_exporter : include_tasks] ***********************************************************************************
included: /home/tidb/tidb-ansible/roles/blackbox_exporter/tasks/systemd_deployment.yml for 192.168.10.101, 192.168.10.102, 192.168.10.111, 192.168.10.112, 192.168.10.113, 192.168.10.121, 192.168.10.122, 192.168.10.123, 192.168.10.131

TASK [blackbox_exporter : deploy systemd] **********************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.112]
changed: [192.168.10.102]
changed: [192.168.10.101]
changed: [192.168.10.113]
changed: [192.168.10.111]
changed: [192.168.10.121]
changed: [192.168.10.122]
changed: [192.168.10.123]
changed: [192.168.10.131]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.101] => (item=start)
changed: [192.168.10.112] => (item=start)
changed: [192.168.10.111] => (item=start)
changed: [192.168.10.102] => (item=start)
changed: [192.168.10.113] => (item=start)
changed: [192.168.10.101] => (item=stop)
changed: [192.168.10.111] => (item=stop)
changed: [192.168.10.112] => (item=stop)
changed: [192.168.10.113] => (item=stop)
changed: [192.168.10.102] => (item=stop)
changed: [192.168.10.121] => (item=start)
changed: [192.168.10.122] => (item=start)
changed: [192.168.10.123] => (item=start)
changed: [192.168.10.131] => (item=start)
changed: [192.168.10.121] => (item=stop)
changed: [192.168.10.131] => (item=stop)
changed: [192.168.10.123] => (item=stop)
changed: [192.168.10.122] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.101]
changed: [192.168.10.102]
changed: [192.168.10.113]
changed: [192.168.10.112]
changed: [192.168.10.111]
changed: [192.168.10.121]
changed: [192.168.10.122]
changed: [192.168.10.123]
changed: [192.168.10.131]

TASK [blackbox_exporter : create config file] ******************************************************************************
changed: [192.168.10.112]
changed: [192.168.10.101]
changed: [192.168.10.102]
changed: [192.168.10.113]
changed: [192.168.10.111]
changed: [192.168.10.121]
changed: [192.168.10.122]
changed: [192.168.10.131]
changed: [192.168.10.123]

TASK [blackbox_exporter : backup conf file] ********************************************************************************

TASK [blackbox_exporter : prepare firewalld white list] ********************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.131]
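
At this point node_exporter and blackbox_exporter are laid out under /data1/deploy on every host, each with a run script, start/stop scripts and a systemd unit. deploy.yml only installs them; the services are brought up later (typically by the separate start step). A loose check that the units were registered on any node, without assuming their exact names:

[root@contoso111 ~]# systemctl list-unit-files | grep -E 'node_exporter|blackbox_exporter'

Once started, the exporters listen on their default ports (9100 for node_exporter, 9115 for blackbox_exporter) unless those were overridden in inventory.ini.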

PLAY [deploying diagnostic tools] ******************************************************************************************

TASK [common_dir : create deploy directories] ******************************************************************************
ok: [192.168.10.102] => (item=/data1/deploy/scripts)
ok: [192.168.10.113] => (item=/data1/deploy/scripts)
ok: [192.168.10.112] => (item=/data1/deploy/scripts)
ok: [192.168.10.101] => (item=/data1/deploy/scripts)
ok: [192.168.10.111] => (item=/data1/deploy/scripts)
ok: [192.168.10.102] => (item=/data1/deploy/conf)
ok: [192.168.10.101] => (item=/data1/deploy/conf)
ok: [192.168.10.113] => (item=/data1/deploy/conf)
ok: [192.168.10.112] => (item=/data1/deploy/conf)
ok: [192.168.10.111] => (item=/data1/deploy/conf)
ok: [192.168.10.101] => (item=/data1/deploy/backup)
ok: [192.168.10.112] => (item=/data1/deploy/backup)
ok: [192.168.10.102] => (item=/data1/deploy/backup)
ok: [192.168.10.113] => (item=/data1/deploy/backup)
ok: [192.168.10.111] => (item=/data1/deploy/backup)
ok: [192.168.10.122] => (item=/data1/deploy/scripts)
ok: [192.168.10.131] => (item=/data1/deploy/scripts)
ok: [192.168.10.121] => (item=/data1/deploy/scripts)
ok: [192.168.10.123] => (item=/data1/deploy/scripts)
ok: [192.168.10.131] => (item=/data1/deploy/conf)
ok: [192.168.10.121] => (item=/data1/deploy/conf)
ok: [192.168.10.123] => (item=/data1/deploy/conf)
ok: [192.168.10.122] => (item=/data1/deploy/conf)
ok: [192.168.10.121] => (item=/data1/deploy/backup)
ok: [192.168.10.123] => (item=/data1/deploy/backup)
ok: [192.168.10.131] => (item=/data1/deploy/backup)
ok: [192.168.10.122] => (item=/data1/deploy/backup)

TASK [common_dir : create status directory] ********************************************************************************
ok: [192.168.10.111] => (item=/data1/deploy/status)
ok: [192.168.10.112] => (item=/data1/deploy/status)
ok: [192.168.10.113] => (item=/data1/deploy/status)

TASK [common_dir : create deploy binary directory] *************************************************************************
ok: [192.168.10.111] => (item=/data1/deploy/bin)
ok: [192.168.10.102] => (item=/data1/deploy/bin)
ok: [192.168.10.101] => (item=/data1/deploy/bin)
ok: [192.168.10.113] => (item=/data1/deploy/bin)
ok: [192.168.10.112] => (item=/data1/deploy/bin)
ok: [192.168.10.123] => (item=/data1/deploy/bin)
ok: [192.168.10.121] => (item=/data1/deploy/bin)
ok: [192.168.10.122] => (item=/data1/deploy/bin)
ok: [192.168.10.131] => (item=/data1/deploy/bin)

TASK [common_dir : create docker image directory] **************************************************************************

TASK [collect_diagnosis : deploy collect-log.py script] ********************************************************************
changed: [192.168.10.102] => (item=collect-log.py)
changed: [192.168.10.113] => (item=collect-log.py)
changed: [192.168.10.111] => (item=collect-log.py)
changed: [192.168.10.101] => (item=collect-log.py)
changed: [192.168.10.112] => (item=collect-log.py)
changed: [192.168.10.121] => (item=collect-log.py)
changed: [192.168.10.122] => (item=collect-log.py)
changed: [192.168.10.123] => (item=collect-log.py)
changed: [192.168.10.131] => (item=collect-log.py)
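
collect-log.py is a small diagnostic helper placed in the scripts directory on every host; it is not part of the running cluster and is only used to gather component logs when troubleshooting. To confirm it landed where expected (path assumed from the scripts directory created above):

[root@contoso111 ~]# ls -l /data1/deploy/scripts/collect-log.py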

PLAY [deploying alertmanager] **********************************************************************************************

TASK [common_dir : create deploy directories] ******************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/scripts)
ok: [192.168.10.131] => (item=/data1/deploy/conf)
ok: [192.168.10.131] => (item=/data1/deploy/backup)

TASK [common_dir : create status directory] ********************************************************************************

TASK [common_dir : create deploy binary directory] *************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/bin)

TASK [common_dir : create docker image directory] **************************************************************************

TASK [alertmanager : include_tasks] ****************************************************************************************
included: /home/tidb/tidb-ansible/roles/alertmanager/tasks/binary_deployment.yml for 192.168.10.131

TASK [alertmanager : create deploy directories] ****************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/log)
changed: [192.168.10.131] => (item=/data1/deploy/data.alertmanager)

TASK [alertmanager : deploy alertmanager binary] ***************************************************************************
changed: [192.168.10.131]

TASK [alertmanager : create run script] ************************************************************************************
changed: [192.168.10.131] => (item=run)

TASK [alertmanager : include_tasks] ****************************************************************************************
included: /home/tidb/tidb-ansible/roles/alertmanager/tasks/systemd_deployment.yml for 192.168.10.131

TASK [alertmanager : deploy systemd] ***************************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.131]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.131] => (item=start)
changed: [192.168.10.131] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.131]

TASK [alertmanager : create configuration file] ****************************************************************************
changed: [192.168.10.131]

TASK [alertmanager : backup conf file] *************************************************************************************

TASK [alertmanager : prepare firewalld white list] *************************************************************************
ok: [192.168.10.131]

PLAY [deploying monitoring server] *****************************************************************************************

TASK [common_dir : create deploy directories] ******************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/scripts)
ok: [192.168.10.131] => (item=/data1/deploy/conf)
ok: [192.168.10.131] => (item=/data1/deploy/backup)

TASK [common_dir : create status directory] ********************************************************************************

TASK [common_dir : create deploy binary directory] *************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/bin)

TASK [common_dir : create docker image directory] **************************************************************************

TASK [pushgateway : include_tasks] *****************************************************************************************
included: /home/tidb/tidb-ansible/roles/pushgateway/tasks/binary_deployment.yml for 192.168.10.131

TASK [pushgateway : create deploy directories] *****************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/log)

TASK [pushgateway : deploy pushgateway binary] *****************************************************************************
changed: [192.168.10.131]

TASK [pushgateway : create run script] *************************************************************************************
changed: [192.168.10.131] => (item=run)

TASK [pushgateway : include_tasks] *****************************************************************************************
included: /home/tidb/tidb-ansible/roles/pushgateway/tasks/systemd_deployment.yml for 192.168.10.131

TASK [pushgateway : deploy systemd] ****************************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.131]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.131] => (item=start)
changed: [192.168.10.131] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.131]

TASK [pushgateway : prepare firewalld white list] **************************************************************************
ok: [192.168.10.131]

TASK [prometheus : create deploy directories] ******************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/log)
changed: [192.168.10.131] => (item=/data1/deploy/prometheus2.0.0.data.metrics)
changed: [192.168.10.131] => (item=/data1/deploy/status/prometheus)

TASK [prometheus : create configuration file] ******************************************************************************
changed: [192.168.10.131]

TASK [prometheus : backup configuration file] ******************************************************************************

TASK [prometheus : copy alert rules file] **********************************************************************************
changed: [192.168.10.131] => (item=node.rules.yml)
changed: [192.168.10.131] => (item=bypass.rules.yml)
changed: [192.168.10.131] => (item=pd.rules.yml)
changed: [192.168.10.131] => (item=tidb.rules.yml)
changed: [192.168.10.131] => (item=tikv.rules.yml)
changed: [192.168.10.131] => (item=binlog.rules.yml)
changed: [192.168.10.131] => (item=blacker.rules.yml)
changed: [192.168.10.131] => (item=kafka.rules.yml)

TASK [prometheus : backup alert rules file] ********************************************************************************

TASK [prometheus : set alert rules label changes] **************************************************************************
changed: [192.168.10.131] => (item=node.rules.yml)
ok: [192.168.10.131] => (item=bypass.rules.yml)
changed: [192.168.10.131] => (item=pd.rules.yml)
changed: [192.168.10.131] => (item=tidb.rules.yml)
changed: [192.168.10.131] => (item=tikv.rules.yml)
changed: [192.168.10.131] => (item=binlog.rules.yml)
changed: [192.168.10.131] => (item=blacker.rules.yml)
changed: [192.168.10.131] => (item=kafka.rules.yml)

TASK [prometheus : include_tasks] ******************************************************************************************
included: /home/tidb/tidb-ansible/roles/prometheus/tasks/binary_deployment.yml for 192.168.10.131

TASK [prometheus : deploy prometheus binary] *******************************************************************************
changed: [192.168.10.131]

TASK [prometheus : create run script] **************************************************************************************
changed: [192.168.10.131] => (item=run)

TASK [prometheus : include_tasks] ******************************************************************************************
included: /home/tidb/tidb-ansible/roles/prometheus/tasks/systemd_deployment.yml for 192.168.10.131

TASK [prometheus : deploy systemd] *****************************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.131]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.131] => (item=start)
changed: [192.168.10.131] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.131]

TASK [prometheus : prepare firewalld white list] ***************************************************************************
ok: [192.168.10.131]
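
The server-side monitoring pieces (alertmanager, pushgateway, Prometheus, and Grafana in the next play) are all concentrated on 192.168.10.131. The directories referenced by the tasks above make a convenient sanity check that the plays did what they claim; Prometheus will only fill prometheus2.0.0.data.metrics with time-series data after the cluster is started:

[root@contoso131 ~]# ls -d /data1/deploy/data.alertmanager /data1/deploy/prometheus2.0.0.data.metrics /data1/deploy/conf /data1/deploy/bin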

PLAY [deploying grafana] ***************************************************************************************************

TASK [common_dir : create deploy directories] ******************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/scripts)
ok: [192.168.10.131] => (item=/data1/deploy/conf)
ok: [192.168.10.131] => (item=/data1/deploy/backup)

TASK [common_dir : create status directory] ********************************************************************************

TASK [common_dir : create deploy binary directory] *************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/bin)

TASK [common_dir : create docker image directory] **************************************************************************

TASK [grafana : include_tasks] *********************************************************************************************
included: /home/tidb/tidb-ansible/roles/grafana/tasks/tasks.yml for 192.168.10.131

TASK [grafana : create common deploy directories] **************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/log)
changed: [192.168.10.131] => (item=/data1/deploy/data.grafana)

TASK [grafana : push data source file] *************************************************************************************
changed: [192.168.10.131]

TASK [grafana : include_tasks] *********************************************************************************************
included: /home/tidb/tidb-ansible/roles/grafana/tasks/binary_deployment.yml for 192.168.10.131

TASK [grafana : create binary deploy directories (1/2)] ********************************************************************
changed: [192.168.10.131] => (item=/data1/deploy/opt)

TASK [grafana : deploy grafana binary] *************************************************************************************
changed: [192.168.10.131]

TASK [grafana : rename grafana deploy dir] *********************************************************************************
changed: [192.168.10.131]

TASK [grafana : create binary deploy directories (2/2)] ********************************************************************
changed: [192.168.10.131] => (item=/data1/deploy/opt/grafana/dashboards)
changed: [192.168.10.131] => (item=/data1/deploy/opt/grafana/plugins)

TASK [grafana : create grafana configuration file] *************************************************************************
changed: [192.168.10.131]

TASK [grafana : create run script] *****************************************************************************************
changed: [192.168.10.131] => (item=run)

TASK [grafana : include_tasks] *********************************************************************************************
included: /home/tidb/tidb-ansible/roles/grafana/tasks/systemd_deployment.yml for 192.168.10.131

TASK [grafana : deploy systemd] ********************************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.131]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.131] => (item=start)
changed: [192.168.10.131] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.131]

TASK [grafana : prepare firewalld white list] ******************************************************************************
ok: [192.168.10.131]

TASK [common_dir : create deploy directories] ******************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/scripts)
ok: [192.168.10.131] => (item=/data1/deploy/conf)
ok: [192.168.10.131] => (item=/data1/deploy/backup)

TASK [common_dir : create status directory] ********************************************************************************

TASK [common_dir : create deploy binary directory] *************************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/bin)

TASK [common_dir : create docker image directory] **************************************************************************

TASK [grafana_collector : include_tasks] ***********************************************************************************
included: /home/tidb/tidb-ansible/roles/grafana_collector/tasks/binary_deployment.yml for 192.168.10.131

TASK [grafana_collector : create deploy directories] ***********************************************************************
ok: [192.168.10.131] => (item=/data1/deploy/log)
changed: [192.168.10.131] => (item=/data1/deploy/conf/fonts)

TASK [grafana_collector : deploy grafana_collector binary] *****************************************************************
changed: [192.168.10.131]

TASK [grafana_collector : deploy ttf font] *********************************************************************************
changed: [192.168.10.131]

TASK [grafana_collector : create run script] *******************************************************************************
changed: [192.168.10.131] => (item=run)

TASK [grafana_collector : include_tasks] ***********************************************************************************
included: /home/tidb/tidb-ansible/roles/grafana_collector/tasks/systemd_deployment.yml for 192.168.10.131

TASK [grafana_collector : deploy systemd] **********************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.131]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.131] => (item=start)
changed: [192.168.10.131] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.131]

TASK [grafana_collector : create configuration file] ***********************************************************************
changed: [192.168.10.131]

TASK [grafana_collector : backup conf file] ********************************************************************************

TASK [grafana_collector : prepare firewalld white list] ********************************************************************
ok: [192.168.10.131]
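
Grafana and grafana_collector are likewise confined to 192.168.10.131: Grafana keeps its state in /data1/deploy/data.grafana and its unpacked installation in /data1/deploy/opt/grafana (with the dashboards and plugins subdirectories created above), while the TTF font under /data1/deploy/conf/fonts is used by grafana_collector when exporting dashboards. Grafana's web UI normally listens on port 3000 once started, unless the port was changed in inventory.ini.

[root@contoso131 ~]# ls -d /data1/deploy/data.grafana /data1/deploy/opt/grafana /data1/deploy/conf/fonts
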
 [WARNING]: Could not match supplied host pattern, ignoring: kafka_exporter_servers

PLAY [deploying kafka_exporter] ********************************************************************************************
skipping: no hosts matched
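
The warning and the skipped play are expected: the playbook targets a kafka_exporter_servers group that has no hosts in this inventory, since there is no Kafka cluster here (it would only be monitored, for example, when TiDB-Binlog writes to Kafka). With no hosts to match, Ansible simply skips the play. The spark_master and spark_slaves warnings near the end of this run are the same situation. Illustrative inventory.ini fragment (the group name is taken from the warning itself):

[kafka_exporter_servers]
# intentionally left empty - no Kafka to monitor in this cluster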

PLAY [deploying PD cluster] ************************************************************************************************

TASK [common_dir : create deploy directories] ******************************************************************************
ok: [192.168.10.121] => (item=/data1/deploy/scripts)
ok: [192.168.10.122] => (item=/data1/deploy/scripts)
ok: [192.168.10.123] => (item=/data1/deploy/scripts)
ok: [192.168.10.122] => (item=/data1/deploy/conf)
ok: [192.168.10.121] => (item=/data1/deploy/conf)
ok: [192.168.10.123] => (item=/data1/deploy/conf)
ok: [192.168.10.121] => (item=/data1/deploy/backup)
ok: [192.168.10.122] => (item=/data1/deploy/backup)
ok: [192.168.10.123] => (item=/data1/deploy/backup)

TASK [common_dir : create status directory] ********************************************************************************

TASK [common_dir : create deploy binary directory] *************************************************************************
ok: [192.168.10.122] => (item=/data1/deploy/bin)
ok: [192.168.10.121] => (item=/data1/deploy/bin)
ok: [192.168.10.123] => (item=/data1/deploy/bin)

TASK [common_dir : create docker image directory] **************************************************************************

TASK [pd : create deploy directories] **************************************************************************************
ok: [192.168.10.121] => (item=/data1/deploy/log)
ok: [192.168.10.122] => (item=/data1/deploy/log)
ok: [192.168.10.123] => (item=/data1/deploy/log)
ok: [192.168.10.122] => (item=/data1/deploy/conf)
ok: [192.168.10.121] => (item=/data1/deploy/conf)
ok: [192.168.10.123] => (item=/data1/deploy/conf)
changed: [192.168.10.121] => (item=/data1/deploy/data.pd)
changed: [192.168.10.122] => (item=/data1/deploy/data.pd)
changed: [192.168.10.123] => (item=/data1/deploy/data.pd)

TASK [pd : include_tasks] **************************************************************************************************

TASK [pd : include_tasks] **************************************************************************************************

TASK [pd : include_tasks] **************************************************************************************************

TASK [pd : load customized config] *****************************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]

TASK [pd : load default config] ********************************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]

TASK [pd : generate dynamic config] ****************************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]

TASK [pd : generate final config] ******************************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]

TASK [pd : debug] **********************************************************************************************************
ok: [192.168.10.121] => {
    "pd_conf": {
        "global": {
            "lease": 3, 
            "namespace-classifier": "table", 
            "tso-save-interval": "3s"
        }, 
        "log": {
            "file": {}, 
            "level": "info"
        }, 
        "metric": {
            "address": "192.168.10.131:9091", 
            "interval": "15s"
        }, 
        "replication": {
            "location-labels": [], 
            "max-replicas": 3
        }, 
        "schedule": {
            "leader-schedule-limit": 4, 
            "max-merge-region-size": 0, 
            "max-pending-peer-count": 16, 
            "max-snapshot-count": 3, 
            "max-store-down-time": "30m", 
            "merge-schedule-limit": 8, 
            "region-schedule-limit": 4, 
            "replica-schedule-limit": 8, 
            "split-merge-interval": "1h", 
            "tolerant-size-ratio": 5.0
        }, 
        "security": {
            "cacert-path": "", 
            "cert-path": "", 
            "key-path": ""
        }
    }
}
ok: [192.168.10.122] => {
    "pd_conf": {
        "global": {
            "lease": 3, 
            "namespace-classifier": "table", 
            "tso-save-interval": "3s"
        }, 
        "log": {
            "file": {}, 
            "level": "info"
        }, 
        "metric": {
            "address": "192.168.10.131:9091", 
            "interval": "15s"
        }, 
        "replication": {
            "location-labels": [], 
            "max-replicas": 3
        }, 
        "schedule": {
            "leader-schedule-limit": 4, 
            "max-merge-region-size": 0, 
            "max-pending-peer-count": 16, 
            "max-snapshot-count": 3, 
            "max-store-down-time": "30m", 
            "merge-schedule-limit": 8, 
            "region-schedule-limit": 4, 
            "replica-schedule-limit": 8, 
            "split-merge-interval": "1h", 
            "tolerant-size-ratio": 5.0
        }, 
        "security": {
            "cacert-path": "", 
            "cert-path": "", 
            "key-path": ""
        }
    }
}
ok: [192.168.10.123] => {
    "pd_conf": {
        "global": {
            "lease": 3, 
            "namespace-classifier": "table", 
            "tso-save-interval": "3s"
        }, 
        "log": {
            "file": {}, 
            "level": "info"
        }, 
        "metric": {
            "address": "192.168.10.131:9091", 
            "interval": "15s"
        }, 
        "replication": {
            "location-labels": [], 
            "max-replicas": 3
        }, 
        "schedule": {
            "leader-schedule-limit": 4, 
            "max-merge-region-size": 0, 
            "max-pending-peer-count": 16, 
            "max-snapshot-count": 3, 
            "max-store-down-time": "30m", 
            "merge-schedule-limit": 8, 
            "region-schedule-limit": 4, 
            "replica-schedule-limit": 8, 
            "split-merge-interval": "1h", 
            "tolerant-size-ratio": 5.0
        }, 
        "security": {
            "cacert-path": "", 
            "cert-path": "", 
            "key-path": ""
        }
    }
}

TASK [pd : create configuration file] **************************************************************************************
changed: [192.168.10.121]
changed: [192.168.10.123]
changed: [192.168.10.122]

TASK [pd : backup conf file] ***********************************************************************************************

TASK [pd : include_tasks] **************************************************************************************************
included: /home/tidb/tidb-ansible/roles/pd/tasks/binary_deployment.yml for 192.168.10.121, 192.168.10.122, 192.168.10.123

TASK [pd : deploy binary] **************************************************************************************************
changed: [192.168.10.121]
changed: [192.168.10.123]
changed: [192.168.10.122]

TASK [pd : backup bianry file] *********************************************************************************************

TASK [pd : create startup script] ******************************************************************************************
changed: [192.168.10.121] => (item=run)
changed: [192.168.10.122] => (item=run)
changed: [192.168.10.123] => (item=run)

TASK [pd : include_tasks] **************************************************************************************************
included: /home/tidb/tidb-ansible/roles/pd/tasks/systemd_deployment.yml for 192.168.10.121, 192.168.10.122, 192.168.10.123

TASK [pd : deploy systemd] *************************************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.121]
changed: [192.168.10.122]
changed: [192.168.10.123]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.123] => (item=start)
changed: [192.168.10.122] => (item=start)
changed: [192.168.10.121] => (item=start)
changed: [192.168.10.123] => (item=stop)
changed: [192.168.10.121] => (item=stop)
changed: [192.168.10.122] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.122]
changed: [192.168.10.123]
changed: [192.168.10.121]

TASK [pd : prepare firewalld white list] ***********************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]
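
The pd_conf dump printed by the debug task is what the "create configuration file" step renders into PD's TOML config under the deploy directory (conf/pd.toml in the default layout; confirm the filename on your PD nodes). The mapping is mechanical; for example, the replication and schedule parts of the dump become:

[replication]
max-replicas = 3

[schedule]
leader-schedule-limit = 4
region-schedule-limit = 4
max-store-down-time = "30m"

Only keys that actually appear in the dump above are shown here; the generated file carries all of them.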

PLAY [deploying TiKV cluster] **********************************************************************************************

TASK [common_dir : create deploy directories] ******************************************************************************
ok: [192.168.10.111] => (item=/data1/deploy/scripts)
ok: [192.168.10.113] => (item=/data1/deploy/scripts)
ok: [192.168.10.112] => (item=/data1/deploy/scripts)
ok: [192.168.10.111] => (item=/data1/deploy/conf)
ok: [192.168.10.112] => (item=/data1/deploy/conf)
ok: [192.168.10.113] => (item=/data1/deploy/conf)
ok: [192.168.10.111] => (item=/data1/deploy/backup)
ok: [192.168.10.113] => (item=/data1/deploy/backup)
ok: [192.168.10.112] => (item=/data1/deploy/backup)

TASK [common_dir : create status directory] ********************************************************************************
ok: [192.168.10.111] => (item=/data1/deploy/status)
ok: [192.168.10.113] => (item=/data1/deploy/status)
ok: [192.168.10.112] => (item=/data1/deploy/status)

TASK [common_dir : create deploy binary directory] *************************************************************************
ok: [192.168.10.112] => (item=/data1/deploy/bin)
ok: [192.168.10.111] => (item=/data1/deploy/bin)
ok: [192.168.10.113] => (item=/data1/deploy/bin)

TASK [common_dir : create docker image directory] **************************************************************************

TASK [tikv : create deploy directories] ************************************************************************************
ok: [192.168.10.113] => (item=/data1/deploy/log)
ok: [192.168.10.111] => (item=/data1/deploy/log)
ok: [192.168.10.112] => (item=/data1/deploy/log)
changed: [192.168.10.111] => (item=/data1/deploy/data)
changed: [192.168.10.113] => (item=/data1/deploy/data)
changed: [192.168.10.112] => (item=/data1/deploy/data)
ok: [192.168.10.113] => (item=/data1/deploy/conf)
ok: [192.168.10.111] => (item=/data1/deploy/conf)
ok: [192.168.10.112] => (item=/data1/deploy/conf)

TASK [tikv : include_tasks] ************************************************************************************************

TASK [tikv : include_tasks] ************************************************************************************************

TASK [tikv : include_tasks] ************************************************************************************************

TASK [tikv : load customized config] ***************************************************************************************
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [tikv : load default config] ******************************************************************************************
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [tikv : generate dynamic config] **************************************************************************************
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [tikv : generate final config] ****************************************************************************************
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [tikv : debug] ********************************************************************************************************
ok: [192.168.10.111] => {
    "tikv_conf": {
        "coprocessor": {}, 
        "global": {}, 
        "import": {}, 
        "metric": {
            "address": "192.168.10.131:9091", 
            "interval": "15s", 
            "job": "tikv"
        }, 
        "pd": {}, 
        "raftdb": {
            "defaultcf": {}
        }, 
        "raftstore": {
            "raftdb-path": "", 
            "sync-log": true
        }, 
        "readpool": {
            "coprocessor": {}, 
            "storage": {}
        }, 
        "rocksdb": {
            "defaultcf": {}, 
            "lockcf": {}, 
            "wal-dir": "", 
            "writecf": {}
        }, 
        "security": {
            "ca-path": "", 
            "cert-path": "", 
            "key-path": ""
        }, 
        "server": {
            "labels": {}
        }, 
        "storage": {}
    }
}
ok: [192.168.10.112] => {
    "tikv_conf": {
        "coprocessor": {}, 
        "global": {}, 
        "import": {}, 
        "metric": {
            "address": "192.168.10.131:9091", 
            "interval": "15s", 
            "job": "tikv"
        }, 
        "pd": {}, 
        "raftdb": {
            "defaultcf": {}
        }, 
        "raftstore": {
            "raftdb-path": "", 
            "sync-log": true
        }, 
        "readpool": {
            "coprocessor": {}, 
            "storage": {}
        }, 
        "rocksdb": {
            "defaultcf": {}, 
            "lockcf": {}, 
            "wal-dir": "", 
            "writecf": {}
        }, 
        "security": {
            "ca-path": "", 
            "cert-path": "", 
            "key-path": ""
        }, 
        "server": {
            "labels": {}
        }, 
        "storage": {}
    }
}
ok: [192.168.10.113] => {
    "tikv_conf": {
        "coprocessor": {}, 
        "global": {}, 
        "import": {}, 
        "metric": {
            "address": "192.168.10.131:9091", 
            "interval": "15s", 
            "job": "tikv"
        }, 
        "pd": {}, 
        "raftdb": {
            "defaultcf": {}
        }, 
        "raftstore": {
            "raftdb-path": "", 
            "sync-log": true
        }, 
        "readpool": {
            "coprocessor": {}, 
            "storage": {}
        }, 
        "rocksdb": {
            "defaultcf": {}, 
            "lockcf": {}, 
            "wal-dir": "", 
            "writecf": {}
        }, 
        "security": {
            "ca-path": "", 
            "cert-path": "", 
            "key-path": ""
        }, 
        "server": {
            "labels": {}
        }, 
        "storage": {}
    }
}

TASK [tikv : create config file] *******************************************************************************************
changed: [192.168.10.112]
changed: [192.168.10.111]
changed: [192.168.10.113]

TASK [tikv : backup conf file] *********************************************************************************************

TASK [tikv : include_tasks] ************************************************************************************************
included: /home/tidb/tidb-ansible/roles/tikv/tasks/binary_deployment.yml for 192.168.10.111, 192.168.10.112, 192.168.10.113

TASK [tikv : deploy binary] ************************************************************************************************
changed: [192.168.10.112]
changed: [192.168.10.111]
changed: [192.168.10.113]

TASK [tikv : backup bianry file] *******************************************************************************************

TASK [tikv : create run script] ********************************************************************************************
changed: [192.168.10.111] => (item=run)
changed: [192.168.10.112] => (item=run)
changed: [192.168.10.113] => (item=run)

TASK [tikv : backup script file] *******************************************************************************************

TASK [tikv : include_tasks] ************************************************************************************************
included: /home/tidb/tidb-ansible/roles/tikv/tasks/systemd_deployment.yml for 192.168.10.111, 192.168.10.112, 192.168.10.113

TASK [tikv : deploy systemd] ***********************************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.112]
changed: [192.168.10.111]
changed: [192.168.10.113]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.113] => (item=start)
changed: [192.168.10.111] => (item=start)
changed: [192.168.10.112] => (item=start)
changed: [192.168.10.112] => (item=stop)
changed: [192.168.10.111] => (item=stop)
changed: [192.168.10.113] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.111]
changed: [192.168.10.112]
changed: [192.168.10.113]

TASK [tikv : prepare firewalld white list] *********************************************************************************
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
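
Two details worth noting from the TiKV play: the data directory is /data1/deploy/data on the three storage nodes (created above), and the generated config keeps raftstore.sync-log = true, meaning Raft log writes are flushed to disk before being acknowledged, trading a little write latency for durability if a node loses power. A quick check on one TiKV node:

[root@contoso111 ~]# ls -d /data1/deploy/data /data1/deploy/conf /data1/deploy/bin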

PLAY [deploying TiDB cluster] **********************************************************************************************

TASK [common_dir : create deploy directories] ******************************************************************************

TASK [common_dir : create status directory] ********************************************************************************

TASK [common_dir : create deploy binary directory] *************************************************************************

TASK [common_dir : create docker image directory] **************************************************************************

TASK [pump : create deploy directories] ************************************************************************************

TASK [pump : include_tasks] ************************************************************************************************

TASK [pump : include_tasks] ************************************************************************************************

TASK [pump : include_tasks] ************************************************************************************************

TASK [pump : load customized config] ***************************************************************************************

TASK [pump : load default config] ******************************************************************************************

TASK [pump : generate dynamic config] **************************************************************************************

TASK [pump : generate final config] ****************************************************************************************

TASK [pump : debug] ********************************************************************************************************

TASK [pump : create configuration file] ************************************************************************************

TASK [pump : backup conf file] *********************************************************************************************

TASK [pump : include_tasks] ************************************************************************************************

TASK [pump : prepare firewalld white list] *********************************************************************************

TASK [common_dir : create deploy directories] ******************************************************************************
ok: [192.168.10.101] => (item=/data1/deploy/scripts)
ok: [192.168.10.102] => (item=/data1/deploy/scripts)
ok: [192.168.10.101] => (item=/data1/deploy/conf)
ok: [192.168.10.102] => (item=/data1/deploy/conf)
ok: [192.168.10.101] => (item=/data1/deploy/backup)
ok: [192.168.10.102] => (item=/data1/deploy/backup)

TASK [common_dir : create status directory] ********************************************************************************

TASK [common_dir : create deploy binary directory] *************************************************************************
ok: [192.168.10.101] => (item=/data1/deploy/bin)
ok: [192.168.10.102] => (item=/data1/deploy/bin)

TASK [common_dir : create docker image directory] **************************************************************************

TASK [tidb : create deploy directories] ************************************************************************************
ok: [192.168.10.101] => (item=/data1/deploy/log)
ok: [192.168.10.102] => (item=/data1/deploy/log)
ok: [192.168.10.101] => (item=/data1/deploy/conf)
ok: [192.168.10.102] => (item=/data1/deploy/conf)

TASK [tidb : include_tasks] ************************************************************************************************

TASK [tidb : include_tasks] ************************************************************************************************

TASK [tidb : include_tasks] ************************************************************************************************

TASK [tidb : load customized config] ***************************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [tidb : load default config] ******************************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [tidb : generate dynamic config] **************************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [tidb : combine final config] *****************************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [tidb : debug] ********************************************************************************************************
ok: [192.168.10.101] => {
    "tidb_conf": {
        "binlog": {
            "ignore-error": false, 
            "write-timeout": "15s"
        }, 
        "global": {
            "enable-streaming": false, 
            "host": "0.0.0.0", 
            "lease": "45s", 
            "lower-case-table-names": 2, 
            "oom-action": "log", 
            "run-ddl": true, 
            "socket": "", 
            "split-table": true, 
            "store": "tikv", 
            "token-limit": 1000
        }, 
        "log": {
            "disable-timestamp": false, 
            "expensive-threshold": 10000, 
            "file": {
                "log-rotate": true, 
                "max-backups": 0, 
                "max-days": 0, 
                "max-size": 300
            }, 
            "format": "text", 
            "level": "info", 
            "query-log-max-len": 2048, 
            "slow-threshold": 300
        }, 
        "opentracing": {
            "enable": false, 
            "reporter": {
                "buffer-flush-interval": 0, 
                "local-agent-host-port": "", 
                "log-spans": false, 
                "queue-size": 0
            }, 
            "rpc-metrics": false, 
            "sampler": {
                "max-operations": 0, 
                "param": 1.0, 
                "sampling-refresh-interval": 0, 
                "sampling-server-url": "", 
                "type": "const"
            }
        }, 
        "performance": {
            "cross-join": true, 
            "feedback-probability": 0.05, 
            "max-procs": 0, 
            "pseudo-estimate-ratio": 0.8, 
            "query-feedback-limit": 1024, 
            "run-auto-analyze": true, 
            "stats-lease": "3s", 
            "stmt-count-limit": 5000, 
            "tcp-keep-alive": true
        }, 
        "plan_cache": {
            "capacity": 2560, 
            "enabled": false, 
            "shards": 256
        }, 
        "prepared_plan_cache": {
            "capacity": 100, 
            "enabled": false
        }, 
        "proxy_protocol": {
            "header-timeout": 5, 
            "networks": ""
        }, 
        "security": {
            "cluster-ssl-ca": "", 
            "cluster-ssl-cert": "", 
            "cluster-ssl-key": "", 
            "ssl-ca": "", 
            "ssl-cert": "", 
            "ssl-key": ""
        }, 
        "status": {
            "metrics-addr": "192.168.10.131:9091", 
            "metrics-interval": 15, 
            "report-status": true
        }, 
        "tikv_client": {
            "commit-timeout": "41s", 
            "grpc-connection-count": 16
        }, 
        "txn_local_latches": {
            "capacity": 1024000, 
            "enabled": false
        }
    }
}
ok: [192.168.10.102] => {
    "tidb_conf": {
        "binlog": {
            "ignore-error": false, 
            "write-timeout": "15s"
        }, 
        "global": {
            "enable-streaming": false, 
            "host": "0.0.0.0", 
            "lease": "45s", 
            "lower-case-table-names": 2, 
            "oom-action": "log", 
            "run-ddl": true, 
            "socket": "", 
            "split-table": true, 
            "store": "tikv", 
            "token-limit": 1000
        }, 
        "log": {
            "disable-timestamp": false, 
            "expensive-threshold": 10000, 
            "file": {
                "log-rotate": true, 
                "max-backups": 0, 
                "max-days": 0, 
                "max-size": 300
            }, 
            "format": "text", 
            "level": "info", 
            "query-log-max-len": 2048, 
            "slow-threshold": 300
        }, 
        "opentracing": {
            "enable": false, 
            "reporter": {
                "buffer-flush-interval": 0, 
                "local-agent-host-port": "", 
                "log-spans": false, 
                "queue-size": 0
            }, 
            "rpc-metrics": false, 
            "sampler": {
                "max-operations": 0, 
                "param": 1.0, 
                "sampling-refresh-interval": 0, 
                "sampling-server-url": "", 
                "type": "const"
            }
        }, 
        "performance": {
            "cross-join": true, 
            "feedback-probability": 0.05, 
            "max-procs": 0, 
            "pseudo-estimate-ratio": 0.8, 
            "query-feedback-limit": 1024, 
            "run-auto-analyze": true, 
            "stats-lease": "3s", 
            "stmt-count-limit": 5000, 
            "tcp-keep-alive": true
        }, 
        "plan_cache": {
            "capacity": 2560, 
            "enabled": false, 
            "shards": 256
        }, 
        "prepared_plan_cache": {
            "capacity": 100, 
            "enabled": false
        }, 
        "proxy_protocol": {
            "header-timeout": 5, 
            "networks": ""
        }, 
        "security": {
            "cluster-ssl-ca": "", 
            "cluster-ssl-cert": "", 
            "cluster-ssl-key": "", 
            "ssl-ca": "", 
            "ssl-cert": "", 
            "ssl-key": ""
        }, 
        "status": {
            "metrics-addr": "192.168.10.131:9091", 
            "metrics-interval": 15, 
            "report-status": true
        }, 
        "tikv_client": {
            "commit-timeout": "41s", 
            "grpc-connection-count": 16
        }, 
        "txn_local_latches": {
            "capacity": 1024000, 
            "enabled": false
        }
    }
}

TASK [tidb : create config file] *******************************************************************************************
changed: [192.168.10.101]
changed: [192.168.10.102]

TASK [tidb : backup conf file] *********************************************************************************************

TASK [tidb : include_tasks] ************************************************************************************************
included: /home/tidb/tidb-ansible/roles/tidb/tasks/binary_deployment.yml for 192.168.10.101, 192.168.10.102

TASK [tidb : deploy binary] ************************************************************************************************
changed: [192.168.10.102]
changed: [192.168.10.101]

TASK [tidb : backup binary file] *******************************************************************************************

TASK [tidb : create run script] ********************************************************************************************
changed: [192.168.10.101] => (item=run)
changed: [192.168.10.102] => (item=run)

TASK [tidb : backup script file] *******************************************************************************************

TASK [tidb : include_tasks] ************************************************************************************************
included: /home/tidb/tidb-ansible/roles/tidb/tasks/systemd_deployment.yml for 192.168.10.101, 192.168.10.102

TASK [tidb : deploy systemd] ***********************************************************************************************

TASK [systemd : create systemd service configuration] **********************************************************************
changed: [192.168.10.101]
changed: [192.168.10.102]

TASK [systemd : create startup script - common start/stop] *****************************************************************
changed: [192.168.10.101] => (item=start)
changed: [192.168.10.102] => (item=start)
changed: [192.168.10.101] => (item=stop)
changed: [192.168.10.102] => (item=stop)

TASK [systemd : reload systemd] ********************************************************************************************
changed: [192.168.10.101]
changed: [192.168.10.102]

TASK [tidb : prepare firewalld white list] *********************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [tispark : deploy spark-2.1.1-bin-hadoop2.7] **************************************************************************
changed: [192.168.10.101]
changed: [192.168.10.102]

TASK [tispark : rename spark deploy dir] ***********************************************************************************
changed: [192.168.10.101]
changed: [192.168.10.102]

TASK [tispark : deploy tispark] ********************************************************************************************
changed: [192.168.10.102]
changed: [192.168.10.101]

TASK [tispark : include_tasks] *********************************************************************************************
 [WARNING]: Could not match supplied host pattern, ignoring: spark_master
 [WARNING]: Could not match supplied host pattern, ignoring: spark_slaves

PLAY [deploying tispark cluster] *******************************************************************************************
skipping: no hosts matched

PLAY [finalizing deployment target] ****************************************************************************************

TASK [firewalld : All enabled ports] ***************************************************************************************

TASK [firewalld : determine if firewalld is running] ***********************************************************************

TASK [firewalld : enable firewalld ports] **********************************************************************************

TASK [firewalld : reload firewalld] ****************************************************************************************

PLAY RECAP *****************************************************************************************************************
192.168.10.101             : ok=55   changed=25   unreachable=0    failed=0   
192.168.10.102             : ok=55   changed=25   unreachable=0    failed=0   
192.168.10.111             : ok=55   changed=24   unreachable=0    failed=0   
192.168.10.112             : ok=55   changed=24   unreachable=0    failed=0   
192.168.10.113             : ok=55   changed=24   unreachable=0    failed=0   
192.168.10.121             : ok=52   changed=23   unreachable=0    failed=0   
192.168.10.122             : ok=52   changed=23   unreachable=0    failed=0   
192.168.10.123             : ok=52   changed=23   unreachable=0    failed=0   
192.168.10.131             : ok=100  changed=56   unreachable=0    failed=0   
localhost                  : ok=1    changed=0    unreachable=0    failed=0   

Congrats! All goes well. :-)
[tidb@contoso200 tidb-ansible]$ 

 

[tidb@contoso200 tidb-ansible]$ ansible-playbook start.yml

 

PLAY [check config locally] *************************************************************************************************

TASK [check_config_static : Ensure only one monitoring host exists] *********************************************************

TASK [check_config_static : Warn if TiDB host not exists] *******************************************************************

TASK [check_config_static : Ensure zookeeper address of kafka cluster is set when enable_binlog] ****************************

TASK [check_config_static : Ensure PD host exists] **************************************************************************

TASK [check_config_static : Ensure TiKV host exists] ************************************************************************

TASK [check_config_static : Ensure enable_tls is disabled when deployment_method is docker] *********************************

TASK [check_config_static : Check ansible_user variable] ********************************************************************

TASK [check_config_static : Close old control master] ***********************************************************************
ok: [localhost]

PLAY [gather all facts, and check dest] *************************************************************************************

TASK [check_config_dynamic : Disk space check - Fail task when disk is full] ************************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [check_config_dynamic : get facts] *************************************************************************************
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.121]
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_config_dynamic : Preflight check - Set NTP service status] ******************************************************
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.111]
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [check_config_dynamic : Preflight check - NTP service] *****************************************************************

TASK [check_config_dynamic : Set enable_binlog - set default] ***************************************************************

TASK [check_config_dynamic : Set enable_binlog - set inverse] ***************************************************************

TASK [check_config_dynamic : Set deploy_user - set ansible_user as default] *************************************************
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [check_config_dynamic : Set deploy_dir if not presented] ***************************************************************

TASK [check_config_dynamic : Preflight check - Get umask] *******************************************************************
ok: [192.168.10.111]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.112]
ok: [192.168.10.101]
ok: [192.168.10.113]
ok: [192.168.10.102]

TASK [check_config_dynamic : Preflight check - Get Hard Max FD limit] *******************************************************
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.113]

TASK [check_config_dynamic : Preflight check - Does the system have a standard umask?] **************************************

TASK [check_config_dynamic : Preflight check - ulimit -n] *******************************************************************

TASK [check_config_dynamic : Preflight check - Get deploy dir permissions] **************************************************
ok: [192.168.10.111]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.113]
ok: [192.168.10.112]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [check_config_dynamic : Preflight check - Does deploy dir have appropriate permissions?] *******************************

TASK [check_config_dynamic : Check swap] ************************************************************************************

TASK [check_config_dynamic : environment check (deploy dir)] ****************************************************************
ok: [192.168.10.131]
ok: [192.168.10.123]
ok: [192.168.10.122]
ok: [192.168.10.111]
ok: [192.168.10.121]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.102]
ok: [192.168.10.101]

TASK [check_config_dynamic : environment check (supervise)] *****************************************************************

TASK [check_config_dynamic : config skip variables (default)] ***************************************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [check_config_dynamic : config skip variables] *************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [check_config_dynamic : config skip variables] *************************************************************************

PLAY [monitored_servers] ****************************************************************************************************

TASK [start node_exporter by supervise] *************************************************************************************

TASK [start node_exporter by systemd] ***************************************************************************************
changed: [192.168.10.113]
changed: [192.168.10.101]
changed: [192.168.10.102]
changed: [192.168.10.111]
changed: [192.168.10.112]
changed: [192.168.10.122]
changed: [192.168.10.123]
changed: [192.168.10.121]
changed: [192.168.10.131]

TASK [wait for node_exporter up] ********************************************************************************************
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.111]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.131]

PLAY [monitored_servers] ****************************************************************************************************

TASK [start blackbox_exporter by supervise] *********************************************************************************

TASK [start blackbox_exporter by systemd] ***********************************************************************************
changed: [192.168.10.112]
changed: [192.168.10.101]
changed: [192.168.10.111]
changed: [192.168.10.102]
changed: [192.168.10.113]
changed: [192.168.10.121]
changed: [192.168.10.122]
changed: [192.168.10.123]
changed: [192.168.10.131]

TASK [wait for blackbox_exporter up] ****************************************************************************************
ok: [192.168.10.112]
ok: [192.168.10.111]
ok: [192.168.10.113]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.122]
ok: [192.168.10.131]
ok: [192.168.10.121]
ok: [192.168.10.123]

PLAY [alertmanager_servers] *************************************************************************************************

TASK [start alertmanager by supervise] **************************************************************************************

TASK [start alertmanager by systemd] ****************************************************************************************
changed: [192.168.10.131]

TASK [wait for alertmanager up] *********************************************************************************************
ok: [192.168.10.131]

PLAY [monitoring_servers] ***************************************************************************************************

TASK [start monitoring modules by supervise] ********************************************************************************

TASK [start monitoring modules by systemd] **********************************************************************************
changed: [192.168.10.131] => (item=pushgateway-9091.service)
changed: [192.168.10.131] => (item=prometheus-9090.service)

TASK [wait for pushgateway up] **********************************************************************************************
ok: [192.168.10.131]

TASK [wait for prometheus up] ***********************************************************************************************
ok: [192.168.10.131]
 [WARNING]: Could not match supplied host pattern, ignoring: kafka_exporter_servers

PLAY [kafka_exporter_servers] ***********************************************************************************************
skipping: no hosts matched

PLAY [pd_servers] ***********************************************************************************************************

TASK [start PD by supervise] ************************************************************************************************

TASK [start PD by systemd] **************************************************************************************************
changed: [192.168.10.121]
changed: [192.168.10.122]
changed: [192.168.10.123]

TASK [wait for PD up] *******************************************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]

TASK [wait for PD up] *******************************************************************************************************

PLAY [tikv_servers] *********************************************************************************************************

TASK [start TiKV by supervise] **********************************************************************************************

TASK [start TiKV by systemd] ************************************************************************************************
changed: [192.168.10.112]
changed: [192.168.10.113]
changed: [192.168.10.111]

TASK [wait for TiKV up] *****************************************************************************************************
ok: [192.168.10.112]
ok: [192.168.10.111]
ok: [192.168.10.113]

TASK [wait for TiKV up] *****************************************************************************************************

TASK [command] **************************************************************************************************************
ok: [192.168.10.111]
ok: [192.168.10.113]
ok: [192.168.10.112]

TASK [display new tikv pid] *************************************************************************************************
ok: [192.168.10.111] => {
    "msg": "tikv binary or docker pid: 11211"
}
ok: [192.168.10.112] => {
    "msg": "tikv binary or docker pid: 11158"
}
ok: [192.168.10.113] => {
    "msg": "tikv binary or docker pid: 11152"
}

PLAY [tidb_servers] *********************************************************************************************************

TASK [clean pump .node file] ************************************************************************************************

TASK [start pump by supervise] **********************************************************************************************

TASK [start pump by systemd] ************************************************************************************************

TASK [wait for pump up] *****************************************************************************************************

TASK [start TiDB by supervise] **********************************************************************************************

TASK [start TiDB by systemd] ************************************************************************************************
changed: [192.168.10.101]
changed: [192.168.10.102]

TASK [wait for TiDB up] *****************************************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [wait for TiDB up] *****************************************************************************************************

PLAY [grafana_servers] ******************************************************************************************************

TASK [common_dir : create deploy directories] *******************************************************************************

TASK [common_dir : create status directory] *********************************************************************************

TASK [common_dir : create deploy binary directory] **************************************************************************

TASK [common_dir : create docker image directory] ***************************************************************************

TASK [grafana : include_tasks] **********************************************************************************************

TASK [start grafana by supervise] *******************************************************************************************

TASK [start grafana by systemd] *********************************************************************************************
changed: [192.168.10.131]

TASK [wait for grafana up] **************************************************************************************************
ok: [192.168.10.131]

TASK [start grafana_collector by supervise] *********************************************************************************

TASK [start grafana_collector by systemd] ***********************************************************************************
changed: [192.168.10.131]

TASK [wait for grafana_collector up] ****************************************************************************************
ok: [192.168.10.131]

TASK [set_fact] *************************************************************************************************************
ok: [192.168.10.131]

TASK [include_tasks] ********************************************************************************************************
included: /home/tidb/tidb-ansible/create_grafana_api_keys.yml for 192.168.10.131

TASK [Ensure grafana API Key directory exists] ******************************************************************************
changed: [192.168.10.131 -> localhost]

TASK [Check grafana API Key list] *******************************************************************************************
ok: [192.168.10.131]

TASK [Check grafana API Key file existed] ***********************************************************************************
ok: [192.168.10.131]

TASK [set_fact] *************************************************************************************************************

TASK [debug] ****************************************************************************************************************

TASK [Delete grafana API Key when grafana API Key file is missing] **********************************************************

TASK [Create grafana API Key] ***********************************************************************************************
ok: [192.168.10.131] => (item={u'role': u'Admin', u'name': u'grafana_apikey'})

TASK [Create grafana API key file] ******************************************************************************************
changed: [192.168.10.131 -> localhost] => (item={u'status': 200, u'content_length': u'126', u'cookies': {}, u'changed': False, '_ansible_no_log': False, 'item': {u'role': u'Admin', u'name': u'grafana_apikey'}, u'url': u'http://192.168.10.131:3000/api/auth/keys', '_ansible_item_result': True, u'json': {u'name': u'grafana_apikey', u'key': u'eyJrIjoiTUV5RWptdXBVMjMzNG1DNmFRWG5wS2p2OTRORm1DaG0iLCJuIjoiZ3JhZmFuYV9hcGlrZXkiLCJpZCI6MX0='}, 'failed': False, u'connection': u'close', '_ansible_parsed': True, u'content_type': u'application/json', u'invocation': {u'module_args': {u'directory_mode': None, u'force': False, u'remote_src': None, u'follow_redirects': u'safe', u'body_format': u'json', u'owner': None, u'follow': False, u'client_key': None, u'group': None, u'use_proxy': True, u'unsafe_writes': None, u'serole': None, u'content': None, u'setype': None, u'status_code': [200], u'return_content': False, u'method': u'POST', u'body': u'{"role": "Admin", "name": "grafana_apikey"}', u'url_username': u'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', u'src': None, u'dest': None, u'selevel': None, u'force_basic_auth': True, u'removes': None, u'http_agent': u'ansible-httpget', u'user': u'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', u'regexp': None, u'password': u'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', u'url_password': u'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER', u'url': u'http://192.168.10.131:3000/api/auth/keys', u'validate_certs': True, u'seuser': None, u'client_cert': None, u'creates': None, u'headers': {u'Content-Type': u'application/json', u'Authorization': u'Basic YWRtaW46YWRtaW4='}, u'delimiter': None, u'mode': None, u'timeout': 30, u'attributes': None, u'backup': None}}, u'date': u'Mon, 11 Jun 2018 21:41:14 GMT', u'redirected': False, '_ansible_ignore_errors': None, u'msg': u'OK (126 bytes)'})

TASK [import grafana data source] *******************************************************************************************
changed: [192.168.10.131]

TASK [import grafana dashboards - prepare config] ***************************************************************************
changed: [192.168.10.131 -> localhost]

TASK [import grafana dashboards - run import script] ************************************************************************
changed: [192.168.10.131 -> localhost]

PLAY RECAP ******************************************************************************************************************
192.168.10.101             : ok=16   changed=3    unreachable=0    failed=0   
192.168.10.102             : ok=16   changed=3    unreachable=0    failed=0   
192.168.10.111             : ok=18   changed=3    unreachable=0    failed=0   
192.168.10.112             : ok=18   changed=3    unreachable=0    failed=0   
192.168.10.113             : ok=18   changed=3    unreachable=0    failed=0   
192.168.10.121             : ok=16   changed=3    unreachable=0    failed=0   
192.168.10.122             : ok=16   changed=3    unreachable=0    failed=0   
192.168.10.123             : ok=16   changed=3    unreachable=0    failed=0   
192.168.10.131             : ok=33   changed=11   unreachable=0    failed=0   
localhost                  : ok=1    changed=0    unreachable=0    failed=0   

Congrats! All goes well. :-)
[tidb@contoso200 tidb-ansible]$ 

 

On contoso101, the tidb-server process is listening on port 4000; its PID is 10994:
ss -lnp|grep 4000
ps -ef|grep 10994

On contoso102, the tidb-server process is listening on port 4000; its PID is 10954:
ss -lnp|grep 4000
ps -ef|grep 10954

[root@contoso101 ~]# ss -lnp|grep 4000
tcp    LISTEN     0      32768    :::4000                 :::*                   users:(("tidb-server",pid=10994,fd=40))
[root@contoso101 ~]# ps -ef|grep 10994
tidb      10994      1  3 05:40 ?        00:00:42 bin/tidb-server -P 4000 --status=10080 --path=192.168.10.121:2379,192.168.10.122:2379,192.168.10.123:2379 --config=conf/tidb.toml --log-file=/data1/deploy/log/tidb.log
root      11123   1446  0 05:59 pts/0    00:00:00 grep --color=auto 10994
[root@contoso101 ~]#

[root@contoso102 ~]# ss -lnp|grep 4000
tcp    LISTEN     0      32768    :::4000                 :::*                   users:(("tidb-server",pid=10954,fd=40))
[root@contoso102 ~]# ps -ef|grep 10954
tidb      10954      1  3 05:40 ?        00:00:39 bin/tidb-server -P 4000 --status=10080 --path=192.168.10.121:2379,192.168.10.122:2379,192.168.10.123:2379 --config=conf/tidb.toml --log-file=/data1/deploy/log/tidb.log
root      11092   1408  0 06:01 pts/0    00:00:00 grep --color=auto 10954
[root@contoso102 ~]#
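
The same check can be issued for both TiDB nodes at once from the control machine. This is only a sketch using Ansible's ad-hoc shell module and the tidb_servers group defined in the inventory.ini shipped with tidb-ansible:

# run as the tidb user on contoso200, from /home/tidb/tidb-ansible (assumed working directory)
ansible tidb_servers -i inventory.ini -m shell -a "ss -lnp | grep 4000"
ansible tidb_servers -i inventory.ini -m shell -a "ps -ef | grep tidb-server | grep -v grep"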



Testing the cluster
    To test connections to the TiDB cluster, it is recommended to place a load balancer in front of the TiDB servers so that a single SQL endpoint is exposed externally.
    Connect with the MySQL client to verify the deployment; TCP port 4000 is the default port of the TiDB service.

mysql -u root -h 192.168.10.101 -P 4000
mysql -u root -h 192.168.10.102 -P 4000
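
After connecting, a short smoke test confirms that both tidb-server instances serve the same data from the TiKV cluster. This is only a sketch: the smoke_test database and table t are illustrative names, and it assumes the default passwordless root account of a fresh deployment:

# write through the first tidb-server (smoke_test/t are made-up names for this check)
mysql -u root -h 192.168.10.101 -P 4000 -e "CREATE DATABASE IF NOT EXISTS smoke_test; CREATE TABLE smoke_test.t (id INT PRIMARY KEY, v VARCHAR(32)); INSERT INTO smoke_test.t VALUES (1, 'hello tidb');"

# read it back through the second tidb-server and print the server version
mysql -u root -h 192.168.10.102 -P 4000 -e "SELECT tidb_version(); SELECT * FROM smoke_test.t;"

# clean up
mysql -u root -h 192.168.10.102 -P 4000 -e "DROP DATABASE smoke_test;"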


Access the monitoring platform through a browser. On contoso131, ss -lnp|grep 3000 confirms that Grafana is listening on port 3000:

[root@contoso131 ~]# ss -lnp|grep 3000
tcp    LISTEN     0      32768    :::3000                 :::*                   users:(("grafana-server",pid=16911,fd=8))
[root@contoso131 ~]# ps -ef|grep 16911
tidb      16911      1  0 05:41 ?        00:00:07 opt/grafana/bin/grafana-server --homepath=/data1/deploy/opt/grafana --config=/data1/deploy/opt/grafana/conf/grafana.ini
root      17442   1383  0 06:25 pts/0    00:00:00 grep --color=auto 16911
[root@contoso131 ~]#

URL: http://192.168.10.131:3000  The default username and password are admin/admin.
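
Before opening the browser, Grafana can also be probed from the command line; this is a sketch relying on Grafana's built-in /api/health endpoint:

# should return a small JSON document reporting "database": "ok" when Grafana is up
curl -s http://192.168.10.131:3000/api/health

# the login page should answer with HTTP 200
curl -s -o /dev/null -w "%{http_code}\n" http://192.168.10.131:3000/login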

 

[tidb@contoso200 tidb-ansible]$ ansible-playbook stop.yml

PLAY [check config locally] *************************************************************************************************

TASK [check_config_static : Ensure only one monitoring host exists] *********************************************************

TASK [check_config_static : Warn if TiDB host not exists] *******************************************************************

TASK [check_config_static : Ensure zookeeper address of kafka cluster is set when enable_binlog] ****************************

TASK [check_config_static : Ensure PD host exists] **************************************************************************

TASK [check_config_static : Ensure TiKV host exists] ************************************************************************

TASK [check_config_static : Ensure enable_tls is disabled when deployment_method is docker] *********************************

TASK [check_config_static : Check ansible_user variable] ********************************************************************

TASK [check_config_static : Close old control master] ***********************************************************************
ok: [localhost]

PLAY [gather all facts, and check dest] *************************************************************************************

TASK [check_config_dynamic : Disk space check - Fail task when disk is full] ************************************************
ok: [192.168.10.131]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.112]
ok: [192.168.10.111]
ok: [192.168.10.121]
ok: [192.168.10.113]
ok: [192.168.10.122]
ok: [192.168.10.123]

TASK [check_config_dynamic : get facts] *************************************************************************************
ok: [192.168.10.111]
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.131]
ok: [192.168.10.112]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.113]

TASK [check_config_dynamic : Preflight check - Set NTP service status] ******************************************************
ok: [192.168.10.131]
ok: [192.168.10.102]
ok: [192.168.10.112]
ok: [192.168.10.101]
ok: [192.168.10.111]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.113]
ok: [192.168.10.123]

TASK [check_config_dynamic : Preflight check - NTP service] *****************************************************************

TASK [check_config_dynamic : Set enable_binlog - set default] ***************************************************************

TASK [check_config_dynamic : Set enable_binlog - set inverse] ***************************************************************

TASK [check_config_dynamic : Set deploy_user - set ansible_user as default] *************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.131]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.122]
ok: [192.168.10.123]
ok: [192.168.10.121]

TASK [check_config_dynamic : Set deploy_dir if not presented] ***************************************************************

TASK [check_config_dynamic : Preflight check - Get umask] *******************************************************************
ok: [192.168.10.101]
ok: [192.168.10.111]
ok: [192.168.10.131]
ok: [192.168.10.102]
ok: [192.168.10.112]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.113]
ok: [192.168.10.123]

TASK [check_config_dynamic : Preflight check - Get Hard Max FD limit] *******************************************************
ok: [192.168.10.112]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.131]
ok: [192.168.10.111]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.113]
ok: [192.168.10.123]

TASK [check_config_dynamic : Preflight check - Does the system have a standard umask?] **************************************

TASK [check_config_dynamic : Preflight check - ulimit -n] *******************************************************************

TASK [check_config_dynamic : Preflight check - Get deploy dir permissions] **************************************************
ok: [192.168.10.131]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.101]
ok: [192.168.10.123]
ok: [192.168.10.113]
ok: [192.168.10.121]
ok: [192.168.10.122]

TASK [check_config_dynamic : Preflight check - Does deploy dir have appropriate permissions?] *******************************

TASK [check_config_dynamic : Check swap] ************************************************************************************

TASK [check_config_dynamic : environment check (deploy dir)] ****************************************************************
ok: [192.168.10.131]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.123]
ok: [192.168.10.121]
ok: [192.168.10.113]
ok: [192.168.10.122]

TASK [check_config_dynamic : environment check (supervise)] *****************************************************************

TASK [check_config_dynamic : config skip variables (default)] ***************************************************************
ok: [192.168.10.131]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]

TASK [check_config_dynamic : config skip variables] *************************************************************************
ok: [192.168.10.131]
ok: [192.168.10.101]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.123]
ok: [192.168.10.122]
ok: [192.168.10.121]

TASK [check_config_dynamic : config skip variables] *************************************************************************

PLAY [monitored_servers] ****************************************************************************************************

TASK [stop node_exporter/blackbox_exporter by supervise] ********************************************************************

TASK [stop node_exporter/blackbox_exporter by systemd] **********************************************************************
changed: [192.168.10.101] => (item=node_exporter-9100.service)
changed: [192.168.10.113] => (item=node_exporter-9100.service)
changed: [192.168.10.111] => (item=node_exporter-9100.service)
changed: [192.168.10.102] => (item=node_exporter-9100.service)
changed: [192.168.10.112] => (item=node_exporter-9100.service)
changed: [192.168.10.112] => (item=blackbox_exporter-9115.service)
changed: [192.168.10.101] => (item=blackbox_exporter-9115.service)
changed: [192.168.10.111] => (item=blackbox_exporter-9115.service)
changed: [192.168.10.102] => (item=blackbox_exporter-9115.service)
changed: [192.168.10.113] => (item=blackbox_exporter-9115.service)
changed: [192.168.10.121] => (item=node_exporter-9100.service)
changed: [192.168.10.123] => (item=node_exporter-9100.service)
changed: [192.168.10.122] => (item=node_exporter-9100.service)
changed: [192.168.10.131] => (item=node_exporter-9100.service)
changed: [192.168.10.121] => (item=blackbox_exporter-9115.service)
changed: [192.168.10.122] => (item=blackbox_exporter-9115.service)
changed: [192.168.10.123] => (item=blackbox_exporter-9115.service)
changed: [192.168.10.131] => (item=blackbox_exporter-9115.service)

TASK [wait for node_exporter down] ******************************************************************************************
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.102]
ok: [192.168.10.111]
ok: [192.168.10.101]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.131]
ok: [192.168.10.122]

TASK [wait for blackbox_exporter down] **************************************************************************************
ok: [192.168.10.102]
ok: [192.168.10.101]
ok: [192.168.10.112]
ok: [192.168.10.111]
ok: [192.168.10.113]
ok: [192.168.10.122]
ok: [192.168.10.121]
ok: [192.168.10.123]
ok: [192.168.10.131]

PLAY [alertmanager_servers] *************************************************************************************************

TASK [stop alertmanager by supervise] ***************************************************************************************

TASK [stop alertmanager by systemd] *****************************************************************************************
changed: [192.168.10.131]

TASK [wait for alertmanager down] *******************************************************************************************
ok: [192.168.10.131]

PLAY [monitoring_servers] ***************************************************************************************************

TASK [stop monitoring modules by supervise] *********************************************************************************

TASK [stop monitoring modules by systemd] ***********************************************************************************
changed: [192.168.10.131] => (item=pushgateway-9091.service)
changed: [192.168.10.131] => (item=prometheus-9090.service)

TASK [wait for pushgateway down] ********************************************************************************************
ok: [192.168.10.131]

TASK [wait for prometheus down] *********************************************************************************************
ok: [192.168.10.131]
 [WARNING]: Could not match supplied host pattern, ignoring: kafka_exporter_servers

PLAY [kafka_exporter_servers] ***********************************************************************************************
skipping: no hosts matched

PLAY [tidb_servers] *********************************************************************************************************

TASK [stop TiDB by supervise] ***********************************************************************************************

TASK [stop TiDB by systemd] *************************************************************************************************
changed: [192.168.10.101]
changed: [192.168.10.102]

TASK [wait for TiDB down] ***************************************************************************************************
ok: [192.168.10.101]
ok: [192.168.10.102]

TASK [stop pump by supervise] ***********************************************************************************************

TASK [stop pump by systemd] *************************************************************************************************

TASK [wait for pump down] ***************************************************************************************************

PLAY [tikv_servers] *********************************************************************************************************

TASK [stop TiKV by supervise] ***********************************************************************************************

TASK [stop TiKV by systemd] *************************************************************************************************
changed: [192.168.10.112]
changed: [192.168.10.111]
changed: [192.168.10.113]

TASK [wait for TiKV down (via Port)] ****************************************************************************************
ok: [192.168.10.111]
ok: [192.168.10.112]
ok: [192.168.10.113]

TASK [wait for TiKV down (via PID)] *****************************************************************************************
ok: [192.168.10.113]
ok: [192.168.10.111]
ok: [192.168.10.112]

TASK [command] **************************************************************************************************************
ok: [192.168.10.112]
ok: [192.168.10.113]
ok: [192.168.10.111]

TASK [display old tikv pid] *************************************************************************************************
ok: [192.168.10.111] => {
    "msg": "tikv binary or docker pid: 11211"
}
ok: [192.168.10.112] => {
    "msg": "tikv binary or docker pid: 11158"
}
ok: [192.168.10.113] => {
    "msg": "tikv binary or docker pid: 11152"
}

PLAY [pd_servers] ***********************************************************************************************************

TASK [stop PD by supervise] *************************************************************************************************

TASK [stop PD by systemd] ***************************************************************************************************
changed: [192.168.10.123]
changed: [192.168.10.122]
changed: [192.168.10.121]

TASK [wait for PD down] *****************************************************************************************************
ok: [192.168.10.121]
ok: [192.168.10.122]
ok: [192.168.10.123]

PLAY [grafana_servers] ******************************************************************************************************

TASK [stop grafana by supervise] ********************************************************************************************

TASK [stop grafana by systemd] **********************************************************************************************
changed: [192.168.10.131]

TASK [wait for grafana down] ************************************************************************************************
ok: [192.168.10.131]

TASK [stop grafana_collector by supervise] **********************************************************************************

TASK [stop grafana_collector by systemd] ************************************************************************************
changed: [192.168.10.131]

TASK [wait for grafana_collector down] **************************************************************************************
ok: [192.168.10.131]

PLAY RECAP ******************************************************************************************************************
192.168.10.101             : ok=15   changed=2    unreachable=0    failed=0   
192.168.10.102             : ok=15   changed=2    unreachable=0    failed=0   
192.168.10.111             : ok=18   changed=2    unreachable=0    failed=0   
192.168.10.112             : ok=18   changed=2    unreachable=0    failed=0   
192.168.10.113             : ok=18   changed=2    unreachable=0    failed=0   
192.168.10.121             : ok=15   changed=2    unreachable=0    failed=0   
192.168.10.122             : ok=15   changed=2    unreachable=0    failed=0   
192.168.10.123             : ok=15   changed=2    unreachable=0    failed=0   
192.168.10.131             : ok=22   changed=5    unreachable=0    failed=0   
localhost                  : ok=1    changed=0    unreachable=0    failed=0   

Congrats! All goes well. :-)
[tidb@contoso200 tidb-ansible]$ pwd
/home/tidb/tidb-ansible
[tidb@contoso200 tidb-ansible]$ 

 
