Nginx Rsyslog Kafka Logstash Elasticsearch Kibana kafka-manager KafkaOffsetMonitor ansible-playbook配置记录
环境及角色:
OS: centos 7.4 64bit
172.21.32.7, ansible kafka-manager; kibana nginx kafka-manager KafkaOffsetMonitor
172.21.32.11, es master; zookeeper; kafka
172.21.32.14, es data; zookeeper; kafka
172.21.32.15, es data; zookeeper; kafka
1. jdk8 环境配置:
#安装jdk8的playbook文件:主机组采用变量形式,其它playbook文件也为同样格式,对应roles目录下为需要用到的文件
[root@VM_32_7_centos ansible-elk]#cat jdk8.yml
# Play: install JDK 8 on whatever host group is passed in via
# --extra-vars "host=...". (Indentation restored; the pasted copy was flattened.)
- hosts: '{{ host }}'
  remote_user: root
  roles:
    - jdk8
#ansible标准的目录结构,以下主要列出了主任务文件内容。
[root@VM_32_7_centos jdk8]# tree
.
|-- files
| `-- jdk-8u131-linux-x64.tar.gz
|-- handlers
|-- meta
|-- tasks
| `-- main.yml
|-- templates
`-- vars
`-- main.yml
#main.yml 文件
[root@VM_32_7_centos jdk8]# cat tasks/main.yml
# Role tasks: unpack the bundled JDK 8 tarball under /usr/java and append
# JAVA_HOME / PATH / CLASSPATH exports to the profile file.
# Variables jdk_package_name, env_file and jdk_version come from vars/main.yml.
- name: mkdir necessary catalog
  file: path=/usr/java state=directory mode=0755

- name: copy and unzip jdk
  # unarchive copies the local tarball to the target and extracts it in one step
  unarchive: src={{ jdk_package_name }} dest=/usr/java/

- name: set env
  lineinfile: dest={{ env_file }} insertafter="{{ item.position }}" line="{{ item.value }}" state=present
  with_items:
    - { position: EOF, value: "\n" }
    - { position: EOF, value: "export JAVA_HOME=/usr/java/{{ jdk_version }}" }
    - { position: EOF, value: "export PATH=$JAVA_HOME/bin:$PATH" }
    - { position: EOF, value: "export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar" }

- name: enforce env
  # NOTE(review): 'source' runs inside a throw-away shell, so it has no lasting
  # effect on later tasks or logins; kept only for parity with the original notes.
  shell: source {{ env_file }}
#ansible 语法检查:
ansible-playbook jdk8.yml --extra-vars "host=elk" --syntax-check
#playbook执行:
ansible-playbook jdk8.yml --extra-vars "host=elk"
#验证java环境:
ansible elk -m shell -a "source /etc/profile && /usr/java/jdk1.8.0_131/bin/java -version"
2. 安装elasticsearch
[root@VM_32_7_centos es_server]# cat tasks/main.yml
# Role tasks: create the 'es' user, prepare data/log directories, unpack
# Elasticsearch 6.5.3, template its config, and start it as the es user.
- name: create es user
  user: name=es

# NOTE(review): the two mkdir task names were swapped in the original notes --
# /data/logs is the log directory, /data/esData the data directory
# (the tags already matched the paths).
- name: mkdir directory for elasticsearch log
  file: dest=/data/logs mode=0755 state=directory owner=es group=es recurse=yes
  tags: logdir

- name: mkdir directory for elasticsearch data
  file: dest=/data/esData mode=0755 state=directory owner=es group=es recurse=yes
  tags: datadir

- name: copy and untar
  unarchive: src=elasticsearch-6.5.3.tar.gz dest=/usr/local/ owner=es group=es

- name: install configuration file for elasticsearch
  template: src=elasticsearch.yml dest=/usr/local/elasticsearch-6.5.3/config/elasticsearch.yml owner=es group=es
  tags: configfile

- name: start es
  # elasticsearch refuses to run as root: su to the dedicated user, -d daemonizes
  shell: source /etc/profile && su - es -c '/usr/local/elasticsearch-6.5.3/bin/elasticsearch -d'
  tags:
    - start
#相关ansible host,针对不同主机不同变量值的变量写在host中
# Inventory group with per-host variables: ES role flags, zookeeper myid and
# kafka broker id. ("date-node" was a typo for "data-node".)
[es_host]
172.21.32.11 node_name=master node_master=true node_data=false myid=1 broker_id=0
172.21.32.14 node_name=data-node1 node_master=false node_data=true myid=2 broker_id=1
172.21.32.15 node_name=data-node2 node_master=false node_data=true myid=3 broker_id=2
#执行 playbook
ansible-playbook es.yml --extra-vars "host=es_host"
#验证es:
ansible es_host -m shell -a "netstat -nutlp | egrep '9200|9300'"
#es 集群健康检查
curl '172.21.32.11:9200/_cluster/health?pretty'
3. 安装zookeeper 集群
[root@VM_32_7_centos zk_server]# cat tasks/main.yml
# Role tasks: unpack ZooKeeper 3.4.13, create its data dir, template zoo.cfg,
# write the per-node myid file, and start the server.
- name: install zookeeper
  unarchive: src=zookeeper-3.4.13.tar.gz dest=/usr/local/

- name: mkdir necessary catalog
  file: path=/usr/local/zookeeper-3.4.13/dataDir state=directory mode=0755

- name: install configuration file for zookeeper
  template: src=zoo.cfg dest=/usr/local/zookeeper-3.4.13/conf/zoo.cfg

- name: add myid file
  # myid comes from the per-host inventory variable; each node needs a unique id
  shell: echo {{ myid }} > /usr/local/zookeeper-3.4.13/dataDir/myid

- name: start zookeeper
  shell: source /etc/profile && /usr/local/zookeeper-3.4.13/bin/zkServer.sh start
  tags:
    - start
#执行 playbook
ansible-playbook zk.yml --extra-vars "host=es_host"
#验证:zookeeper
echo stat | nc -v 172.21.32.11 2181
echo stat | nc -v 172.21.32.14 2181
echo stat | nc -v 172.21.32.15 2181
ansible es_host -m shell -a "netstat -nutlp | egrep '2181'"
#停止 zookeeper
ansible es_host -m shell -a "source /etc/profile && /usr/local/zookeeper-3.4.13/bin/zkServer.sh stop"
#启动:zookeeper
ansible es_host -m shell -a "source /etc/profile && /usr/local/zookeeper-3.4.13/bin/zkServer.sh start"
4. 安装kafka server and manager
[root@VM_32_7_centos kafka_server]# cat tasks/main.yml
# Role tasks: kafka brokers go on every host except the management node
# (broker_id=3). Inventory variables arrive as strings, so cast with | int
# before comparing to an integer, otherwise "0" != 3 style comparisons are
# always true. Bare variables (no {{ }}) are the correct form inside 'when'.
- name: copy and unzip kafka
  unarchive: src=kafka_2.12-2.1.0.tgz dest=/usr/local/
  when: broker_id | int != 3

- name: install configuration file for kafka
  template: src=server.properties dest=/usr/local/kafka_2.12-2.1.0/config/server.properties
  when: broker_id | int != 3

- name: start kafka
  # JMX_PORT=9997 exposes metrics for kafka-manager / KafkaOffsetMonitor
  shell: source /etc/profile && JMX_PORT=9997 /usr/local/kafka_2.12-2.1.0/bin/kafka-server-start.sh -daemon /usr/local/kafka_2.12-2.1.0/config/server.properties &
  when: broker_id | int != 3
  tags:
    - start

# kafka-manager is installed only on the designated management host.
- name: copy and unzip kafka-manager
  unarchive: src=kafka-manager-1.3.3.21.zip dest=/usr/local/
  when: ansible_default_ipv4.address == kafka_manager_ip

- name: install configuration file for kafka-manager
  template: src=application.conf dest=/usr/local/kafka-manager-1.3.3.21/conf/application.conf
  when: ansible_default_ipv4.address == kafka_manager_ip

- name: start kafka-manager
  shell: source /etc/profile && nohup /usr/local/kafka-manager-1.3.3.21/bin/kafka-manager &
  when: ansible_default_ipv4.address == kafka_manager_ip
  tags:
    - kafkaManagerStart
#执行 playbook
ansible-playbook kafka.yml --extra-vars "host=kafka"
#检查 kafka端口:9092 JMX端口:9997 kafka-manager端口:9000
ansible kafka -m shell -a "netstat -nutlp | egrep '9000|9092|9997'"
#停止kafka
ansible kafka -m shell -a "source /etc/profile && /usr/local/kafka_2.12-2.1.0/bin/kafka-server-stop.sh"
#启动kafka
ansible kafka -m shell -a "source /etc/profile && JMX_PORT=9997 /usr/local/kafka_2.12-2.1.0/bin/kafka-server-start.sh -daemon /usr/local/kafka_2.12-2.1.0/config/server.properties &"
#启动kafka manager
ansible 172.21.32.7 -m shell -a "source /etc/profile && nohup /usr/local/kafka-manager-1.3.3.21/bin/kafka-manager &"
#查看topics
/usr/local/kafka_2.12-2.1.0/bin/kafka-topics.sh --list --zookeeper 172.21.32.11:2181,172.21.32.14:2181,172.21.32.15:2181
#生产者 topic:test
/usr/local/kafka_2.12-2.1.0/bin/kafka-console-producer.sh --broker-list 172.21.32.11:9092,172.21.32.14:9092,172.21.32.15:9092 --topic test
#消费者 topic:test
/usr/local/kafka_2.12-2.1.0/bin/kafka-console-consumer.sh --bootstrap-server 172.21.32.11:9092,172.21.32.14:9092,172.21.32.15:9092 --topic test --from-beginning
5. 安装 Logstash
[root@VM_32_7_centos logstash_server]# cat tasks/main.yml
# Role tasks: Logstash runs only on the management host (broker_id=3 in the
# inventory). Cast broker_id with | int -- inventory vars are strings.
- name: copy and untar logstash
  unarchive: src=logstash-6.5.3.tar.gz dest=/usr/local/
  when: broker_id | int == 3

- name: install configuration file for logstash
  template: src=pipeline.conf dest=/usr/local/logstash-6.5.3/config/pipeline.conf
  tags: configfile
  when: broker_id | int == 3

- name: start logstash
  shell: source /etc/profile && /usr/local/logstash-6.5.3/bin/logstash -f /usr/local/logstash-6.5.3/config/pipeline.conf --config.reload.automatic &
  when: broker_id | int == 3
  tags:
    - start
#logstash配置文件:pipeline.conf,注:client_id => "" 需要将client_id设为"",否则有错误:Error registering AppInfo mbean
[root@VM_32_7_centos logstash_server]# cat templates/pipeline.conf
# Pipeline: consume nginx access logs from Kafka, parse the JSON payload,
# and index the events into Elasticsearch with daily index rotation.
input {
kafka {
# all three brokers of the cluster
bootstrap_servers => "172.21.32.11:9092,172.21.32.14:9092,172.21.32.15:9092"
topics => ["rsyslog_nginx"]
group_id => "logstash"
# empty client_id works around "Error registering AppInfo mbean"
client_id => ""
codec => plain
# one consumer thread per broker/partition
consumer_threads => 3
decorate_events => true
type => "nginx-access"
}
}
filter {
mutate {
# re-escape rsyslog's \x sequences so the json filter can parse the message
gsub => ["message", "\\x", "\\\x"]
}
json {
source => "message"
}
date {
# use nginx's time_local field as the event @timestamp
match => ["time_local","dd/MMM/yyyy:HH:mm:ss Z"]
target => "@timestamp"
}
}
output {
elasticsearch {
# write to the two data nodes; index name rotates daily
hosts => ["172.21.32.14:9200", "172.21.32.15:9200"]
index => "rsyslog-nginx-%{+YYYY.MM.dd}"
}
}
#执行 ansible playbook
ansible-playbook logstash.yml --extra-vars "host=kafka" --tags "start"
#使用ansible启动
ansible 172.21.32.7 -m shell -a "source /etc/profile && /usr/local/logstash-6.5.3/bin/logstash -f /usr/local/logstash-6.5.3/config/pipeline.conf --config.reload.automatic &"
#注:使用playbook 及ansible命令行均启动失败,没有发现错误信息,原因未知,本地手动启动正常。
#测试配置文件:
/usr/local/logstash-6.5.3/bin/logstash -f /usr/local/logstash-6.5.3/config/pipeline.conf --config.test_and_exit
#启动
/usr/local/logstash-6.5.3/bin/logstash -f /usr/local/logstash-6.5.3/config/pipeline.conf --config.reload.automatic &
#检查 logstash 端口:9600
ansible kafka -m shell -a "netstat -nutlp | egrep '9600|9000|9092|9997'"
6. 安装kibana
[root@VM_32_7_centos kibana_server]# cat tasks/main.yml
# Role tasks: Kibana runs only on the management host (broker_id=3 in the
# inventory). Cast broker_id with | int -- inventory vars are strings.
- name: copy and untar
  unarchive: src=kibana-6.5.3-linux-x86_64.tar.gz dest=/usr/local/
  when: broker_id | int == 3

- name: install configuration file for kibana
  template: src=kibana.yml dest=/usr/local/kibana-6.5.3-linux-x86_64/config/kibana.yml
  tags: configfile
  when: broker_id | int == 3

- name: start kibana
  shell: source /etc/profile && cd /usr/local/kibana-6.5.3-linux-x86_64/bin && nohup /usr/local/kibana-6.5.3-linux-x86_64/bin/kibana &
  when: broker_id | int == 3
  tags:
    - start
#执行playbook
ansible-playbook kibana.yml --extra-vars "host=kafka"
#可以使用tags参数来指定执行步骤。--tags "start"
#远程启动失败,本地手动启动
#检查:端口5601
ansible kafka -m shell -a "netstat -nutlp | egrep '9600|9000|5601'"
7. 安装nginx
[root@VM_32_7_centos web_server]# cat tasks/main.yml
# Role tasks: nginx (plus build deps) is installed only on the management host
# (broker_id=3 in the inventory, cast with | int since inventory vars are strings).
- name: Install nginx by yum
  yum:
    name: openssl-devel,readline-devel,pcre-devel,gcc,nginx
    state: present
  when: broker_id | int == 3

- name: install configuration file for nginx
  template: src=nginx.conf dest=/etc/nginx/nginx.conf
  when: broker_id | int == 3
  tags:
    - configfile

- name: start nginx
  # use the service module (idempotent) instead of shelling out to systemctl
  service: name=nginx state=started
  when: broker_id | int == 3
  tags:
    - start
#执行playbook
ansible-playbook nginx.yml --extra-vars "host=kafka"