一、ELK 相关资料
ELK官网: 点击打开链接
ELKstack 中文指南:点击打开链接
二、安装过程
节点1:172.214.5.19
节点2:172.216.18.40
节点3:172.216.33.100
1、Java安装
# yum -y install java-1.8.0
# vi /etc/profile
2、elasticsearch安装
# yum -y install elasticsearch
# mkdir -p /data/elk/data /data/elk/logs
# chown -R elasticsearch /data/elk
# vi /etc/elasticsearch/elasticsearch.yml # 三个节点分别安装 elasticsearch
# vi /etc/security/limits.conf
# vi /usr/lib/systemd/system/elasticsearch.service
# systemctl daemon-reload
# systemctl enable elasticsearch
# systemctl start elasticsearch
# tailf /data/elk/logs/wxtky-elk.log
# netstat -tunlp | egrep "9200|9300"
3、安装head
# yum -y install git
# cd /data/elk
# git clone https://github.com/mobz/elasticsearch-head.git
# cd elasticsearch-head
# wget https://nodejs.org/dist/v4.6.1/node-v4.6.1-linux-x64.tar.gz
# tar -zxvf node-v4.6.1-linux-x64.tar.gz
# vi /etc/profile
# source /etc/profile
# npm install -g cnpm --registry=https://registry.npm.taobao.org
# npm install -g grunt-cli # npm install grunt 多试几次,我这边用这个成功了
# vi Gruntfile.js +94
# vi _site/app.js +4329
# cd /data/elk/elasticsearch-head
# grunt server
# 开始安装
#
。。。。。。
# 全部安装完毕后再次 grunt server
按照提示 打开 head插件 页面
4、Kibana 安装
# yum安装
# rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch
# vi /etc/yum.repos.d/kibana.repo
[kibana-4.6]
name=Kibana repository for 4.6.x packages
baseurl=https://packages.elastic.co/kibana/4.6/centos
gpgcheck=1
gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
# systemctl enable kibana
# 配置
# vi /opt/kibana/config/kibana.yml
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.url: "http://172.214.5.19:9200"   # 节点2修改
kibana.index: ".kibana"
# firewall-cmd --permanent --add-port='5601/tcp'
# 启动
# systemctl daemon-reload
# systemctl start kibana
# 登入测试
5、kafka安装(节点1&2)
# 解压安装
# cd /usr/local/src
# wget http://mirror.rise.ph/apache/kafka/0.10.2.0/kafka_2.12-0.10.2.0.tgz
# tar -xf kafka_2.12-0.10.2.0.tgz
# mv kafka_2.12-0.10.2.0 /usr/local/
# cd /usr/local && ln -sv kafka_2.12-0.10.2.0/ kafka
# zookeeper集群配置
# vi /usr/local/kafka/config/zookeeper.properties
dataDir=/data/zookeeper/data
dataLogDir=/data/zookeeper/log
clientPort=2181
tickTime=2000
initLimit=20
syncLimit=10
server.1=172.214.5.19:2888:3888
server.2=172.216.18.40:2888:3888
# mkdir /data/zookeeper
# echo 1 > /data/zookeeper/myid #节点2修改 echo 2 > /data/zookeeper/myid
# vi /usr/local/kafka/config/server.properties
broker.id=1 #节点2修改
listeners=PLAINTEXT://172.214.5.19:9092 #节点2修改
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka-logs #该目录会自动新建
num.partitions=16
num.recovery.threads.per.data.dir=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=172.214.5.19:2181,172.216.18.40:2181
zookeeper.connection.timeout.ms=6000
# 两个节点分别启动
# screen -S zookeeper
# /usr/local/kafka/bin/zookeeper-server-start.sh /usr/local/kafka/config/zookeeper.properties #启集群
# /usr/local/kafka/bin/zookeeper-server-stop.sh #停
# exit
# screen -S kafka
# /usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties #启kafka
# /usr/local/kafka/bin/kafka-server-stop.sh #停
# exit
# 两个节点防火墙设置
# firewall-cmd --permanent --add-port={2888/tcp,3888/tcp,2181/tcp,9092/tcp}
# firewall-cmd --reload
# 测试
# 新建一个主题
# /usr/local/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 1 --topic summer
# 查看刚创建的主题
# /usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 172.214.5.19:2181
# 查看summer主题的详情
# /usr/local/kafka/bin/kafka-topics.sh --describe --zookeeper 172.214.5.19:2181 --topic summer
# 发送消息
# /bin/bash /usr/local/kafka/bin/kafka-console-producer.sh --broker-list 172.214.5.19:9092 --topic summer
this is a news
# 接收消息
# /usr/local/kafka/bin/kafka-console-consumer.sh --zookeeper 172.216.18.40:2181 --topic summer --from-beginning
6、logstash安装(节点1&2)
# yum安装
# rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch
# vi /etc/yum.repos.d/logstash.repo
[logstash-2.4]
name=Logstash repository for 2.4.x packages
baseurl=https://packages.elastic.co/logstash/2.4/centos
gpgcheck=1
gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
# yum install -y logstash
# chkconfig logstash on
# syslog 监控
# ssh 172.216.33.100
# vi /etc/rsyslog.conf
*.* @@172.214.5.19:514
# systemctl restart rsyslog
# exit
# 回到节点1
# cd /etc/logstash/conf.d/
# vi input.conf
input {
  syslog {
    type => "system-syslog"
    host => "172.214.5.19"
    port => "514"
  }
}
output {
  kafka {                                                        # 输出到kafka
    bootstrap_servers => "172.214.5.19:9092,172.216.18.40:9092"  # 他们就是生产者
    topic_id => "system-syslog"   # 这个将作为主题的名称,将会自动创建
    compression_type => "snappy"  # 压缩类型
  }
}
# vi output.conf
input {
  kafka {
    zk_connect => "172.214.5.19:2181,172.216.18.40:2181"  # 消费者们
    topic_id => "system-syslog"
    codec => plain
    reset_beginning => false
    consumer_threads => 5
    decorate_events => true
  }
}
output {
  elasticsearch {
    hosts => ["172.214.5.19:9200","172.216.18.40:9200"]
    index => "system-syslog-%{+YYYY-MM-dd}"  # 索引名按天滚动:system-syslog-YYYY-MM-dd
  }
}
# /etc/init.d/logstash start
# 测试查看kafka是否有system-syslog主题
# /usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 172.214.5.19:2181
system-syslog
# URL登入
# kibana新建索引
填写 system-syslog-*
7、nginx负载均衡(172.216.33.100 端)
# 下载安装
# cd /usr/local/src/
# wget http://nginx.org/download/nginx-1.11.12.tar.gz
# tar -zxf nginx-1.11.12.tar.gz
# yum -y install zlib zlib-devel openssl openssl-devel pcre pcre-devel
# cd nginx-1.11.12/
# ./configure --prefix=/usr/local/nginx
# make && make install
# /usr/local/nginx/sbin/nginx -t #测试
# 配置
# firewall-cmd --permanent --add-port='80/tcp'
# firewall-cmd --reload
# vi /usr/local/nginx/conf/nginx.conf
#user nobody; worker_processes 1; #error_log logs/error.log; #error_log logs/error.log notice; #error_log logs/error.log info; #pid logs/nginx.pid; events { worker_connections 1024; } http { include /usr/local/nginx/conf.d/*.conf; #修改点 include mime.types; default_type application/octet-stream; #log_format main '$remote_addr - $remote_user [$time_local] "$request" ' # '$status $body_bytes_sent "$http_referer" ' # '"$http_user_agent" "$http_x_forwarded_for"'; #access_log logs/access.log main; sendfile on; #tcp_nopush on; #keepalive_timeout 0; keepalive_timeout 65; #gzip on; #server { # server注释掉 #listen 80; # server_name localhost; #charset koi8-r; #access_log logs/host.access.log main; # location / { # root html; # index index.html index.htm; # } #error_page 404 /404.html; # redirect server error pages to the static page /50x.html # # error_page 500 502 503 504 /50x.html; # location = /50x.html { # root html; # } # proxy the PHP scripts to Apache listening on 127.0.0.1:80 # #location ~ \.php$ { # proxy_pass http://127.0.0.1; #} # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 # #location ~ \.php$ { # root html; # fastcgi_pass 127.0.0.1:9000; # fastcgi_index index.php; # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; # include fastcgi_params; #} # deny access to .htaccess files, if Apache's document root # concurs with nginx's one # #location ~ /\.ht { # deny all; #} } # another virtual host using mix of IP-, name-, and port-based configuration # #server { # listen 8000; # listen somename:8080; # server_name somename alias another.alias; # location / { # root html; # index index.html index.htm; # } #} # HTTPS server # #server { # listen 443 ssl; # server_name localhost; # ssl_certificate cert.pem; # ssl_certificate_key cert.key; # ssl_session_cache shared:SSL:1m; # ssl_session_timeout 5m; # ssl_ciphers HIGH:!aNULL:!MD5; # ssl_prefer_server_ciphers on; # location / { # root html; # index index.html index.htm; # } #} #} |
# mkdir /usr/local/nginx/conf.d/
# vi /usr/local/nginx/conf.d/es.conf
upstream es {
    server 172.214.5.19:5601 max_fails=3 fail_timeout=30s;
    server 172.216.18.40:5601 max_fails=3 fail_timeout=30s;
}
server {
    listen       80;
    server_name  localhost;

    location / {
        proxy_pass http://es/;
        index index.html index.htm;
        auth_basic           "ELK Private";
        auth_basic_user_file /usr/local/nginx/.htpasswd;
    }
}
# /usr/local/nginx/sbin/nginx -t #测试配置文件正确性
# yum -y install httpd-tools # htpasswd命令包
# htpasswd -cm /usr/local/nginx/.htpasswd elk #新建elk用户
# /usr/local/nginx/sbin/nginx #启
# ps -ef | grep nginx | grep -v grep | awk '{print $2}' | xargs kill -9 #停
# 登入测试
URL:http://172.216.33.100
输入账号密码