elk部署文档

1.下载安装docker

# Install Docker CE 18.06 from the Aliyun mirror (CentOS 7) and configure
# a registry mirror for faster image pulls.
yum install -y yum-utils device-mapper-persistent-data lvm2
yum -y install wget vim
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce-18.06.1.ce-3.el7
docker --version
# -p: do not fail if the directory already exists (keeps the steps re-runnable)
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://jo6348gu.mirror.aliyuncs.com"]
}
EOF
systemctl enable docker && systemctl start docker

2.配置环境准备

# Prepare data directories and kernel settings for the two ES nodes.
mkdir -p /data/es/config
mkdir -p /data/es2
# Guard so a second run does not fail on an existing user.
id -u elasticsearch >/dev/null 2>&1 || useradd elasticsearch
chown -R elasticsearch:elasticsearch /data
# Elasticsearch requires a high vm.max_map_count; guard against appending
# a duplicate line every time these steps are re-run.
grep -q '^vm.max_map_count=' /etc/sysctl.conf || cat >> /etc/sysctl.conf << EOF
vm.max_map_count=655360
EOF
sysctl -p

# Download the ES 7.8.0 tarball only to obtain its stock config files,
# which are then edited and bind-mounted into the containers.
wget https://mirrors.huaweicloud.com/elasticsearch/7.8.0/elasticsearch-7.8.0-linux-x86_64.tar.gz
tar -zxvf elasticsearch-7.8.0-linux-x86_64.tar.gz
cp -r elasticsearch-7.8.0/config/* /data/es/config
docker network create elk_elastic              # create a dedicated docker network for the ELK stack
vim /data/es/config/elasticsearch.yml

cluster.name: "docker-cluster"                     # cluster name; must be identical on every node of the same cluster
network.host: 0.0.0.0                              # sets both bind_host and publish_host at once
node.name: node1                                   # this node's name
node.master: true                                  # node is eligible to be elected master
node.data: true                                    # node may store data
network.publish_host: 172.18.0.1                   # address other nodes use to reach this node (presumably the docker bridge gateway -- confirm with `docker network inspect elk_elastic`)
http.port: 9201                                    # HTTP port exposed to clients
transport.tcp.port: 9301                           # port used for node-to-node transport
http.cors.enabled: true
http.cors.allow-origin: "*"
discovery.seed_hosts: ["172.18.0.1:9301","172.18.0.1:9302"]         # initial list of master-eligible nodes; new nodes discover the cluster through them
cluster.initial_master_nodes: ["node1"]
xpack.monitoring.collection.enabled: true
action.auto_create_index: true                    # allow indices to be created automatically on first write
# Clone node1's config for node2, then rewrite the node-specific values.
cp -r /data/es/config /data/es2/config
chown -R elasticsearch:elasticsearch /data
# Anchor the sed patterns to the exact keys so no other occurrence of these
# numbers (e.g. inside discovery.seed_hosts) can be rewritten by accident.
sed -i 's/^node.name: node1/node.name: node2/' /data/es2/config/elasticsearch.yml
sed -i 's/^http.port: 9201/http.port: 9202/' /data/es2/config/elasticsearch.yml
sed -i 's/^transport.tcp.port: 9301/transport.tcp.port: 9302/' /data/es2/config/elasticsearch.yml

3.elk服务端

创建elasticsearch集群

# World-writable so the in-container elasticsearch user can write data/logs.
chmod -R 777 /data/es
chmod -R 777 /data/es2
cd /data
# $(pwd) instead of backticks; quote the volume arguments; both containers
# join the dedicated elk_elastic network.
docker run -d --name es-node1 --network elk_elastic -p 9201:9201 -p 9301:9301 \
  -v "$(pwd)/es/config/:/usr/share/elasticsearch/config" \
  -v "$(pwd)/es/data/:/usr/share/elasticsearch/data" \
  elasticsearch:7.8.0
docker run -d --name es-node2 --network elk_elastic -p 9202:9202 -p 9302:9302 \
  -v "$(pwd)/es2/config/:/usr/share/elasticsearch/config" \
  -v "$(pwd)/es2/data/:/usr/share/elasticsearch/data" \
  elasticsearch:7.8.0

检查集群状态,响应中出现 "status" : "green" 字段即为正常。

# Query cluster health; expect "status" : "green" once both nodes have joined.
curl 127.0.0.1:9201/_cluster/health?pretty

4.搭建kibana

# Kibana: create the config directory and write kibana.yml (contents below).
mkdir /data/kibana
cd /data
vim kibana/kibana.yml

server.name: kibana
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://172.18.0.1:9201","http://172.18.0.1:9202"]       # the Elasticsearch nodes Kibana connects to
monitoring.ui.container.elasticsearch.enabled: true
# Run Kibana on the ELK network; the config file is mounted read-only.
# $(pwd) instead of backticks, volume argument quoted (cwd must be /data).
docker run -d --name kibana --network elk_elastic -p 5601:5601 \
  -v "$(pwd)/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml:ro" \
  kibana:7.8.0

5.搭建redis

# Redis acts as the buffer between filebeat and logstash.
mkdir -p /data/redis/data
# Absolute path so this step does not silently depend on the current
# directory still being /data from an earlier step.
vim /data/redis/data/redis.conf

bind 0.0.0.0
daemonize no                      # must stay 'no' under docker, or the container exits immediately
pidfile "/var/run/redis.pid"
port 6380                         # matches the container's -p 6380:6380 mapping
timeout 300
loglevel warning
logfile "redis.log"               # relative to 'dir' below, i.e. /data inside the container
databases 16
rdbcompression yes
dbfilename "redis.rdb"
dir "/data"
requirepass "123456"              # NOTE(review): demo password -- change for any real deployment
masterauth "123456"
maxclients 10000
maxmemory 1000mb
maxmemory-policy allkeys-lru      # evict any key by LRU once maxmemory is reached
appendonly yes
appendfsync always                # fsync every write: safest durability, slowest throughput
# Absolute host path (instead of `pwd`) so the command works from any cwd.
# redis-server resolves "redis.conf" relative to the container workdir /data,
# which is exactly the mounted host directory.
docker run -d --name redis --network elk_elastic -p 6380:6380 \
  -v /data/redis/data/:/data \
  redis:5.0 redis-server redis.conf

6.搭建logstash

logstash需要准备三个配置文件

# Logstash needs three config files: logstash.yml, pipelines.yml and the
# pipeline definition docker.conf (all written below).
mkdir -p /data/logstash/config
vim /data/logstash/config/logstash.yml

http.host: "0.0.0.0"
xpack.monitoring.enabled: true
# Ship logstash's own monitoring data to both ES nodes.
xpack.monitoring.elasticsearch.hosts: ["http://172.18.0.1:9201","http://172.18.0.1:9202"]

vim /data/logstash/config/pipelines.yml

- pipeline.id: docker
  path.config: "/usr/share/logstash/config/docker.conf"  # NOTE: path inside the container, not on the host

vim /data/logstash/config/docker.conf

# Pipeline: pull log events from the redis list "localhost", grok-parse them,
# route exception lines (except expected auth noise) to a local error file,
# and everything else to Elasticsearch.
# FIX: the grok field `%{JAVACLASS:class}` had been split mid-word
# ("cl ... ass") by a copy/paste line wrap, which made the pattern invalid.
input {
        redis {
                host => "172.18.0.1"
                port => "6380"
                password => "123456"
                db => "0"
                data_type => "list"
                key => "localhost"
                threads => 4
#               codec => multiline{
#                 pattern => "^\["
#                 negate => true
#                 what => previous
#               }
        }
}
filter {
        # Tag java exception lines (ignoring known, harmless auth exceptions)
        # with error_log=0 so the output stage can divert them.
        if "exception" in [message] and "UsernameNotFoundException" not in [message] and "userdetails" not in [message] {
                grok {
                        match => ["message", "%{TIMESTAMP_ISO8601:time}\s* \s*%{NOTSPACE:thread-id}\s* \s*%{LOGLEVEL:level}\s* \s*%{JAVACLASS:class}\s* \- \s*%{JAVALOGMESSAGE:logmessage}\s*"]
                }
                mutate {
                        add_field => {"error_log" => "0"}
                }
        }
        else {
                grok {
                        match => ["message", "%{TIMESTAMP_ISO8601:time}\s* \s*%{NOTSPACE:thread-id}\s* \s*%{LOGLEVEL:level}\s* \s*%{JAVACLASS:class}\s* \- \s*%{JAVALOGMESSAGE:logmessage}\s*"]
                }
        }
}
output {
        # Exception events go to a file on the mounted config volume ...
        if [error_log] == "0" {
                file {
                        path => "/usr/share/logstash/config/error.log"
                        codec => line {
                                format => "%{message}"
                        }
                }
        }
        # ... everything else is indexed into the ES cluster.
        else {
                elasticsearch {
                        hosts => ["172.18.0.1:9202","172.18.0.1:9201"]
                        index => "filebeat-%{+YYYY.MM.dd}"
                }
        }
}

mkdir -p /data/logstash/pipeline 

# 5044: beats input; 9600: logstash monitoring API. Both config and
# pipeline directories are bind-mounted from the host.
docker run -d -p 5044:5044  --network elk_elastic -p 9600:9600 --name logstash  -v /data/logstash/config/:/usr/share/logstash/config  -v /data/logstash/pipeline/:/usr/share/logstash/pipeline logstash:7.8.0

filebeat日志收集

https://blog.csdn.net/cyfblog/article/details/102839590 参考网址

# Filebeat (to be installed on application hosts) ships logs into the
# redis list consumed by the logstash pipeline above.
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.8.0-linux-x86_64.tar.gz

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值