系统环境
- 操作系统:CentOS Linux release 7.6.1810 (Core) / 3.10.0-957.el7.x86_64
- 主机角色
| ip | 部署内容 |
| --- | --- |
| 10.10.10.110 | JDK, Nodejs, Elasticsearch, Kafka, Logstash, Filebeat, Kibana |
| 10.10.10.177 | JDK, Elasticsearch, Kafka, Filebeat |
| 10.10.10.178 | JDK, Elasticsearch, Kafka, Filebeat |
- 系统优化
# Kernel tuning required by Elasticsearch's bootstrap checks:
#   vm.swappiness=0     - keep the JVM heap out of swap
#   vm.max_map_count    - raise the mmap-area limit ES needs
echo "vm.swappiness=0" >> /etc/sysctl.conf
echo "vm.max_map_count=655350" >> /etc/sysctl.conf
sysctl -p
# Raise the open-file limit for all users (ES requires a high nofile value).
cat >>/etc/security/limits.conf << EOF
root soft nofile 65535
root hard nofile 65535
* soft nofile 65535
* hard nofile 65535
EOF
目录规划
- 软件下载目录
# Directory for downloaded tarballs.
mkdir -p /data/downloads
- 安装目录(手动)
# Installation root for all components.
mkdir -p /data/software
软件下载
环境安装
Java8安装配置
# Install OpenJDK 8 from the distro repositories (the glob pulls in the
# -devel package providing /etc/alternatives/java_sdk_1.8.0).
yum install java-1.8.0-openjdk*
# Append the environment variables below to /etc/profile, then re-login
# or `source /etc/profile`.
vim /etc/profile
JAVA_HOME=/etc/alternatives/java_sdk_1.8.0
JRE_HOME=$JAVA_HOME/jre
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/jre/lib/rt.jar
# Original appended $JAVA_HOME/bin twice; the second entry was meant to be
# $JRE_HOME/bin.
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
# Export the variables so child processes (ES, Kafka, ...) can see them;
# the original only exported PATH and CLASSPATH.
export JAVA_HOME JRE_HOME CLASSPATH
Nodejs12.13安装配置
# Unpack the Node.js 12.13 tarball (xz-compressed, hence no -z flag)
tar -vxf node-v12.13.0-linux-x64.tar.xz
# Move it to the install directory
mv node-v12.13.0-linux-x64 /data/software/node12
# Append the Node.js environment variables below to /etc/profile
vim /etc/profile
export NODEJS_PATH=/data/software/node12
export PATH=$NODEJS_PATH/bin:$PATH
软件安装
Elasticsearch安装配置
- ES安装
#初始主节点,必须三个节点都启动,集群才能启动成功。
- 主库配置
- 从配置
- ES启动
# Unpack Elasticsearch 7.6.2
tar -zvxf elasticsearch-7.6.2.tar.gz
# Move it under the install directory
mv elasticsearch-7.6.2 /data/software/elk
# Enter the install directory
cd /data/software/elk/elasticsearch-7.6.2
# Create the data/log directories referenced by path.data/path.logs below
mkdir -p /data/software/elk/elasticsearch-7.6.2/{data,logs}
# Create a dedicated user (ES refuses to start as root) and hand it the tree
useradd -m elasticsearch
chown -R elasticsearch:elasticsearch /data/software/elk/elasticsearch-7.6.2
# Cluster name; must be identical on all three nodes
cluster.name: test_logcollect_essvr
# Node name, unique per node
node.name: node110
# Eligible to be elected master
node.master: true
# Stores data
node.data: true
# Max nodes sharing one data path (left disabled)
#node.max_local_storage_nodes: 3
# Bind/publish address of this node
network.host: 10.10.10.110
# HTTP (REST) port
http.port: 19200
# Transport port for inter-node communication
transport.tcp.port: 19300
# Number of master-eligible nodes a node must see. Default is 1; for larger
# clusters use a bigger value (2-4). Largely superseded by the 7.x discovery
# settings below, but harmless here.
discovery.zen.minimum_master_nodes: 2
# New in ES 7.x: addresses of master-eligible nodes used for discovery
discovery.seed_hosts: ["10.10.10.110:19300","10.10.10.177:19300","10.10.10.178:19300"]
# New in ES 7.x: node names used to bootstrap a brand-new cluster's first
# master election (all three nodes must be started for the cluster to form)
cluster.initial_master_nodes: ["node110","node177","node178"]
# Data path
path.data: /data/software/elk/elasticsearch-7.6.2/data
# Log path
path.logs: /data/software/elk/elasticsearch-7.6.2/logs
#index.number_of_shards: 5
#index.number_of_replicas: 1
# Do not mlock the heap; consider true in production if ulimits allow
bootstrap.memory_lock: false
# Skip the system-call-filter bootstrap check
bootstrap.system_call_filter: false
# Allow cross-origin HTTP requests (e.g. browser-based ES tools)
http.cors.enabled: true
http.cors.allow-origin: "*"
# Cluster name; must be identical on all three nodes
cluster.name: test_logcollect_essvr
# Node name, unique per node
node.name: node177
# Eligible to be elected master
node.master: true
# Stores data
node.data: true
# Max nodes sharing one data path (left disabled)
#node.max_local_storage_nodes: 3
# Bind/publish address of this node
network.host: 10.10.10.177
# HTTP (REST) port
http.port: 19200
# Transport port for inter-node communication
transport.tcp.port: 19300
# Number of master-eligible nodes a node must see. Default is 1; for larger
# clusters use a bigger value (2-4). Largely superseded by the 7.x discovery
# settings below, but harmless here.
discovery.zen.minimum_master_nodes: 2
# New in ES 7.x: addresses of master-eligible nodes used for discovery
discovery.seed_hosts: ["10.10.10.110:19300","10.10.10.177:19300","10.10.10.178:19300"]
# New in ES 7.x: node names used to bootstrap a brand-new cluster's first
# master election (all three nodes must be started for the cluster to form)
cluster.initial_master_nodes: ["node110","node177","node178"]
# Data path
path.data: /data/software/elk/elasticsearch-7.6.2/data
# Log path
path.logs: /data/software/elk/elasticsearch-7.6.2/logs
#index.number_of_shards: 5
#index.number_of_replicas: 1
# Do not mlock the heap; consider true in production if ulimits allow
bootstrap.memory_lock: false
# Skip the system-call-filter bootstrap check
bootstrap.system_call_filter: false
# Allow cross-origin HTTP requests (e.g. browser-based ES tools)
http.cors.enabled: true
http.cors.allow-origin: "*"
su elasticsearch # switch to the elasticsearch user (ES will not start as root)
./bin/elasticsearch -d # start as a daemon; repeat on all three nodes
kafka 安装配置
- 安装
- 配置zk(/data/software/elk/kafka/config/zookeeper.properties)
- 配置kafka(/data/software/elk/kafka/config/server.properties)
- 服务启动
cd /data/downloads
# Unpack Kafka 2.5.0 (Scala 2.12) and move it to the install directory
tar -zvxf kafka_2.12-2.5.0.tgz
mv kafka_2.12-2.5.0 /data/software/elk/kafka
cd /data/software/elk/kafka
# ZooKeeper data dir and Kafka log dir referenced by the config files below
mkdir -p /data/software/elk/kafka/data/zookeeper
mkdir -p /data/software/elk/kafka/logs
# Create the ZooKeeper myid file; each node uses a distinct id matching the
# server.N entries below (110 -> 1, 177 -> 2, 178 -> 3)
echo "1" > /data/software/elk/kafka/data/zookeeper/myid
# the directory where the snapshot is stored.
dataDir=/data/software/elk/kafka/data/zookeeper
# the port at which the clients will connect
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# Disable the adminserver by default to avoid port conflicts.
# Set the port to something non-conflicting if choosing to enable this
admin.enableServer=false
# admin.serverPort=8080
# Base time unit in ms; initLimit/syncLimit below are multiples of it
tickTime=2000
initLimit=20
syncLimit=10
# Ensemble members: server.<myid>=<host>:<peer-port>:<leader-election-port>
server.1=10.10.10.110:2888:3888
server.2=10.10.10.177:2888:3888
server.3=10.10.10.178:2888:3888
# Broker id: unique per node (110 -> 1, 177 -> 2, 178 -> 3)
broker.id=1
# Port this broker listens on. The original read "prot=9092" — a typo, so the
# setting was silently ignored (Kafka happened to fall back to its 9092
# default). NOTE(review): port/host.name are legacy settings; on Kafka 2.x
# prefer listeners=PLAINTEXT://10.10.10.110:9092 instead.
port=9092
# This node's IP address (set per node)
host.name=10.10.10.110
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
# Kafka data directory; created automatically on first start
log.dirs=/data/software/elk/kafka/logs
# Default partition count per topic; partitions drive read/write parallelism
num.partitions=16
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
# ZooKeeper ensemble
zookeeper.connect=10.10.10.110:2181,10.10.10.177:2181,10.10.10.178:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
# Start ZooKeeper on every node, in server-id order (1, 2, 3).
# 2>&1 added: both daemons log to stdout AND stderr; without it, errors were
# lost (or killed the job when the terminal closed).
nohup /data/software/elk/kafka/bin/zookeeper-server-start.sh /data/software/elk/kafka/config/zookeeper.properties >/tmp/zookeeper.log 2>&1 &
# The node that owns port 2888 is the current leader
netstat -nlpt | grep -E "2181|2888|3888"
# Then start Kafka on every node, in the same order
nohup /data/software/elk/kafka/bin/kafka-server-start.sh /data/software/elk/kafka/config/server.properties >/tmp/kafka.log 2>&1 &
Logstash 安装配置
- 安装
- 配置kafka传数据到es(/data/software/elk/logstash-7.6.2/config/kafka-es.conf)
- 服务启动
# Unpack Logstash 7.6.2 and move it to the install directory
tar -zvxf logstash-7.6.2.tar.gz
mv logstash-7.6.2 /data/software/elk
cd /data/software/elk/logstash-7.6.2
# Create the log directory ("makdir" in the original was a typo and the
# command failed)
mkdir -p /data/software/elk/logstash-7.6.2/logs
input {
kafka {
bootstrap_servers => "10.10.10.110:9092,10.10.10.177:9092,10.10.10.178:9092" #Kafka cluster addresses
topics_pattern => ".*" #regex: consume every topic
#topics => ["ng_cardapi_access","ng_cardapi_error"] #alternatively, an explicit list of topics
codec => "json" #parse messages as JSON
consumer_threads => 5 #consumer thread count
decorate_events => true #add topic/offset/group/partition metadata to each event
auto_offset_reset => "latest" #start consuming from the latest offset
}
}
output {
elasticsearch {
hosts => ["10.10.10.110:19200","10.10.10.177:19200","10.10.10.178:19200"] #ES cluster endpoints
index => "%{[log_topic]}_%{+YYYY.MM.dd}" #daily indices named after the Filebeat log_topic field
#index => "%{[@metadata][kafka][topic]}_%{+YYYY.MM.dd}" #the topic can also be read from @metadata
}
#conditional per-topic form:
# if [log_topic] == "ng_cardapi_access" {
# elasticsearch {
# hosts => ["10.10.10.110:19200","10.10.10.177:19200","10.10.10.178:19200"] #ES cluster endpoints
# index => "ng_cardapi_access_%{+YYYY.MM.dd}" #daily index
# }
# }
#
# if [log_topic] == "ng_cardapi_error" {
# elasticsearch {
# hosts => ["10.10.10.110:19200","10.10.10.177:19200","10.10.10.178:19200"] #ES cluster endpoints
# index => "ng_cardapi_error_%{+YYYY.MM.dd}" #daily index
# }
# }
stdout {
codec => rubydebug {metadata => true} #echo events incl. @metadata to the console for debugging
}
}
# Validate the pipeline config first
/data/software/elk/logstash-7.6.2/bin/logstash -t -f /data/software/elk/logstash-7.6.2/config/kafka-es.conf
# Start Logstash in the background. Fixed the "logstach.log" typo and added
# 2>&1 so startup errors are captured too.
nohup /data/software/elk/logstash-7.6.2/bin/logstash -f /data/software/elk/logstash-7.6.2/config/kafka-es.conf > /tmp/logstash.log 2>&1 &
filebeat 安装配置
- 安装
- 配置
- 服务启动
cd /data/downloads/
# Unpack Filebeat 7.6.2 and move it to the install directory
tar -zxvf filebeat-7.6.2-linux-x86_64.tar.gz
mv filebeat-7.6.2-linux-x86_64/ /data/software/elk/filebeat-7.6.2
filebeat.inputs:
# Nginx access log -> Kafka topic ng_cardapi_access
- type: log
  enabled: true
  json.keys_under_root: true    # lift parsed JSON fields to the event root
  json.overwrite_keys: true     # parsed JSON overwrites conflicting keys
  json.add_error_key: true      # surface JSON parse errors in the event
  fields_under_root: true       # lift custom fields to the event root
  paths:
    - /data/logs/nginx/card/card-api.access_log
  fields:
    log_topic: ng_cardapi_access   # Kafka topic for this input
# Nginx error log -> Kafka topic ng_cardapi_error
- type: log
  enabled: true
  json.keys_under_root: true
  json.overwrite_keys: true
  json.add_error_key: true
  fields_under_root: true
  paths:
    - /data/logs/nginx/card/card-api.error_log
  fields:
    log_topic: ng_cardapi_error
output.kafka:
  enabled: true
  # Each host is quoted: a bare ip:port inside a YAML flow sequence is not a
  # safe plain scalar (the ':' can be read as a mapping separator).
  hosts: ["10.10.10.110:9092", "10.10.10.177:9092", "10.10.10.178:9092"]
  topic: '%{[log_topic]}'   # routed by the per-input fields.log_topic value
# Start Filebeat in the background. 2>&1 added: -e makes Filebeat log to
# stderr, so without it /tmp/filebeat.log stayed empty.
nohup /data/software/elk/filebeat-7.6.2/filebeat -e -c /data/software/elk/filebeat-7.6.2/filebeat.yml > /tmp/filebeat.log 2>&1 &
Kibana 安装配置
- 安装(依赖node)
- 配置(/data/software/elk/kibana-7.6.2/config)
- 服务启动
cd /data/downloads/
# Unpack Kibana 7.6.2 and move it to the install directory
tar -zxvf kibana-7.6.2-linux-x86_64.tar.gz
mv kibana-7.6.2-linux-x86_64 /data/software/elk/kibana-7.6.2
# UI language
i18n.locale: "zh-CN"
server.port: 5601 # server port; 5601 is the default
server.host: "10.10.10.110" # bind address
elasticsearch.hosts: ["http://10.10.10.110:19200","http://10.10.10.177:19200","http://10.10.10.178:19200"] # ES endpoints
# NOTE(review): this logs directory is not created by the install steps above — create it before starting
logging.dest: /data/software/elk/kibana-7.6.2/logs/kibana.log
kibana.index: ".kibana"
xpack.reporting.encryptionKey: "a_random_string" # replace with a random secret
xpack.security.encryptionKey: "something_at_least_32_characters" # replace with >=32 random characters
# Start Kibana in the background (--allow-root since we run as root).
# The original's stray '&' before the redirection backgrounded Kibana with no
# log redirection and left "> /tmp/kibana.log 2>&1 &" as a separate empty command.
nohup /data/software/elk/kibana-7.6.2/bin/kibana --allow-root > /tmp/kibana.log 2>&1 &