使用 docker-compose 方式启动 Elasticsearch；Kibana 用来做数据的图表展示。
docker-compose.yml 内容如下：
version: '3'

networks:
  qz:

volumes:
  esdata:

services:
  es:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.7.0
    container_name: es
    environment:
      # The ES Docker image (7.x) only recognizes settings passed as their
      # dot-notation names; UPPER_SNAKE forms such as XPACK_MONITORING_ENABLED
      # or ES_NETWORK_HOST are silently ignored.
      - "cluster.name=laini-cluster"
      - "discovery.type=single-node"
      - "ES_JAVA_OPTS=-Xms4000m -Xmx4000m"
      - "xpack.monitoring.enabled=false"
      - "xpack.monitoring.collection.enabled=false"
      - "network.host=0.0.0.0"
      # Required for the memlock ulimit below to actually lock the heap.
      - "bootstrap.memory_lock=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - esdata:/usr/share/elasticsearch/data
    networks:
      - qz
    expose:
      - "9300"
      - "9200"
    ports:
      # Quote port mappings so YAML never misreads them (sexagesimal trap).
      - "9200:9200"
    restart: always

  kibana:
    image: docker.elastic.co/kibana/kibana:7.7.0
    container_name: kibana
    ports:
      - "5601:5601"
    volumes:
      # Expects ./kibana/kibana.yml on the host — NOTE(review): confirm the
      # directory exists next to this compose file.
      - ./kibana:/usr/share/kibana/config
    networks:
      - qz
    depends_on:
      - es
启动命令：docker-compose up -d
Logstash 的配置文件（通过 JDBC 定时增量同步 MySQL 数据到 Elasticsearch）：
# Pipeline: every minute, pull rows from MySQL table t_sensor whose id is
# greater than the last synced id, and index them into ES index "silu".
input {
  stdin {
  }
  jdbc {
    jdbc_connection_string => "jdbc:mysql://192.168.21.51:3306/tsdb?characterEncoding=UTF-8&useSSL=false&autoReconnect=true&serverTimezone=Asia/Shanghai"
    jdbc_user => "root"
    jdbc_password => "root"
    jdbc_driver_library => "/data/sl/logstash/mysql-connector-java-5.1.47.jar"
    jdbc_driver_class => "com.mysql.jdbc.Driver"
    jdbc_paging_enabled => "true"
    jdbc_page_size => "50000"
    # :sql_last_value is the highest id seen so far (see use_column_value below).
    statement => "SELECT id,content from t_sensor a WHERE a.id > :sql_last_value order by a.id"
    schedule => "* * * * *"
    codec => json {charset => "UTF-8"}
    type => silu
    lowercase_column_names => false
    record_last_run => true
    # BUG FIX: the statement compares a.id against :sql_last_value, so the
    # tracked value must be the id column. With use_column_value => false,
    # :sql_last_value is the last-run *timestamp*, and "id > timestamp" never
    # selects the intended rows. Track the numeric id column instead.
    use_column_value => true
    tracking_column => "id"
    tracking_column_type => "numeric"
    last_run_metadata_path => "sensor_last_id"
    clean_run => false
  }
}
filter {
  mutate {
    # Drop Logstash bookkeeping fields before indexing.
    remove_field => ["@version","message","host","path"]
  }
}
output {
  if [type] == "silu" {
    elasticsearch {
      hosts => ["192.168.21.46:9200"]
      index => "silu"
      document_type => "_doc"
      # Use the MySQL primary key as the ES document id so re-syncs upsert
      # instead of duplicating documents.
      document_id => "%{id}"
    }
    stdout {
      codec => rubydebug
    }
  }
}