一、安装logstash
mkdir -p /home/docker/logstash-docker
mkdir -p /home/tools/logstash/config/pipelines /home/tools/logstash/lib   # 创建下方 volumes 挂载所需的宿主机目录
cd /home/docker/logstash-docker
vi docker-compose.yml
docker-compose.yml 内容如下:
version: '2.2'

networks:
  esnet:
    driver: bridge

services:
  logstash:
    image: logstash:7.6.0
    hostname: logstash
    container_name: logstash
    # NOTE(review): the original file declared `environment:` twice; YAML
    # parsers silently keep only the last mapping value, so TZ was dropped.
    # Merged into a single list. Quotes removed from TZ — in list form,
    # `TZ="Asia/Shanghai"` makes the quotes part of the value.
    environment:
      - TZ=Asia/Shanghai
      - SET_CONTAINER_TIMEZONE=true
      - CONTAINER_TIMEZONE=Asia/Shanghai
    volumes:
      - /home/tools/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro  # main config
      - /home/tools/logstash/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml:ro  # pipeline registry
      - /home/tools/logstash/lib/ojdbc8-19.3.0.0.jar:/usr/share/logstash/config/ojdbc8-19.3.0.0.jar:ro  # Oracle JDBC driver
      - /home/tools/logstash/config/pipelines/:/usr/share/logstash/config/pipelines:ro  # per-table pipeline configs
      - /etc/localtime:/etc/localtime:ro  # sync container clock/timezone with host
    ports:
      - "5044:5044"  # quoted: unquoted port mappings can hit YAML's sexagesimal-int trap
    networks:
      - esnet
logstash.yml文件内容如下:
# Logstash main settings (mounted read-only at /usr/share/logstash/config/logstash.yml)
# Bind the HTTP API to all interfaces so it is reachable from outside the container.
http.host: "0.0.0.0"
# ES nodes that receive Logstash's own monitoring data.
# NOTE(review): these hosts (192.168.0.11) differ from the output hosts in the
# pipeline conf below (192.168.6.201) — confirm which cluster is intended.
xpack.monitoring.elasticsearch.hosts: ["http://192.168.0.11:9201","http://192.168.0.11:9202"]
xpack.monitoring.enabled: true # collect monitoring data
pipelines.yml内容如下:
# pipelines.yml — registers one pipeline per table, each loading its own .conf.
# NOTE(review): in the original, every `path.config` line sat at column 0
# instead of being nested under its `- pipeline.id` item — a YAML parse error.
# Re-indented so each pair forms one sequence entry.
- pipeline.id: RESOURCE_CLASSIFY
  path.config: "/usr/share/logstash/config/pipelines/RESOURCE_CLASSIFY.conf"
- pipeline.id: RESOURCE_COMPANY
  path.config: "/usr/share/logstash/config/pipelines/RESOURCE_COMPANY.conf"
- pipeline.id: RESOURCE_DANGER_COAL_MINE
  path.config: "/usr/share/logstash/config/pipelines/RESOURCE_DANGER_COAL_MINE.conf"
- pipeline.id: RESOURCE_DANGER_ENTERPRISE
  path.config: "/usr/share/logstash/config/pipelines/RESOURCE_DANGER_ENTERPRISE.conf"
单个表的同步文件如下:
# Pipeline: incremental sync of one Oracle table into Elasticsearch.
input {
  # NOTE(review): the original also declared a bare `stdin {}` input. In a
  # container started detached/nohup there is no usable stdin, so that input
  # only produces errors at startup; removed.
  jdbc {
    # Oracle connection string: host:port/service-name
    jdbc_connection_string => "jdbc:oracle:thin:@//60.173.243.***:1521/ORCL"
    # database credentials
    jdbc_user => "***"
    jdbc_password => "***"
    # Oracle JDBC driver jar, mounted into the container (see docker-compose volumes)
    jdbc_driver_library => "/usr/share/logstash/config/ojdbc8-19.3.0.0.jar"
    # the Java:: prefix works around the Oracle driver-loading issue in Logstash's JRuby
    jdbc_driver_class => "Java::oracle.jdbc.driver.OracleDriver"
    # persist sync progress across restarts (booleans/ints written natively,
    # not as quoted strings, per plugin documentation)
    record_last_run => true
    # use_column_value=false → :sql_last_value is the previous run's timestamp;
    # tracking_column is ignored in this mode — kept only to document intent
    use_column_value => false
    tracking_column => "id"
    last_run_metadata_path => "/usr/share/logstash/config/info.txt"
    clean_run => false
    # page large result sets so a full sync doesn't load everything at once
    jdbc_paging_enabled => true
    jdbc_page_size => 50000
    jdbc_default_timezone => "Asia/Shanghai"  # interpret DB timestamps as China Standard Time
    # incremental query: only rows updated since the previous run
    statement => "select 'RESOURCE_EXPERT_CERTIFICATE' table_name , a.* from RESOURCE_EXPERT_CERTIFICATE a where UPDATE_DATE > :sql_last_value"
    # cron-style schedule; fields left-to-right are: minute hour day-of-month
    # month day-of-week (the original comment wrongly called the last field
    # "year"). All-asterisks = run every minute.
    schedule => "* * * * *"
    type => "tstype"
  }
}

filter {
  mutate {
    rename => { "@tags" => "channel" }
  }
  # NOTE(review): the two ruby blocks shift @timestamp forward by 8 hours,
  # storing local wall-clock time in a UTC field. Kibana converts UTC to the
  # browser timezone, so this usually double-applies the offset — kept as-is
  # because downstream consumers may depend on it; confirm before changing.
  ruby {
    code => "event.set('timestamp', event.get('@timestamp').time.localtime + 8*60*60)"
  }
  ruby {
    code => "event.set('@timestamp',event.get('timestamp'))"
  }
  mutate {
    remove_field => ["timestamp"]
  }
}

output {
  elasticsearch {
    # target ES nodes — NOTE(review): these (192.168.6.201) differ from the
    # monitoring hosts in logstash.yml (192.168.0.11); verify the intended cluster
    hosts => ["192.168.6.201:9201","192.168.6.201:9202"]
    # index name (customizable)
    index => "expert"
    # use the table's ID column so re-synced rows overwrite instead of duplicating
    document_id => "%{id}"
    # `document_type => "_doc"` removed: mapping types are deprecated in ES 7.x
    # and the output plugin already defaults correctly against a 7.x cluster
  }
  stdout {
    # one JSON document per line, for debugging via container logs
    codec => json_lines
  }
}
启动:
docker-compose up -d   # -d 为官方的后台运行方式,替代 nohup ... &(后者在退出终端后易产生僵尸进程且日志散落在 nohup.out)