1. Pull the image first
docker pull logstash:7.6.2
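A quick, optional check that the image landed locally (nothing version-specific is assumed here):
docker images | grep logstash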
2. Create the host directories that will be mapped into the container
mkdir -p /usr/share/logstash/conf.d
mkdir /usr/share/logstash/log
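The official Logstash image does not run as root (typically the logstash user, UID 1000), so if the container later fails to read the mounted files, you may need to loosen permissions on these host directories. A blunt but common workaround, only if you hit permission errors:
chmod -R 777 /usr/share/logstash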
3. Create the configuration file under /usr/share/logstash
vim logstash.yml
Contents:
path.config: /usr/share/logstash/conf.d/*.conf
path.logs: /usr/share/logstash/log
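Depending on your environment you may also want the Logstash HTTP API reachable from outside the container and monitoring explicitly switched off; both are standard logstash.yml settings in 7.x, shown here only as an optional addition:
http.host: "0.0.0.0"
xpack.monitoring.enabled: false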
4. Create the pipeline configuration file under /usr/share/logstash/conf.d
vim test.conf
Contents:
input {
  stdin {
  }
  jdbc {
    jdbc_connection_string => "jdbc:mysql://url:port/database?useSSL=false&useUnicode=true&characterEncoding=utf-8&useLegacyDatetimeCode=false&allowPublicKeyRetrieval=true&serverTimezone=Asia/Shanghai"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    jdbc_user => "root"
    jdbc_password => "root"
    jdbc_paging_enabled => "true"
    jdbc_page_size => "50000"
    jdbc_default_timezone => "Asia/Shanghai"
    jdbc_driver_library => "/usr/share/logstash/conf.d/mysql-connector-java-8.0.28.jar" # path to the MySQL JDBC driver jar
    statement => "SELECT * FROM table_name"
    schedule => "* * * * *" # cron expression: how often to sync the data
    lowercase_column_names => false # keep the original case of SQL column names
  }
}
filter {
  date {
    match => ["time_stamp", "yyyy-MM-dd HH:mm:ss.SSS"] # parse the time_stamp column from the table
    target => "@time_stamp" # and store the result in the @time_stamp field
  }
}
output {
  elasticsearch {
    hosts => ["http://url:9200"] # address of your Elasticsearch instance
    index => "your_index_name" # index to write the data into (the original example used access_log)
    document_id => "%{id}" # use the id column from the query above as the document _id
  }
  stdout {
    codec => json_lines # print each event to the console as a JSON line
  }
}
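The statement above re-reads the whole table on every scheduled run. If you only want new rows, the jdbc input supports incremental syncing through a tracking column and the :sql_last_value placeholder. A minimal sketch, assuming the table has an auto-increment id column; only the extra options inside the jdbc block are shown:
  jdbc {
    # ...same connection settings as above...
    use_column_value => true
    tracking_column => "id"
    tracking_column_type => "numeric"
    last_run_metadata_path => "/usr/share/logstash/conf.d/.last_run"
    statement => "SELECT * FROM table_name WHERE id > :sql_last_value"
  }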
Then, in this same directory (/usr/share/logstash/conf.d), download and extract the MySQL connector:
wget https://downloads.mysql.com/archives/get/p/3/file/mysql-connector-java-8.0.28.zip
unzip mysql-connector-java-8.0.28.zip
cd mysql-connector-java-8.0.28
mv mysql-connector-java-8.0.28.jar ../
cd ../
rm -rf mysql-connector-java-8.0.28
rm -rf mysql-connector-java-8.0.28.zip
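Confirm the driver jar is where jdbc_driver_library expects it, so the pipeline can load it later:
ls -l /usr/share/logstash/conf.d/mysql-connector-java-8.0.28.jar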
5. Run the container
docker run -di --restart=always --log-driver json-file --log-opt max-size=100m --log-opt max-file=2 -p 5044:5044 -p 9600:9600 --name logstash -v /usr/share/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml -v /usr/share/logstash/conf.d/:/usr/share/logstash/conf.d/ logstash:7.6.2
docker exec -it logstash /bin/bash
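Once inside the container you can validate the pipeline syntax instead of waiting for the schedule to fire (the conf.d mount makes the file visible at the same path inside the container). If the check complains that the data directory is locked by the running instance, point it at a temporary one with --path.data:
bin/logstash --config.test_and_exit -f /usr/share/logstash/conf.d/test.conf --path.data /tmp/logstash-test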
Check the output:
docker logs -f logstash
If you can see the rows from the database being fetched in the logs, the sync is working.
Alternatively, open Kibana and inspect the index.
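If you prefer the command line over Kibana, you can ask Elasticsearch directly whether the index exists and how many documents it holds (replace url and your_index_name with your own values):
curl http://url:9200/_cat/indices?v
curl http://url:9200/your_index_name/_count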