前言
Docker设置挂载目录或文件时,注意将宿主机对应文件或目录设置好权限。
如果不知道容器哪些文件需要挂载,可以先不挂载任何文件把容器运行起来,将需要的文件从容器拷贝到宿主机,然后删除该容器,重新创建容器并挂载相应文件。
1. 准备镜像
docker pull mysql:8.0.23
docker pull elasticsearch:7.17.0
docker pull kibana:7.17.0
docker pull logstash:7.17.0
2. 创建及规划网络
docker network create -d bridge --subnet 172.18.1.0/28 elk
组件 | IP |
---|---|
MySQL | 172.18.1.2 |
ElasticSearch | 172.18.1.3 |
Kibana | 172.18.1.4 |
Logstash | 172.18.1.5 |
3. 运行MySQL
3.1 启动容器
docker run --name=mysql -itd \
-p 3306:3306 --restart=always \
-v /usr/local/mysql/data:/var/lib/mysql \
-v /usr/local/mysql/log:/var/log/mysql \
-v /usr/local/mysql/conf:/etc/mysql/conf.d \
-v /usr/local/mysql/files:/var/lib/mysql-files \
-e MYSQL_ROOT_PASSWORD=123456 \
--privileged=true --net elk --ip 172.18.1.2 mysql:8.0.23
3.2 MySQL配置root账户
- 进入镜像 docker exec -it mysql /bin/bash
- mysql -uroot -p
- use mysql;
- 修改加密规则
ALTER USER 'root'@'localhost' IDENTIFIED BY '123456' PASSWORD EXPIRE NEVER;
- 更新一下用户的密码
ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '123456';
- 查看root连接权限
select host from user where user='root';
- 允许其他ip访问
update user set host = '%' where user ='root';
- 刷新权限
flush privileges;
3.3 准备数据
- 创建product数据库
- 创建product表,并初始化数据
#①创建product数据库
#②创建product表,并初始化数据
-- Recreate the demo `product` table and seed it with sample rows.
-- Charset kept as utf8/utf8_general_ci to match the tutorial; new schemas
-- should normally prefer utf8mb4 for full Unicode coverage.
DROP TABLE IF EXISTS `product`;
CREATE TABLE `product` (
  -- MySQL 8 deprecates integer display widths, so plain `int` (not int(0))
  `id` int NOT NULL AUTO_INCREMENT,
  `name` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
  `price` decimal(10, 2) DEFAULT NULL,
  -- update_time drives the Logstash incremental sync (:sql_last_value)
  `update_time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;
-- Explicit column list guards against schema drift; one multi-row INSERT
-- is also a single round trip instead of eight.
INSERT INTO `product` (`id`, `name`, `price`, `update_time`) VALUES
  (1, '华为手机', 8888.00, '2023-02-23 23:30:04'),
  (2, '苹果手机', 5500.00, '2023-02-22 22:30:36'),
  (3, '小米手机', 3200.00, '2023-02-23 00:04:14'),
  (4, 'vivo手机', 4000.00, '2023-02-23 00:05:54'),
  (5, 'OPPO手机', 4200.00, '2023-02-23 00:06:10'),
  (6, '荣耀手机', 2000.00, '2023-02-23 00:06:24'),
  (7, '一加手机', 4500.00, '2023-02-23 00:06:42'),
  (8, '三星手机', 4800.00, '2023-02-23 00:07:08');
4. 运行es
注意当不知道挂载文件时请参照前言。
docker run --name es -p 9200:9200 -p 9300:9300 \
-e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx128m" \
-v /usr/local/docker/es/data/:/usr/share/elasticsearch/data \
-v /usr/local/docker/es/plugins:/usr/share/elasticsearch/plugins \
-v /usr/local/docker/es/logs:/usr/share/elasticsearch/logs \
-d --net elk --ip 172.18.1.3 elasticsearch:7.17.0
5. 运行kibana
- 运行
docker run --name kibana -d -p 5601:5601 --net elk --ip 172.18.1.4 kibana:7.17.0
- 配置
(1) docker cp 拷贝 kibana/config 到宿主机对应的配置文件夹
docker cp kibana:/usr/share/kibana/config /Users/johnny/Desktop/kibana
(2) 修改kibana.yml
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://172.18.1.3:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN" # 中文显示
(3) 修改后的kibana.yml 再拷贝回容器/usr/share/kibana/config文件夹下,并重启容器
6. 运行Logstash
6.1 运行容器
docker run --name logstash --restart=always \
-p 5044:5044 -p 9600:9600 \
-e ES_JAVA_OPTS="-Duser.timezone=Asia/Shanghai" \
-v /usr/local/docker/logstash/config:/usr/share/logstash/config \
-v /usr/local/docker/logstash/pipeline:/usr/share/logstash/pipeline \
--privileged=true -d --net elk --ip 172.18.1.5 logstash:7.17.0
6.2 Mysql插件安装
- 进入容器
docker exec -it logstash bash
- 查询logstash-input-jdbc插件
./bin/logstash-plugin list --verbose
- 如果没有就安装插件(一般默认安装)
./bin/logstash-plugin install logstash-input-jdbc
- 退出容器
exit
6.3 同步配置
- 下载jar包“mysql-connector-java-8.0.23.jar”,存放在/usr/local/docker/logstash/config/jars目录下
- 在/usr/local/docker/logstash/config/conf.d下创建jdbc.conf文件
input {
  jdbc {
    # Database connection (MySQL container on the elk network)
    jdbc_connection_string => "jdbc:mysql://172.18.1.2:3306/product?serverTimezone=Asia/Shanghai&characterEncoding=utf8&useSSL=false"
    jdbc_user => "root"
    jdbc_password => "123456"
    # JDBC driver jar, mounted into the container via the config volume
    jdbc_driver_library => "/usr/share/logstash/config/jars/mysql-connector-java-8.0.23.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    jdbc_paging_enabled => "true"
    jdbc_page_size => "50000"
    codec => plain { charset => "UTF-8" }
    # Incremental tracking: without use_column_value => true, tracking_column
    # is ignored and :sql_last_value falls back to the last run time.
    use_column_value => true
    # Column whose value is tracked between runs
    tracking_column => "update_time"
    # update_time is a datetime, so track it as a timestamp (default is numeric)
    tracking_column_type => "timestamp"
    # Where the last tracked value is persisted between runs
    last_run_metadata_path => "/usr/share/logstash/config/lastrun/logstash_jdbc_last_run"
    # Time zone used when converting timestamps
    jdbc_default_timezone => "Asia/Shanghai"
    # External SQL file (alternative to the inline statement below)
    # statement_filepath => ""
    # update_time must be selected so the tracking column is present in each row
    statement => "SELECT p.id AS id,p.name AS name,p.price AS price,p.update_time AS update_time FROM product p WHERE p.update_time > :sql_last_value"
    # If true, the last_run_metadata_path record is cleared and every run
    # re-imports all rows from scratch
    clean_run => false
    # Cron-style schedule; "* * * * *" runs the import once per minute
    schedule => "* * * * *"
  }
}
output {
  elasticsearch {
    # Target Elasticsearch host
    hosts => "172.18.1.3:9200"
    # Target index name
    index => "product"
    # Document type (deprecated since ES 7.x)
    #document_type => "merchanteso"
    # Document id mapped from the table's primary key so updates overwrite
    document_id => "%{id}"
  }
  stdout {
    # Emit each event as one JSON line (debugging aid)
    codec => json_lines
  }
}
- 更改/usr/local/docker/logstash/config/logstash.yml文件
http.host: "0.0.0.0"
#es地址
xpack.monitoring.elasticsearch.hosts: [ "http://172.18.1.3:9200" ]
#同步配置文件地址
path.config: /usr/share/logstash/config/conf.d/*.conf
#日志地址
path.logs: /usr/share/logstash/logs
- 重启logstash
docker restart logstash