Docker Compose Deployment Series: Building ELK and Syncing MySQL Data

# Pull the image
docker pull mysql:5.7
# Start the container
docker run -p 3306:3306 --name mysql -v /data/mysql/log:/var/log/mysql -v /data/mysql/data:/var/lib/mysql -v /data/mysql/conf:/etc/mysql -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7
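A quick sanity check before moving on (standard docker commands): confirm the container is running and scan its startup logs for errors.

# Verify the container is up and check the startup logs
docker ps --filter name=mysql
docker logs mysql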

# Configure MySQL
vim /data/mysql/conf/my.cnf

[client]
default-character-set=utf8
[mysql]
default-character-set=utf8
[mysqld]
init_connect='SET collation_connection = utf8_unicode_ci'
init_connect='SET NAMES utf8'
character-set-server=utf8
collation-server=utf8_unicode_ci
skip-character-set-client-handshake
skip-name-resolve
# Restart MySQL to apply the configuration
docker restart mysql
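To verify the UTF-8 settings took effect after the restart, you can query the server variables from inside the container (this assumes the root password 123456 set above):

# The character-set and collation variables should now report utf8
docker exec -it mysql mysql -uroot -p123456 -e "SHOW VARIABLES LIKE 'character%'; SHOW VARIABLES LIKE 'collation%';"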

# Create the table
DROP TABLE IF EXISTS `sys_log`;
CREATE TABLE `sys_log`  (
  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'Log primary key',
  `title` varchar(50) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT 'Module title',
  `business_type` int(2) NULL DEFAULT 0 COMMENT 'Business type (0 other, 1 insert, 2 update, 3 delete)',
  `method` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT 'Method name',
  `request_method` varchar(10) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT 'Request method',
  `oper_name` varchar(50) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT 'Operator',
  `oper_url` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT 'Request URL',
  `oper_ip` varchar(128) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT 'Host address',
  `oper_time` datetime(0) NULL DEFAULT NULL COMMENT 'Operation time',
  PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 1585197503834284034 CHARACTER SET = utf8 COLLATE = utf8_general_ci COMMENT = 'Operation log records' ROW_FORMAT = Dynamic;

SET FOREIGN_KEY_CHECKS = 1;

# ELK setup preparation
# elasticsearch mount directories
mkdir -p /data/elk/elasticsearch/{config,plugins,data,logs}
# Grant permissions
chmod 777 /data/elk/elasticsearch/{config,plugins,data,logs}

# kibana mount directory
mkdir -p /data/elk/kibana/config

# logstash mount directory
mkdir -p /data/elk/logstash/config
# Grant permissions
chmod 777 /data/elk/logstash/config

# Configure elasticsearch
vim /data/elk/elasticsearch/config/elasticsearch.yml

http.host: 0.0.0.0
xpack.security.enabled: false
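Elasticsearch in Docker usually requires the host kernel setting vm.max_map_count to be at least 262144, or the container fails its bootstrap checks. If your host does not have this set already:

# Raise the mmap count for the current boot
sysctl -w vm.max_map_count=262144
# Persist it across reboots
echo "vm.max_map_count=262144" >> /etc/sysctl.conf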

# Configure kibana
vim /data/elk/kibana/config/kibana.yml

server.host: 0.0.0.0
elasticsearch.hosts: [ "http://192.168.5.11:9200" ]

# Configure logstash
vim /data/elk/logstash/config/logstash.yml

http.host: 0.0.0.0
xpack.monitoring.elasticsearch.hosts: [ "http://192.168.5.11:9200" ]

# Create the last-run metadata file (referenced by last_run_metadata_path below)
touch /data/elk/logstash/config/log
chmod 777 /data/elk/logstash/config/log

# Download mysql-connector-java-8.0.28.jar and place it in /data/elk/logstash/config/
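If you want a direct download, the driver is published on Maven Central; the URL below points at the 8.0.28 artifact used here:

wget -P /data/elk/logstash/config/ https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.28/mysql-connector-java-8.0.28.jar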
# Configure the logstash pipeline file
vim /data/elk/logstash/config/logstash.conf
input {
    stdin {
    }
    jdbc {
      jdbc_connection_string => "jdbc:mysql://192.168.5.11:3306/test?useUnicode=true&characterEncoding=utf8&serverTimezone=UTC"
      jdbc_user => "root"
      jdbc_password => "123456"
      jdbc_driver_library => "/usr/share/logstash/config/mysql-connector-java-8.0.28.jar"
      jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
      jdbc_paging_enabled => "true"
      jdbc_page_size => "300000"
      statement => "SELECT id, title, business_type, method, request_method, oper_name, oper_url, oper_ip, oper_time FROM sys_log"
      schedule => "*/1 * * * *"
      use_column_value => false
      tracking_column_type => "timestamp"
      tracking_column => "oper_time"
      record_last_run => true
      jdbc_default_timezone => "Asia/Shanghai"
      last_run_metadata_path => "/usr/share/logstash/config/log"
    }
}

output {
    elasticsearch {
        hosts => ["192.168.5.11:9200"]
        index => "sys_log"
        document_id => "%{id}"
    }
    stdout {
        codec => json_lines
    }
}
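Note that the statement above re-reads the entire table on every scheduled run; because document_id is set to %{id}, re-synced rows overwrite their existing ES documents rather than duplicating them. For a large table, an incremental pull is the usual alternative. A minimal sketch of just the changed jdbc settings, using the plugin's built-in :sql_last_value placeholder (all other settings stay as above):

    jdbc {
      # ... connection and driver settings as above ...
      # Track the oper_time column value instead of the last run time
      use_column_value => true
      tracking_column => "oper_time"
      tracking_column_type => "timestamp"
      # Only pull rows newer than the last synced oper_time
      statement => "SELECT id, title, business_type, method, request_method, oper_name, oper_url, oper_ip, oper_time FROM sys_log WHERE oper_time > :sql_last_value"
    }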

# Configure the pipeline
vim /data/elk/logstash/config/pipelines.yml

- pipeline.id: sys_log
  path.config: "/usr/share/logstash/config/logstash.conf"
  
# Write the docker-compose file for a one-command setup
vim docker-compose.yml

version: '3'
services:
  elasticsearch:
    image: elasticsearch:7.17.7
    container_name: elasticsearch
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      - cluster.name=elasticsearch
      - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms64m -Xmx512m"
    volumes:
      - /data/elk/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /data/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins
      - /data/elk/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/elk/elasticsearch/logs:/usr/share/elasticsearch/logs

  kibana:
    image: kibana:7.17.7
    container_name: kibana
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch
    environment:
      I18N_LOCALE: zh-CN
    volumes:
      - /data/elk/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml

  logstash:
    image: logstash:7.17.7
    container_name: logstash
    ports:
      - "5044:5044"
    volumes:
      - /data/elk/logstash/config:/usr/share/logstash/config
    depends_on:
      - elasticsearch

# Run from the directory containing docker-compose.yml
docker-compose up -d  
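Once the stack is up, confirm all three containers are healthy before opening Kibana (standard docker and curl commands):

# Check container status
docker-compose ps
# Confirm elasticsearch is responding
curl http://192.168.5.11:9200/_cluster/health?pretty
# Watch logstash load the pipeline
docker logs -f logstash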

# Test access to Kibana
http://192.168.5.11:5601/app/home#/



# After installation, the containers can also be started individually
# Start elasticsearch
docker run --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx512m" -v /data/elk/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /data/elk/elasticsearch/data:/usr/share/elasticsearch/data -v /data/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins -d elasticsearch:7.17.7
# Start kibana
docker run --name kibana -e ELASTICSEARCH_HOSTS=http://192.168.5.11:9200 -p 5601:5601 -d kibana:7.17.7
# Start logstash
docker run --name logstash -p 5044:5044 -v /data/elk/logstash/config:/usr/share/logstash/config -d logstash:7.17.7

Create the index in Kibana (Dev Tools, using the PUT request below), then insert test data into the database's sys_log table (example rows follow the index definition); it will be synced to ES automatically.

# Create an index structure matching the database table
PUT /sys_log
{
  "settings": {
    "number_of_shards": 1,
    "number_of_replicas": 0,
    "index": {
      "max_result_window": 100000000
    }
  },
  "mappings": {
    "dynamic": "strict",
    "properties": {
      "@timestamp": {
        "type": "date"
      },
      "@version": {
        "type": "text",
        "fields": {
          "keyword": {
            "type": "keyword",
            "ignore_above": 256
          }
        }
      },

      "business_type": {
        "type": "integer"
      },
      "title": {
        "type": "text"
      },
      "method": {
        "type": "text"
      },
      "request_method": {
        "type": "text"
      },
      "oper_name": {
        "type": "text"
      },
      "oper_url": {
        "type": "text"
      },
      "oper_ip": {
        "type": "text"
      },
      "oper_time": {
        "type": "date"
      },
      "id": {
        "type": "long"
      }
    }
  }
}
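With the index in place, here are a couple of hypothetical test rows for the MySQL side (any values matching the column types will do; id is auto-generated):

INSERT INTO `sys_log` (`title`, `business_type`, `method`, `request_method`, `oper_name`, `oper_url`, `oper_ip`, `oper_time`)
VALUES ('User module', 1, 'com.example.UserController.add()', 'POST', 'admin', '/user/add', '192.168.5.100', NOW()),
       ('User module', 2, 'com.example.UserController.edit()', 'PUT', 'admin', '/user/edit', '192.168.5.100', NOW());

Since the pipeline runs every minute, the rows should appear in the sys_log index shortly after.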


# Query the index
GET /sys_log/_search
{
  "query": {
    "match_all": {}
  }
}
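The document count should match the MySQL row count once a sync cycle has completed:

GET /sys_log/_count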
