Deploying a log center with Docker: VictoriaLogs + Logstash + Kibana + Kafka

"Punching ES, kicking Loki: the VictoriaLogs stable release is here" – 快猫星云 Flashcat

VictoriaMetrics/VictoriaMetrics: VictoriaMetrics: fast, cost-effective monitoring solution and time series database

Deploying an ELK log system + Kafka with Docker – CSDN blog

docker run --rm -it -p 9428:9428 -v ./victoria-logs-data:/victoria-logs-data \
  docker.io/victoriametrics/victoria-logs:v1.0.0-victorialogs

Pulling the image from Docker Hub is enough to get VictoriaLogs (VL) running quickly.
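Once the container is up, a quick smoke test is to push one log line and query it back over VictoriaLogs' HTTP API. This is a minimal sketch assuming the default port mapping above and the documented jsonline/LogsQL endpoints; field names such as app are only examples:

# ingest one test log line via the jsonline API
curl -X POST -H 'Content-Type: application/stream+json' \
  --data-binary '{"_msg":"hello VictoriaLogs","_time":"0","app":"smoke-test"}' \
  'http://localhost:9428/insert/jsonline?_msg_field=_msg&_time_field=_time&_stream_fields=app'

# query it back with LogsQL
curl http://localhost:9428/select/logsql/query -d 'query=app:smoke-test'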

Configure the log collection setup

Create the Kafka data directory and grant it the required permissions:

mkdir kafka_data
chmod 777 kafka_data

Create the VictoriaLogs data directory and grant it the required permissions (the name matches the ./victoria-logs-data volume used below):

mkdir victoria-logs-data
chmod 777 victoria-logs-data

The Logstash image is comparatively hard to find; the docker.elastic.co registry is used below.

If you plan to use Kafka's KRaft mode (Kafka Raft), ZooKeeper can be removed entirely. KRaft is the direction Kafka is heading and is designed to replace ZooKeeper for metadata management. To configure Kafka for KRaft mode:

1. Remove ZooKeeper:

  • Delete the ZooKeeper service from docker-compose.yml.

2. Configure Kafka for KRaft mode:

  • Make sure KAFKA_ENABLE_KRAFT is set to "yes".
  • Set KAFKA_KRAFT_CLUSTER_ID to a unique UUID (see the sketch after this list for one way to generate one).
  • Remove KAFKA_CFG_ZOOKEEPER_CONNECT.
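Any unique UUID will do for the cluster ID. Two common ways to generate one on the host (assuming uuidgen or a Linux kernel is available) are:

uuidgen | tr -d '-'                 # strip the dashes to match the format used below
cat /proc/sys/kernel/random/uuid    # Linux alternative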

docker compose configuration file

version: '3.3'
services:
  kafka:
    image: bitnami/kafka:3.3.2
    container_name: kafka1
    hostname: kafka
    volumes:
      - ./kafka_data:/bitnami/kafka
    ports:
      - "9092:9092"
      - "29093:9093"  # 修改外部端口为 29093
    environment:
      # KRaft 模式配置
      KAFKA_ENABLE_KRAFT: "yes"
      KAFKA_CFG_PROCESS_ROLES: "broker,controller"
      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
      KAFKA_CFG_LISTENERS: "PLAINTEXT://:9092,CONTROLLER://:9093"
      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT"
      KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://123.207.73.78:9092"
      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
      KAFKA_KRAFT_CLUSTER_ID: "FDAF211E728140229F6FCDF4ADDC0B32"
      ALLOW_PLAINTEXT_LISTENER: "yes"
      KAFKA_BROKER_ID: 1
      KAFKA_CFG_NODE_ID: 1
      KAFKA_HEAP_OPTS: "-Xmx512M -Xms256M"
    restart: always
  victorialogs:
    image: docker.io/victoriametrics/victoria-logs:v1.0.0-victorialogs
    container_name: victorialogs
    hostname: victorialogs
    volumes:
      - ./victoria-logs-data:/victoria-logs-data # grant the directory permissions first: chmod 777 victoria-logs-data
    restart: always
    ports:
      - "9428:9428"
  logstash:
    image: docker.elastic.co/logstash/logstash:7.4.2
    container_name: logstash
    volumes:
      - ./conf.d/syslog.conf:/usr/share/logstash/pipeline/logstash.conf
      - ./logstash.yml:/usr/share/logstash/config/logstash.yml
    depends_on:
      - victorialogs
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms128m"
      victorialogs_HOST: "http://123.207.73.78:9428"
  kibana:
    image: kibana:7.4.2
    restart: always
    container_name: kibana1
    ports:
      - 5601:5601
    environment:
      victorialogs_URL: "http://123.207.73.78:9428"
    depends_on:
      - victorialogs
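With docker-compose.yml in place (and the Logstash files from the sections below created alongside it), the host directories referenced by the volume mounts must exist before starting. A minimal sequence, assuming everything lives in the same directory as the compose file:

mkdir -p kafka_data victoria-logs-data conf.d
chmod 777 kafka_data victoria-logs-data
docker compose up -d
docker compose ps   # all four containers should show as running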

 

Test whether Kafka is configured correctly

# enter the container
docker exec -it kafka1 bash
# go to the bin directory (bitnami image layout)
cd /opt/bitnami/kafka/bin/
# create a test topic (KRaft mode: use --bootstrap-server, not --zookeeper)
./kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1 --topic elk-log
# list topics
./kafka-topics.sh --list --bootstrap-server localhost:9092
# delete a topic
./kafka-topics.sh --delete --bootstrap-server localhost:9092 --topic elk-log

# verify messaging works
# producer: send messages
./kafka-console-producer.sh --bootstrap-server localhost:9092 --topic elk-log
# consumer: subscribe to messages
./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic elk-log
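Once Logstash is wired up (next section), an end-to-end smoke test is to publish one JSON event onto the topic its kafka input reads. The topic name below matches the syslog.conf further down; the message body is only an assumption chosen so the grok filter there can match a timestamp:

echo '{"message":"2024-01-01T12:00:00 hello from kafka"}' | \
  ./kafka-console-producer.sh --bootstrap-server localhost:9092 --topic system-provder-log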

Configure Logstash

Note: the relative volume paths in docker-compose.yml (./logstash.yml and ./conf.d/syslog.conf) resolve against the directory containing the compose file, so either keep docker-compose.yml in /data/elk/logstash or adjust the mounts.

# create the mounted config files
mkdir -p /data/elk/logstash
vi /data/elk/logstash/logstash.yml

# file contents
http.host: "0.0.0.0"
# there is no Elasticsearch in this stack, so X-Pack monitoring is disabled
xpack.monitoring.enabled: false
# the compose file mounts the pipeline into the default path inside the container
path.config: /usr/share/logstash/pipeline/*.conf
path.logs: /var/log/logstash

mkdir -p /data/elk/logstash/conf.d
vi /data/elk/logstash/conf.d/syslog.conf

# file contents
input {
  kafka {
    bootstrap_servers => "kafka:9092"   # the Kafka service hostname inside the compose network
    topics => ["system-provder-log"]    # Kafka topic
    codec => "json"
    auto_offset_reset => "earliest"     # consume from the earliest offset
    decorate_events => true             # also attach topic/offset/group/partition metadata to the event
    type => "system_log"                # common option for all plugins; useful when an input has several sources
  }
}
# work around the 8-hour offset from China local time (UTC+8)
filter {
  ruby {
    code => "event.set('timestamp', event.get('@timestamp').time.localtime + 8*60*60)"
  }

  # grok: extract the timestamp from the log line with a regex
  grok {
    match => { "message" => "%{TIMESTAMP_ISO8601:timestamp}" }
  }

  mutate {
    convert => ["timestamp", "string"]
    gsub => ["timestamp", "T([\S\s]*?)Z", ""]
    gsub => ["timestamp", "-", "."]
  }
}
output {
  # remove this block if console output is not needed
  stdout {
    codec => rubydebug { metadata => true }   # print events plus @metadata to the Logstash console
  }

  # use type to tell apart logs from different sources
  if [type] == "system_log" {
    elasticsearch {
      hosts => ["http://localhost:9200"]
      index => "system_log-%{timestamp}"
    }
  }
}
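Note that the output above still writes to a local Elasticsearch, which this stack does not run. VictoriaLogs exposes an Elasticsearch-compatible bulk ingestion endpoint, so the same elasticsearch output plugin can point at it instead. A minimal sketch; the victorialogs hostname comes from the compose file above, and the field names are assumptions to adjust to your events:

output {
  elasticsearch {
    hosts => ["http://victorialogs:9428/insert/elasticsearch/"]  # VictoriaLogs ES-compatible endpoint
    parameters => {
      "_msg_field"     => "message"      # field holding the log text
      "_time_field"    => "@timestamp"   # field holding the event time
      "_stream_fields" => "type"         # fields used as the log stream identity
    }
  }
}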


Log send queue: creating the sender

package com.it.weblogclient.LogDeque;

import io.micrometer.common.util.StringUtils;
import jakarta.annotation.PostConstruct;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

import java.util.concurrent.LinkedBlockingDeque;
@Component
public class LogDeque {
    /**
     * Local in-memory queue buffering log messages.
     */
    private static final LinkedBlockingDeque<String> logMsgs = new LinkedBlockingDeque<>();

    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    public void log(String msg) {
        logMsgs.offer(msg);
    }

    /**
     * Start the consumer thread only after dependency injection has finished;
     * starting it in the constructor would run before kafkaTemplate is injected.
     */
    @PostConstruct
    public void init() {
        new LogThread().start();
    }

    /**
     * Thread that drains the queue and asynchronously sends the log messages to the MQ (Kafka).
     */
    class LogThread extends Thread {
        @Override
        public void run() {
            while (true) {
                String msgLog = logMsgs.poll();
                if (!StringUtils.isEmpty(msgLog)) {
                    // send the message to both topics
                    kafkaTemplate.send("weblog", msgLog);
                    kafkaTemplate.send("mongolog", msgLog);
                }
                // avoid busy-spinning the CPU
                try {
                    Thread.sleep(200);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
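For the KafkaTemplate above to be auto-configured, the Spring Boot service needs to know the broker address. A minimal application.yml sketch; the address matches the advertised listener in the compose file above, and the serializers are assumptions suitable for plain String messages:

spring:
  kafka:
    bootstrap-servers: 123.207.73.78:9092
    producer:
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer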



Log aspect

Writes the request info of annotated methods into the queue.

package com.it.weblogclient.aop;


import com.alibaba.fastjson.JSONObject;
import com.it.weblogclient.LogDeque.LogDeque;
import jakarta.servlet.http.HttpServletRequest;
import lombok.extern.slf4j.Slf4j;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;

import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Date;

@Aspect
@Component
@Slf4j
public class MethodAspect {

    @Autowired
    private LogDeque logDeque;
    private static final Logger logger = LoggerFactory.getLogger(MethodAspect.class);

    @Pointcut("@annotation(com.it.weblogclient.annotation.Weblog)")
    public void weblogPointcut() {
    }

    @Before("weblogPointcut()")
    public void methodBefore(JoinPoint joinPoint) {
        ServletRequestAttributes requestAttributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        HttpServletRequest request = requestAttributes.getRequest();

        JSONObject jsonObject = new JSONObject();
        // set the date format
        SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        jsonObject.put("request_time", df.format(new Date()));
        jsonObject.put("request_url", request.getRequestURL().toString());
        jsonObject.put("request_ip", request.getRemoteAddr());
        jsonObject.put("request_method", request.getMethod());
        jsonObject.put("request_args", Arrays.toString(joinPoint.getArgs()));

        // deliver the log message to the MQ
        String logMsg = jsonObject.toJSONString();
        log.info("<AOP log ===> message delivered to MQ: {}>", logMsg);

        // enqueue the msg
        logDeque.log(logMsg);
    }
}
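The pointcut references com.it.weblogclient.annotation.Weblog, which is not shown above. A minimal sketch of such a marker annotation could look like this:

package com.it.weblogclient.annotation;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Marker annotation picked up by the @Before advice in MethodAspect.
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface Weblog {
}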


