Spring Boot + Kafka + ELK: a dead-simple installation guide

The whole stack is installed with docker-compose.

I. Install ELK + Kafka

One thing to note: create the following directories (and the logstash.conf file) yourself before starting, because they are mounted into the containers.

# directories and files to create beforehand
d:\docker\elasticsearch\data
d:\docker\elasticsearch\logs
d:\docker\elasticsearch\plugins
d:\docker\logstash\pipeline\logstash.conf

docker-compose.yml:
version: '3.7'
services:
  elasticsearch:
    image: elasticsearch:7.6.2
    container_name: elasticsearch
    privileged: true
    user: root
    environment:
      # set the cluster name to elasticsearch
      - cluster.name=elasticsearch
      # start in single-node mode
      - discovery.type=single-node
      # JVM heap size
      - ES_JAVA_OPTS=-Xms1g -Xmx1g
    volumes:
      - d:\docker\elasticsearch\plugins:/usr/share/elasticsearch/plugins
      - d:\docker\elasticsearch\data:/usr/share/elasticsearch/data
      - d:\docker\elasticsearch\logs:/usr/share/elasticsearch/logs
    ports:
      - 9200:9200
      - 9300:9300

  logstash:
    image: logstash:7.6.2
    container_name: logstash
    ports:
       - 4560:4560
    privileged: true
    environment:
      - TZ=Asia/Shanghai
    logging:
      driver: "local"
      options:
        max-size: "10m"  # maximum size of a single log file
        max-file: "3"    # number of rotated log files to keep
    volumes:
      # mount the logstash pipeline configuration
      - d:\docker\logstash\pipeline\logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      - d:\docker\logstash\logs:/var/log/logstash
    depends_on:
      - elasticsearch 
    links:
      # elasticsearch can be reached from this container via the hostname "es"
      - elasticsearch:es
    

  kibana:
    image: kibana:7.6.2
    container_name: kibana
    ports:
        - 5601:5601
    privileged: true
    links:
      # elasticsearch can be reached from this container via the hostname "es"
      - elasticsearch:es
    depends_on:
      - elasticsearch 
    environment:
      # address used to reach elasticsearch
      - ELASTICSEARCH_HOSTS=http://es:9200
      # set the Kibana UI language to Chinese
      - I18N_LOCALE=zh-CN
    volumes:
      # mount the kibana log directory
      - d:\docker\kibana\logs:/var/log/kibana

  kafka:
    image: confluentinc/cp-kafka:latest
    container_name: kafka
    environment:
      - KAFKA_BROKER_ID=1
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      # do not use 127.0.0.1 here, otherwise clients will most likely fail to connect to kafka
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.2.7:9092
      - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
    ports:
      - 9092:9092
    depends_on:
      - zookeeper
  kafka-manager:
    image: sheepkiller/kafka-manager
    ports:
      - "9000:9000"
    environment:
      ZK_HOSTS: "zookeeper:2181"  # change this to your ZooKeeper address
    depends_on:
      - zookeeper

  zookeeper:
    image: confluentinc/cp-zookeeper:latest
    container_name: zookeeper
    environment:
      - ZOOKEEPER_CLIENT_PORT=2181
    ports:
      - 2181:2181
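
Before moving on to the application, it is worth checking that the broker is actually reachable at the advertised address (192.168.2.7:9092 in this compose file) and that the topic used later (kafka-elk-logg) can be written to. Below is a minimal smoke-test sketch using the plain kafka-clients producer, which the spring-kafka dependency from section II pulls in transitively; the KafkaSmokeTest class name is just for illustration, and the address and topic should be adjusted to your environment.

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class KafkaSmokeTest {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // must be the advertised listener address, not 127.0.0.1
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.2.7:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // send a test message to the topic that logstash will later consume from
            producer.send(new ProducerRecord<>("kafka-elk-logg", "smoke-test")).get();
            System.out.println("message sent, broker is reachable");
        }
    }
}
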

   

II. Spring Boot configuration

1. Add the Maven dependencies

		<!-- kafka -->
		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka</artifactId>
		</dependency>
		<!-- logstash integration with logback -->

		<dependency>
			<groupId>net.logstash.logback</groupId>
			<artifactId>logstash-logback-encoder</artifactId>
			<version>4.11</version>
			<exclusions>
				<exclusion>
					<groupId>ch.qos.logback</groupId>
					<artifactId>logback-core</artifactId>
				</exclusion>
			</exclusions>
		</dependency>

		<!-- logback integration with kafka -->
		<dependency>
			<groupId>com.github.danielwegener</groupId>
			<artifactId>logback-kafka-appender</artifactId>
			<version>0.1.0</version>
			<scope>runtime</scope>
		</dependency>
		<!-- lombok -->
		<dependency>
			<groupId>org.projectlombok</groupId>
			<artifactId>lombok</artifactId>
		</dependency>

2. Config class: resolve the host IP address so logback.xml can read it



import ch.qos.logback.core.PropertyDefinerBase;
import lombok.extern.log4j.Log4j2;

import java.net.InetAddress;
import java.net.UnknownHostException;

/**
 * Resolves the host IP address and exposes it to logback.
 */
@Log4j2
public class LogIpPropertyConfig extends PropertyDefinerBase {

    private static String ip;

    static {
        try {
            ip = InetAddress.getLocalHost().getHostAddress();
        } catch (UnknownHostException e) {
            log.error("Failed to resolve the host IP address for logging", e);
            ip = null;
        }
    }

    @Override
    public String getPropertyValue() {
        return ip;
    }
}

3. logback.xml configuration

<?xml version="1.0" encoding="UTF-8"?>
<configuration>




    <property name="pattern" value="[%date{yyyy-MM-dd HH:mm:ss.SSS}] %X{logthreadId} %-5level %logger{80} %method %line - %msg%n"/>
    <property name="charsetEncoding" value="UTF-8"/>

    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${pattern}</pattern>
            <charset>${charsetEncoding}</charset>
        </encoder>
    </appender>

    <springProperty scope="context" name="service" source="spring.application.name" defaultValue="UnknownService"/>
    <!-- note: use your own package path here -->
    <define name="ip" class="pile.merchant.config.kafka.LogIpPropertyConfig"></define>

    <appender name="KAFKA_APPENDER" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder class="com.github.danielwegener.logback.kafka.encoding.PatternLayoutKafkaMessageEncoder">
            <layout class="net.logstash.logback.layout.LogstashLayout">
                <!-- if enabled, includes logback context information such as the hostname -->
                <includeContext>true</includeContext>
                <!-- whether to include caller data (the log call site) -->
                <includeCallerData>true</includeCallerData>
                <fieldNames class="net.logstash.logback.fieldnames.ShortenedFieldNames"/>
                <customFields>{"ip": "${ip}"}</customFields>
            </layout>
            <charset>UTF-8</charset>
        </encoder>

        <!-- kafka topic; must match the topic configured in logstash.conf, otherwise the logs will never be consumed -->

        <topic>kafka-elk-logg</topic>

        <!-- keying (partitioning) strategy -->

        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.RoundRobinKeyingStrategy"/>

        <!-- delivery strategy; logback-kafka-appender provides two:
            asynchronous delivery (AsynchronousDeliveryStrategy)
            blocking delivery (BlockingDeliveryStrategy)
        -->

        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>

        <!-- bootstrap.servers is the kafka address; use the host's real IP address here, not localhost -->

        <producerConfig>bootstrap.servers=192.168.2.7:9092</producerConfig>

    </appender>

    <appender name="kafkaAppenderAsync" class="ch.qos.logback.classic.AsyncAppender">

        <appender-ref ref="KAFKA_APPENDER"/>

    </appender>

    <!-- behavior logs sent to kafka; additivity="false" keeps these events from also being duplicated through the root logger -->

    <logger name="KafkaPipeline" level="INFO" additivity="false">

        <appender-ref ref="kafkaAppenderAsync"/>

    </logger>


    <!-- base log level -->
    <root level="INFO">
        <appender-ref ref="console"/>
        <appender-ref ref="kafkaAppenderAsync"/>
    </root>

</configuration>
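
With this logback.xml, everything logged through the root logger is already shipped to Kafka; the named KafkaPipeline logger is intended for behavior-style logs. A minimal usage sketch follows (the OrderService class and its messages are purely illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class OrderService {

    // regular application logger: handled by the root logger, so it goes to console and kafka
    private static final Logger log = LoggerFactory.getLogger(OrderService.class);

    // dedicated behavior-log logger: matches <logger name="KafkaPipeline"> in logback.xml
    private static final Logger kafkaPipeline = LoggerFactory.getLogger("KafkaPipeline");

    public void createOrder(String orderId) {
        log.info("creating order {}", orderId);
        kafkaPipeline.info("behavior log: order {} created", orderId);
    }
}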


4. application.yml configuration

# logging configuration
logging:
  config: classpath:logback.xml

III. Logstash configuration

1. Edit the logstash.conf file

input {
   kafka {
    id => "spring_kafka_elk"
    bootstrap_servers => "kafka:9092"
    topics => ["kafka-elk-logg"]
    auto_offset_reset => "latest" 
  }
}

filter {
  # the messages consumed from kafka are JSON, so parse them first
  json {
    source => "message"
  }

   grok {
    match => { "message" => "\[%{TIMESTAMP_ISO8601}\] %{LOGLEVEL:level} %{DATA:logger_name} %{DATA:thread_name} %{NUMBER:line} - %{GREEDYDATA:message}" }
  }

  date {
    match => ["timestamp", "yyyy-MM-dd HH:mm:ss.SSS"]
    target => "@timestamp"
  }
  # build a different index name for each service
  ruby {
    code => "
      case event.get('service')
      when 'merchant', 'business', 'admin', 'logic', 'interconnection'
        index_prefix = event.get('service') + '-'
      else
        index_prefix = 'unknown_project-'
      end

      case event.get('level')
      when 'INFO', 'WARN', 'ERROR'
        level_suffix = event.get('level').downcase
      else
        level_suffix = 'unknown_level'
      end


      event.set('index_name', index_prefix + level_suffix +'-' + Time.now.strftime('%Y.%m.%d'))

  "
}

}


output {
  # echo each event to stdout for debugging
  stdout { codec => rubydebug }
  # send to es
  elasticsearch {
    hosts => "es:9200"
    index => "%{index_name}"
    # template name (used together with ILM for scheduled deletion)
    template_name => "%{service}"
    # do not overwrite an existing template with the same name
    template_overwrite => false
  }
  
}

IV. Configure the ES index templates

1. Create the index template

# create the template
# one per service: admin business merchant nettyLogic interconnection
PUT _template/merchant
{
  "index_patterns": ["merchant*"],
  "settings": {
    "number_of_replicas": 0,
    "index.lifecycle.name": "test_policy"
  }
}

2. Set up the lifecycle policy

# admin business merchant nettyLogic interconnection
PUT _ilm/policy/test_policy
{
  "policy": {
    "phases": {
      "delete": {
        "min_age": "1m",  
        "actions": {
          "delete": {}
        }
      }
    }
  }
}
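
Both requests above are meant to be pasted into Kibana Dev Tools (http://localhost:5601). If you prefer to apply the template from code instead, here is a minimal sketch using the JDK 11+ built-in HttpClient against the Elasticsearch port exposed by the compose file; the request body is the same as in step 1, and the EsTemplateSetup class name is only for illustration.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class EsTemplateSetup {

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // same body as the Kibana Dev Tools request in step 1
        String template = "{ \"index_patterns\": [\"merchant*\"], "
                + "\"settings\": { \"number_of_replicas\": 0, \"index.lifecycle.name\": \"test_policy\" } }";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:9200/_template/merchant"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString(template))
                .build();

        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        // expect a 200 response with {"acknowledged":true} on success
        System.out.println(response.statusCode() + " " + response.body());
    }
}

Note that a delete min_age of 1m removes indices roughly a minute after creation, which is only useful for verifying that the policy is applied; use a longer value (for example 7d) for real data.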
