Spring Cloud Tracing - Log Collection

Collect the logs of the RuoyiCloud microservices into Elasticsearch, correlated by SkyWalking trace IDs. The component versions used are the ones pinned in the docker-compose.yml below.

1. Install with docker-compose: portainer, nacos, mysql, redis, elasticsearch, kibana, logstash, skywalking-oap, skywalking-ui

version: '3.8'
services:
  portainer:
    image: portainer/portainer-ce:latest
    container_name: portainer
    ports:
      - "9000:9000"
    volumes:
      - /home/portainer/data:/data
      - /var/run/docker.sock:/var/run/docker.sock
    restart: always
    # Reset the admin password: docker run --rm -v /home/portainer/data:/data portainer/helper-reset-password  531aaaAAA...
  redis:
    image: redis:6.2.6
    container_name: redis
    sysctls:
      - net.core.somaxconn=1024
    environment:
      - TZ=Asia/Shanghai
    command: redis-server /etc/redis/redis.conf # start with the local redis.conf mounted into the container at /etc/redis/
    ports:
      - "6379:6379"
    volumes:
      - /home/redis/data:/data
      - /home/redis/conf/redis.conf:/etc/redis/redis.conf
      - /home/redis/logs:/logs
    restart: always
  nacos:
    image: nacos/nacos-server:v2.3.0
    container_name: nacos
    restart: always
    depends_on:
      - mysql8
    environment:
      PREFER_HOST_MODE: hostname # use hostname if supported, otherwise ip (ip is also the default)
      SPRING_DATASOURCE_PLATFORM: mysql # datasource platform: only mysql is supported, or empty for the embedded storage
      MODE: standalone
      MYSQL_SERVICE_HOST: mysql8
      MYSQL_SERVICE_DB_NAME: nacos
      MYSQL_SERVICE_PORT: 3306
      MYSQL_SERVICE_USER: root
      MYSQL_SERVICE_PASSWORD: root
      MYSQL_SERVICE_DB_PARAM: characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
      NACOS_APPLICATION_PORT: 8848
      JVM_XMS: 512m
      JVM_MMS: 320m
    volumes:
      - /home/nacos/standalone-logs/:/home/nacos/logs
      - /home/nacos/plugins/:/home/nacos/plugins
      - /home/nacos/conf/application.properties:/home/nacos/conf/application.properties
    ports:
      - "8848:8848"
      - "9848:9848"
  mysql8: # service name
    image: mysql:8.0.21 # or another MySQL version
    container_name: mysql8 # container name
    environment:
      # root user password
      - MYSQL_ROOT_PASSWORD=root
      - MYSQL_DATABASE=skywalking  # create the skywalking database
    # - TZ=Asia/Shanghai # container timezone; not needed here because the host clock is mounted below
    volumes:
      # log directory, host:container
      - /home/mysql8/log:/var/log/mysql
      # data directory, host:container
      - /home/mysql8/data:/var/lib/mysql
      # config directory, host:container
      - /home/mysql8/conf.d:/etc/mysql/conf.d
      # keep the container clock in sync with the host; ro means read only
      - /etc/localtime:/etc/localtime:ro
    ports:
      - 3306:3306 # host:container port mapping
    restart: always # restart the container when docker starts


  elasticsearch:
    image: elasticsearch:7.17.10
    container_name: local-es
    privileged: true
    environment:
      - "cluster.name=elasticsearch" #设置集群名称为elasticsearch
      - "discovery.type=single-node" #以单一节点模式启动
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m" #设置使用jvm内存大小
      - bootstrap.memory_lock=true
    volumes:
      # - /home/es/config:/usr/share/elasticsearch/config #配置文件挂载
      # - /home/es/data:/usr/share/elasticsearch/data #数据文件挂载
      - /home/es/plugins:/usr/share/elasticsearch/plugins #插件文件挂载
    ports:
      - 9200:9200
      - 9300:9300
    deploy:
      resources:
        limits:
          cpus: "1"
          memory: 1g   # must be larger than the 512m JVM heap set above, otherwise the container gets OOM-killed
        reservations:
          memory: 200M
    restart: always
           
  kibana:
    image: kibana:7.17.10
    container_name: kibana
    environment:
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200 # address of the elasticsearch instance
      I18N_LOCALE: zh-CN
    ports:
      - 5601:5601
    depends_on:
      - elasticsearch # start kibana after elasticsearch
    restart: always

  logstash:
    image: logstash:7.17.10
    container_name: logstash
    volumes:
      - /home/logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      # - /home/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
    ports:
      - "9650:9650"
      - "9600:9600"
    depends_on:
      - elasticsearch
    restart: always

  # SkyWalking OAP Server
  skywalking-oap:
    image: apache/skywalking-oap-server:9.3.0  # official SkyWalking OAP Server image
    container_name: skywalking-oap
    environment:  # environment variables
      # Alternative MySQL storage (unused here):
      # SW_STORAGE: mysql
      # SW_JDBC_URL: jdbc:mysql://mysql8:3306/skywalking?rewriteBatchedStatements=true&allowMultiQueries=true&useSSL=false
      # SW_DATA_SOURCE_USER: root
      # SW_DATA_SOURCE_PASSWORD: root
      TZ: Asia/Shanghai
      SW_STORAGE: elasticsearch
      SW_NAMESPACE: skywalking-index
      SW_STORAGE_ES_CLUSTER_NODES: elasticsearch:9200
      SW_STORAGE_ES_HTTP_PROTOCOL: http
      SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING: true
      # SW_ES_USER: <ES-USER>
      # SW_ES_PASSWORD: <ES-PWD>
    ports:  # port mappings
      - 11800:11800  # OAP Server gRPC port (agents report here)
      - 12800:12800  # OAP Server HTTP/REST port (queried by the UI)
    depends_on:  # depends on Elasticsearch for storage
      # - mysql8
      - elasticsearch
    restart: always

  # SkyWalking UI
  skywalking-ui:
    image: apache/skywalking-ui:9.3.0  # official SkyWalking UI image
    container_name: skywalking-ui
    environment:
      TZ: Asia/Shanghai
      SW_OAP_ADDRESS: http://skywalking-oap:12800
    ports:  # port mappings
      - 9001:8080  # UI port (host 9001 -> container 8080)
    depends_on:  # depends on the SkyWalking OAP Server
      - skywalking-oap
    restart: always

 
networks:
  demo-net:
    driver: bridge
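
Before the first start, one host-level prerequisite is worth checking (a general Elasticsearch requirement, not something specific to this compose file): the Elasticsearch container typically needs vm.max_map_count of at least 262144 on the host, otherwise it may fail to start.

sysctl -w vm.max_map_count=262144                                  # apply immediately
echo "vm.max_map_count=262144" >> /etc/sysctl.conf && sysctl -p    # keep after reboot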

The mounted logstash.conf:

input {  
  tcp {  
    mode => "server"
    host => "0.0.0.0"
    port => 9650
    codec => json_lines
  }  
}  
  
filter {
  # add filters here to process the logs
  mutate {
    remove_field => ["level_value", "index_name", "port"]
  }
}
  
output {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]
    index => "ruoyi-%{+YYYY.MM.dd}"
  }
  # stdout { codec => rubydebug }
}
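
Once the services start sending logs, a quick way to confirm that Logstash is writing into Elasticsearch is to list the indices matching the pattern configured above (assuming the stack runs locally with the 9200 port mapping from the compose file):

curl "http://localhost:9200/_cat/indices/ruoyi-*?v"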

2. After bringing the stack up with docker-compose, check the containers' status through Portainer.
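
For reference, the whole stack can be started and inspected like this (run in the directory that contains docker-compose.yml; Portainer is reachable on port 9000 as mapped above):

docker-compose up -d     # start all services in the background
docker-compose ps        # quick status check from the CLI, or open http://<host-ip>:9000 for Portainer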

3. Adjust the RuoyiCloud pom.xml: add the Logstash and SkyWalking dependencies below, and have each sub-module import the ones it needs (a sub-module example follows the snippet).

<properties>
    <!-- added properties -->
    <logstash-logback-encoder-version>7.2</logstash-logback-encoder-version>
    <apm-logback.version>9.3.0</apm-logback.version>
</properties>


<dependencies>
    <!-- logstash -->
    <dependency>
        <groupId>net.logstash.logback</groupId>
        <artifactId>logstash-logback-encoder</artifactId>
        <version>${logstash-logback-encoder-version}</version>
    </dependency>

    <!-- SkyWalking trace toolkit -->
    <dependency>
        <groupId>org.apache.skywalking</groupId>
        <artifactId>apm-toolkit-trace</artifactId>
        <version>${apm-logback.version}</version>
    </dependency>

    <!-- apm-toolkit-logback-1.x -->
    <dependency>
        <groupId>org.apache.skywalking</groupId>
        <artifactId>apm-toolkit-logback-1.x</artifactId>
        <version>${apm-logback.version}</version>
    </dependency>

    <!-- gateway (WebFlux) -->
    <dependency>
        <groupId>org.apache.skywalking</groupId>
        <artifactId>apm-toolkit-webflux</artifactId>
        <version>${apm-logback.version}</version>
    </dependency>
</dependencies>
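
If the block above sits in the parent pom's <dependencyManagement>, each sub-module that needs tracing and log shipping only declares the artifacts without versions; a sketch of a sub-module pom fragment:

<dependencies>
    <!-- log shipping to Logstash -->
    <dependency>
        <groupId>net.logstash.logback</groupId>
        <artifactId>logstash-logback-encoder</artifactId>
    </dependency>
    <!-- read the SkyWalking trace ID in code -->
    <dependency>
        <groupId>org.apache.skywalking</groupId>
        <artifactId>apm-toolkit-trace</artifactId>
    </dependency>
    <!-- tid / trace-context support in logback -->
    <dependency>
        <groupId>org.apache.skywalking</groupId>
        <artifactId>apm-toolkit-logback-1.x</artifactId>
    </dependency>
</dependencies>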

4. Reconfigure logback.xml in the corresponding sub-modules.

<?xml version="1.0" encoding="UTF-8"?>
<configuration>

    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder">
            <layout class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout">
                <Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n</Pattern>
            </layout>
        </encoder>
    </appender>

    <!-- add converter for %tid -->
    <conversionRule conversionWord="tid" converterClass="org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter"/>
    <!-- add converter for %sw_ctx -->
    <conversionRule conversionWord="sw_ctx" converterClass="org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter"/>


    <appender name="logstash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!--    <destination>${logstash.server-addr}</destination>-->
        <destination>192.168.224.139:9650</destination>
        <!-- the encoder is mandatory; several implementations are available -->
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder">
            <!-- extra field added to every generated JSON log line -->
            <customFields>{"serviceName":"${spring.application.name}"}</customFields>
            <!-- add TID(traceId) field -->
            <provider class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider">
            </provider>
            <!-- add SW_CTX(SkyWalking context) field -->
            <provider class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider">
            </provider>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>${log.level}</level>
        </filter>
        <connectionStrategy>
            <roundRobin>
                <connectionTTL>5 minutes</connectionTTL>
            </roundRobin>
        </connectionStrategy>
    </appender>


    <!-- attach the appenders to the root logger -->
    <root level="INFO">
        <appender-ref ref="console"/>
        <appender-ref ref="logstash"/>
    </root>

</configuration>
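
Note that ${spring.application.name} in a plain logback.xml is resolved from system properties or the OS environment, not from Spring's own configuration; if it comes out literally, either pass it as a JVM system property or move to logback-spring.xml and declare it via <springProperty>. With apm-toolkit-trace on the classpath, business code can also read the current trace ID or open a local span; a minimal sketch (class and method names are made up for illustration):

import org.apache.skywalking.apm.toolkit.trace.Trace;
import org.apache.skywalking.apm.toolkit.trace.TraceContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class OrderService {

    private static final Logger log = LoggerFactory.getLogger(OrderService.class);

    @Trace // records this method as a local span in the current trace
    public void createOrder(String orderId) {
        // TraceContext.traceId() returns the active SkyWalking trace ID,
        // the same ID that the Logstash providers above attach to every JSON log line
        log.info("creating order {}, traceId={}", orderId, TraceContext.traceId());
    }
}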

5. Add the skywalking-agent parameters to each module's startup command.

Download the SkyWalking Java agent matching version 9.3.0.

-javaagent:D:\code\my_project\configuration_file\elasticsearch\skywalking\skywalking-agent\skywalking-agent.jar
-DSW_AGENT_NAME=ruoyi-auth
-DSW_AGENT_COLLECTOR_BACKEND_SERVICES=192.168.224.139:11800
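
On a Linux server the same parameters go on the java command line before -jar; a sketch, assuming the agent was unpacked to /opt/skywalking-agent and the module jar is named ruoyi-auth.jar (both paths are illustrative):

java -javaagent:/opt/skywalking-agent/skywalking-agent.jar \
     -DSW_AGENT_NAME=ruoyi-auth \
     -DSW_AGENT_COLLECTOR_BACKEND_SERVICES=192.168.224.139:11800 \
     -jar ruoyi-auth.jar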

After startup, make some requests from the frontend and open the SkyWalking UI (mapped to port 9001 above) to check the service traces and topology.

6. Open Kibana and query the logs collected in Elasticsearch by trace ID (see the example below).
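
In Kibana, create an index pattern matching the Logstash output (ruoyi-*), then filter in Discover by the service name and the trace ID field added by the SkyWalking provider; the exact trace ID field name can be confirmed by expanding any log document. A hedged KQL example, assuming the field is called TID:

serviceName : "ruoyi-auth" and TID : "<traceId copied from the SkyWalking UI>"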

 
