Integrating CloudWatch Logs with CloudHub Mule

In this blog, I will explain how to enable AWS CloudWatch logs for your Mule CloudHub application. AWS provides the CloudWatch Logs service so that you can manage your logs in a better way, and it is cheaper than Splunk. Since CloudHub automatically rolls over logs that exceed 100 MB, we need a mechanism to manage logs more effectively. For this we created the following custom appender, which ships logs to CloudWatch.

package com.javaroots.appenders;

import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.toList;

import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Formatter;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.Filter;
import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginElement;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;
import org.apache.logging.log4j.status.StatusLogger;

import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.model.CreateLogGroupRequest;
import com.amazonaws.services.logs.model.CreateLogStreamRequest;
import com.amazonaws.services.logs.model.DataAlreadyAcceptedException;
import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;
import com.amazonaws.services.logs.model.DescribeLogStreamsRequest;
import com.amazonaws.services.logs.model.InputLogEvent;
import com.amazonaws.services.logs.model.InvalidSequenceTokenException;
import com.amazonaws.services.logs.model.LogGroup;
import com.amazonaws.services.logs.model.LogStream;
import com.amazonaws.services.logs.model.PutLogEventsRequest;
import com.amazonaws.services.logs.model.PutLogEventsResult;

@Plugin(name = "CLOUDW", category = "Core", elementType = "appender", printObject = true)
public class CloudwatchAppender extends AbstractAppender {
 
    private static final long serialVersionUID = 12321345L;

    private static final Logger logger2 = LogManager.getLogger(CloudwatchAppender.class);

    private final boolean DEBUG_MODE = System.getProperty("log4j.debug") != null;

    /**
     * Used to make sure that on close() our daemon thread isn't also trying to sendMessage()s
     */
    private final Object sendMessagesLock = new Object();

    /**
     * The queue used to buffer log entries
     */
    private LinkedBlockingQueue<LogEvent> loggingEventsQueue;

    /**
     * the AWS Cloudwatch Logs API client
     */
    private AWSLogs awsLogsClient;

    private final AtomicReference<String> lastSequenceToken = new AtomicReference<>();

    /**
     * The AWS Cloudwatch Log group name
     */
    private String logGroupName;

    /**
     * The AWS Cloudwatch Log stream name
     */
    private String logStreamName;

    /**
     * The queue / buffer size
     */
    private int queueLength = 1024;

    /**
     * The maximum number of log entries to send in one go to the AWS Cloudwatch Log service
     */
    private int messagesBatchSize = 128;

    private AtomicBoolean cloudwatchAppenderInitialised = new AtomicBoolean(false);
 

    private CloudwatchAppender(final String name,
                               final Layout<? extends Serializable> layout,
                               final Filter filter,
                               final boolean ignoreExceptions,
                               final String logGroupName,
                               final String logStreamName,
                               final Integer queueLength,
                               final Integer messagesBatchSize) {
        super(name, filter, layout, ignoreExceptions);
        this.logGroupName = logGroupName;
        this.logStreamName = logStreamName;
        // Keep the built-in defaults if the plugin attributes were not set in log4j2.xml
        if (queueLength != null) {
            this.queueLength = queueLength;
        }
        if (messagesBatchSize != null) {
            this.messagesBatchSize = messagesBatchSize;
        }
        this.activateOptions();
    }

    @Override
    public void append(LogEvent event) {
        if (cloudwatchAppenderInitialised.get()) {
            loggingEventsQueue.offer(event);
        }
        // else: initialisation failed, so silently drop the event
    }
    
    public void activateOptions() {
        if (isBlank(logGroupName) || isBlank(logStreamName)) {
            logger2.error("Could not initialise CloudwatchAppender because either or both LogGroupName(" + logGroupName + ") and LogStreamName(" + logStreamName + ") are null or empty");
            this.stop();
        } else {
          //below lines work with aws version 1.9.40 for local build
          //this.awsLogsClient = new AWSLogsClient();
          //awsLogsClient.setRegion(Region.getRegion(Regions.AP_SOUTHEAST_2));
          this.awsLogsClient = com.amazonaws.services.logs.AWSLogsClientBuilder.standard().withRegion(Regions.AP_SOUTHEAST_2).build();
            loggingEventsQueue = new LinkedBlockingQueue<>(queueLength);
            try {
                initializeCloudwatchResources();
                initCloudwatchDaemon();
                cloudwatchAppenderInitialised.set(true);
            } catch (Exception e) {
                logger2.error("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName, e);
                if (DEBUG_MODE) {
                    System.err.println("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName);
                    e.printStackTrace();
                }
            }
        }
    }
    
    private void initCloudwatchDaemon() {
        Thread t = new Thread(() -> {
            while (true) {
                try {
                    if (!loggingEventsQueue.isEmpty()) {
                        sendMessages();
                    }
                    Thread.sleep(20L);
                } catch (InterruptedException e) {
                    if (DEBUG_MODE) {
                        e.printStackTrace();
                    }
                }
            }
        });
        t.setName("CloudwatchThread");
        t.setDaemon(true);
        t.start();
    }
    
    private void sendMessages() {
        synchronized (sendMessagesLock) {
            LogEvent polledLoggingEvent;
            final Layout<? extends Serializable> layout = getLayout();
            List<LogEvent> loggingEvents = new ArrayList<>();

            try {
                // Drain at most messagesBatchSize events from the queue.
                // Check the size before polling so no event is polled and then discarded.
                while (loggingEvents.size() < messagesBatchSize
                        && (polledLoggingEvent = loggingEventsQueue.poll()) != null) {
                    loggingEvents.add(polledLoggingEvent);
                }
                List<InputLogEvent> inputLogEvents = loggingEvents.stream()
                        .map(loggingEvent -> new InputLogEvent()
                                .withTimestamp(loggingEvent.getTimeMillis())
                                .withMessage(layout == null
                                        ? loggingEvent.getMessage().getFormattedMessage()
                                        : new String(layout.toByteArray(loggingEvent), StandardCharsets.UTF_8)))
                        .sorted(comparing(InputLogEvent::getTimestamp))
                        .collect(toList());

                if (!inputLogEvents.isEmpty()) {

                    PutLogEventsRequest putLogEventsRequest = new PutLogEventsRequest(
                            logGroupName,
                            logStreamName,
                            inputLogEvents);

                    try {
                        putLogEventsRequest.setSequenceToken(lastSequenceToken.get());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                    } catch (DataAlreadyAcceptedException dataAlreadyAcceptedException) {
                        // This batch was already accepted; resync our sequence token and retry
                        putLogEventsRequest.setSequenceToken(dataAlreadyAcceptedException.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            dataAlreadyAcceptedException.printStackTrace();
                        }
                    } catch (InvalidSequenceTokenException invalidSequenceTokenException) {
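                        // Our sequence token is stale; use the expected token from the exception and retry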
                        putLogEventsRequest.setSequenceToken(invalidSequenceTokenException.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            invalidSequenceTokenException.printStackTrace();
                        }
                    }
                }
            } catch (Exception e) {
                if (DEBUG_MODE) {
                    logger2.error("Error sending logs to CloudWatch:", e);
                    e.printStackTrace();
                }
            }
        }
    }

    private void initializeCloudwatchResources() {

        DescribeLogGroupsRequest describeLogGroupsRequest = new DescribeLogGroupsRequest();
        describeLogGroupsRequest.setLogGroupNamePrefix(logGroupName);

        Optional<LogGroup> logGroupOptional = awsLogsClient
                .describeLogGroups(describeLogGroupsRequest)
                .getLogGroups()
                .stream()
                .filter(logGroup -> logGroup.getLogGroupName().equals(logGroupName))
                .findFirst();

        if (!logGroupOptional.isPresent()) {
            CreateLogGroupRequest createLogGroupRequest = new CreateLogGroupRequest().withLogGroupName(logGroupName);
            awsLogsClient.createLogGroup(createLogGroupRequest);
        }

        DescribeLogStreamsRequest describeLogStreamsRequest = new DescribeLogStreamsRequest().withLogGroupName(logGroupName).withLogStreamNamePrefix(logStreamName);

        Optional<LogStream> logStreamOptional = awsLogsClient
                .describeLogStreams(describeLogStreamsRequest)
                .getLogStreams()
                .stream()
                .filter(logStream -> logStream.getLogStreamName().equals(logStreamName))
                .findFirst();
        if (!logStreamOptional.isPresent()) {
            CreateLogStreamRequest createLogStreamRequest = new CreateLogStreamRequest().withLogGroupName(logGroupName).withLogStreamName(logStreamName);
            awsLogsClient.createLogStream(createLogStreamRequest);
        }

    }
    
    private boolean isBlank(String string) {
        return null == string || string.trim().length() == 0;
    }
    protected String getSimpleStacktraceAsString(final Throwable thrown) {
        final StringBuilder stackTraceBuilder = new StringBuilder();
        for (StackTraceElement stackTraceElement : thrown.getStackTrace()) {
            new Formatter(stackTraceBuilder).format("%s.%s(%s:%d)%n",
                    stackTraceElement.getClassName(),
                    stackTraceElement.getMethodName(),
                    stackTraceElement.getFileName(),
                    stackTraceElement.getLineNumber());
        }
        return stackTraceBuilder.toString();
    }

    @Override
    public void start() {
        super.start();
    }

    @Override
    public void stop() {
        super.stop();
        while (loggingEventsQueue != null && !loggingEventsQueue.isEmpty()) {
            this.sendMessages();
        }
    }

    @Override
    public String toString() {
        return CloudwatchAppender.class.getSimpleName() + "{"
                + "name=" + getName()
                + " logGroupName=" + logGroupName
                + " logStreamName=" + logStreamName
                + "}";
    }

    @PluginFactory
    @SuppressWarnings("unused")
    public static CloudwatchAppender createCloudWatchAppender(
            @PluginAttribute(value = "queueLength") Integer queueLength,
            @PluginElement("Layout") Layout<? extends Serializable> layout,
            @PluginAttribute(value = "logGroupName") String logGroupName,
            @PluginAttribute(value = "logStreamName") String logStreamName,
            @PluginAttribute(value = "name") String name,
            @PluginAttribute(value = "ignoreExceptions", defaultBoolean = false) Boolean ignoreExceptions,
            @PluginAttribute(value = "messagesBatchSize") Integer messagesBatchSize) {
        return new CloudwatchAppender(name, layout, null, ignoreExceptions, logGroupName, logStreamName, queueLength, messagesBatchSize);
    }
}
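
A note on credentials and region: the appender above builds its client with AWSLogsClientBuilder.standard(), so credentials are resolved through the SDK's default provider chain (environment variables, Java system properties, and so on), and the region is hardcoded to AP_SOUTHEAST_2. If you prefer to wire credentials explicitly, here is a minimal sketch; the AwsLogsClientFactory class and the aws.access.key / aws.secret.key property names are placeholders of my choosing, not something the appender above reads:

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.AWSLogsClientBuilder;

public class AwsLogsClientFactory {

    // Sketch: build the CloudWatch Logs client with explicit credentials instead of
    // the default provider chain. The system property names below are placeholders --
    // use whatever property names you pass to your application.
    public static AWSLogs create() {
        return AWSLogsClientBuilder.standard()
                .withRegion(Regions.AP_SOUTHEAST_2)
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials(
                                System.getProperty("aws.access.key"),
                                System.getProperty("aws.secret.key"))))
                .build();
    }
}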

We add the dependencies to our pom.xml file.

<dependency>
  <groupId>com.amazonaws</groupId>
  <artifactId>aws-java-sdk-logs</artifactId>
  <!-- For a local Mule 3.8.5 build we need version 1.9.40, because CloudHub 3.8.5 ships Jackson 2.6.6 -->
  <!-- <version>1.9.40</version> -->
  <version>1.11.105</version>
  <exclusions>
    <exclusion>
      <groupId>org.apache.logging.log4j</groupId>
      <artifactId>log4j-1.2-api</artifactId>
    </exclusion>
    <exclusion>
      <groupId>com.fasterxml.jackson.core</groupId>
      <artifactId>jackson-core</artifactId>
    </exclusion>
    <exclusion>
      <groupId>com.fasterxml.jackson.core</groupId>
      <artifactId>jackson-databind</artifactId>
    </exclusion>
  </exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-api -->
<dependency>
  <groupId>org.apache.logging.log4j</groupId>
  <artifactId>log4j-api</artifactId>
  <version>2.5</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core -->
<dependency>
  <groupId>org.apache.logging.log4j</groupId>
  <artifactId>log4j-core</artifactId>
  <version>2.5</version>
</dependency>

Now we need to modify our log4j2.xml: we add the custom CloudWatch appender together with the CloudHub log appender, so that we still get the logs on CloudHub as well.

<?xml version="1.0" encoding="utf-8"?>
<Configuration status="trace" packages="au.edu.vu.appenders,com.mulesoft.ch.logging.appender">

 <!-- These are some of the loggers you can enable.
      There are several more listed in the documentation.
      Besides this log4j configuration, you can also use Java VM options
      to enable other logs, such as network (-Djavax.net.debug=ssl or all) and
      Garbage Collector (-XX:+PrintGC) logging. These are appended to the console, so you will
      see them in the mule_ee.log file. -->


    <Appenders>
         <CLOUDW name="CloudW" logGroupName="test-log-stream" 
        logStreamName="test44" messagesBatchSize="${sys:cloudwatch.msg.batch.size}" queueLength="${sys:cloudwatch.queue.length}">
   <PatternLayout pattern="%d [%t] %-5p %c - %m%n"/>
  </CLOUDW>
  
  <Log4J2CloudhubLogAppender name="CLOUDHUB"
                                   addressProvider="com.mulesoft.ch.logging.DefaultAggregatorAddressProvider"
                                   applicationContext="com.mulesoft.ch.logging.DefaultApplicationContext"
                                   appendRetryIntervalMs="${sys:logging.appendRetryInterval}"
                                   appendMaxAttempts="${sys:logging.appendMaxAttempts}"
                                   batchSendIntervalMs="${sys:logging.batchSendInterval}"
                                   batchMaxRecords="${sys:logging.batchMaxRecords}"
                                   memBufferMaxSize="${sys:logging.memBufferMaxSize}"
                                   journalMaxWriteBatchSize="${sys:logging.journalMaxBatchSize}"
                                   journalMaxFileSize="${sys:logging.journalMaxFileSize}"
                                   clientMaxPacketSize="${sys:logging.clientMaxPacketSize}"
                                   clientConnectTimeoutMs="${sys:logging.clientConnectTimeout}"
                                   clientSocketTimeoutMs="${sys:logging.clientSocketTimeout}"
                                   serverAddressPollIntervalMs="${sys:logging.serverAddressPollInterval}"
                                   serverHeartbeatSendIntervalMs="${sys:logging.serverHeartbeatSendIntervalMs}"
                                   statisticsPrintIntervalMs="${sys:logging.statisticsPrintIntervalMs}">

            <PatternLayout pattern="[%d{MM-dd HH:mm:ss}] %-5p %c{1} [%t] CUSTOM: %m%n"/>
        </Log4J2CloudhubLogAppender>
        
    </Appenders>
    <Loggers>
     
     
  <!-- Http Logger shows wire traffic on DEBUG -->
  <AsyncLogger name="org.mule.module.http.internal.HttpMessageLogger" level="WARN"/>
 
  <!-- JDBC Logger shows queries and parameters values on DEBUG -->
  <AsyncLogger name="com.mulesoft.mule.transport.jdbc" level="WARN"/>
    
        <!-- CXF is used heavily by Mule for web services -->
        <AsyncLogger name="org.apache.cxf" level="WARN"/>

        <!-- Apache Commons tend to make a lot of noise which can clutter the log-->
        <AsyncLogger name="org.apache" level="WARN"/>

        <!-- Reduce startup noise -->
        <AsyncLogger name="org.springframework.beans.factory" level="WARN"/>

        <!-- Mule classes -->
        <AsyncLogger name="org.mule" level="INFO"/>
        <AsyncLogger name="com.mulesoft" level="INFO"/>

        <!-- Reduce DM verbosity -->
        <AsyncLogger name="org.jetel" level="WARN"/>
        <AsyncLogger name="Tracking" level="WARN"/>
        
        <AsyncRoot level="INFO">
            <AppenderRef ref="CLOUDHUB" level="INFO"/>
            <AppenderRef ref="CloudW" level="INFO"/>
        </AsyncRoot>
    </Loggers>
</Configuration>
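
One thing to watch: the ${sys:...} placeholders above are resolved from Java system properties, so cloudwatch.msg.batch.size and cloudwatch.queue.length must be supplied at deployment time (otherwise the appender falls back to its built-in defaults of 128 and 1024). A minimal sketch, assuming you pass them as JVM arguments locally or as application properties in Runtime Manager; the values shown are just examples:

# Local testing: pass the values as JVM arguments
-Dcloudwatch.msg.batch.size=128
-Dcloudwatch.queue.length=1024

# CloudHub: set the same keys as application properties in Runtime Manager
cloudwatch.msg.batch.size=128
cloudwatch.queue.length=1024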

Finally, we need to disable the CloudHub logs in the CloudHub Runtime Manager.

This works with CloudHub Mule runtime version 3.8.4. There are some issues with the CloudHub 3.8.5 version: the appender initialises correctly and sends logs, but events and messages go missing.

Translated from: https://www.javacodegeeks.com/2017/10/integrate-cloudwatch-logs-cloudhub-mule.html
