Spring Boot Kafka Integration

```java
package com.aicrs.engine.kafaka;

import com.aicrs.engine.common.LogInfo;
import com.aicrs.engine.common.RedisNameEnum;
import com.aicrs.engine.component.DataSourceSwitchCompont;
import com.aicrs.engine.constant.SwitchConstant;
import com.aicrs.engine.entity.DetailLogEntity;
import com.aicrs.engine.entity.QueryLogEntity;
import com.aicrs.engine.kafaka.Singleton.DetailLogSingletonFactory;
import com.aicrs.engine.kafaka.Singleton.QueryLogSingletonFactory;
import com.aicrs.engine.mapper.DetailLogEntityMapper;
import com.aicrs.engine.mapper.QueryLogEntityMapper;
import com.aicrs.engine.utils.DateUtil;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.CollectionUtils;
import javax.annotation.PostConstruct;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.BlockingQueue;

@Component
public class KafkaConsumerManager {
    private static final Logger logger = LoggerFactory.getLogger(KafkaConsumerManager.class);

    @Autowired
    KafkaConsumer<String, String> consumer;
    @Autowired
    RedisTemplate<String, String> logSwitchRedis;
    @Autowired
    private DataSourceSwitchCompont dataSourceSwitchCompont;
    @Autowired
    private QueryLogEntityMapper queryLogEntityMapper;
    @Autowired
    private DetailLogEntityMapper detailLogEntityMapper;

    @PostConstruct
    public void kafkaConsumer() {
        // Switch controlling whether logs are consumed from Kafka
        if (!SwitchConstant.LOG_TO_KAFKA_CLOSE.equals(dataSourceSwitchCompont.getDataSourceSign(SwitchConstant.LOG_TO_KAFKA_SWITCH))) {
            try {
                consumer.subscribe(Arrays.asList(LogInfo.KAFAKA_TOPIC));

                new Thread(() -> {
                    while (true) {
                        consumeKafkaMessageAndSaveLog();
                    }
                }, "kafka-log-consumer").start();
            } catch (Exception e) {
                logger.error("kafka 日志报错{}", e);
            }
        }

    }

    /**
     * description Consume Kafka messages and persist them to the log tables
     * return void
     * createTime 2021-1-12 16:46
     **/
    private void consumeKafkaMessageAndSaveLog() {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        if(records.isEmpty()){
            return;
        }
        List<QueryLogEntity> queryLogEntityList = null;
        List<DetailLogEntity> detailLogEntityList = null;
        BlockingQueue<List<QueryLogEntity>> queryLogBlockingQueue = QueryLogSingletonFactory.getInstance();
        BlockingQueue<List<DetailLogEntity>> detailLogBlockingQueue = DetailLogSingletonFactory.getInstance();
        for (ConsumerRecord<String, String> record : records) {
            logger.info("Start consuming Kafka message: {}", record);
            // QUERY_LOG table
            queryLogEntityList = makeupQueryLogEntityList(queryLogEntityList, record);
            // DETAIL_LOG table
            detailLogEntityList = makeupDetailLogEntityList(detailLogEntityList, record);

            if (!CollectionUtils.isEmpty(queryLogEntityList)) {
                // offer() returns true if the queue accepted queryLogEntityList and false
                // if the queue is full; unlike put(), it never blocks the calling thread.
                boolean flag = queryLogBlockingQueue.offer(queryLogEntityList);
                if (!flag) {
                    logger.error("queryLogBlockingQueue is full...");
                }
            }

            if (!CollectionUtils.isEmpty(detailLogEntityList)) {
                boolean flag = detailLogBlockingQueue.offer(detailLogEntityList);
                if (!flag) {
                    logger.error("detailLogEntityList is full...");
                }
            }

            if(CollectionUtils.isEmpty(queryLogBlockingQueue) && CollectionUtils.isEmpty(detailLogBlockingQueue)){
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException e) {
                    logger.error("Consumer thread interrupted while waiting for log entries", e);
                    Thread.currentThread().interrupt(); // restore the interrupt flag
                }
            }

            if(!CollectionUtils.isEmpty(queryLogBlockingQueue)){
                List<List<QueryLogEntity>> saveQueryLogEntityList = Collections.synchronizedList(new LinkedList<>());
                queryLogBlockingQueue.drainTo(saveQueryLogEntityList, queryLogBlockingQueue.size());
                queueQueryLogSave(saveQueryLogEntityList);
            }

            if(!CollectionUtils.isEmpty(detailLogBlockingQueue)){
                List<List<DetailLogEntity>> saveDetailLogEntityList = Collections.synchronizedList(new LinkedList<>());
                detailLogBlockingQueue.drainTo(saveDetailLogEntityList, detailLogBlockingQueue.size());
                queueDetailLogSave(saveDetailLogEntityList);
            }
        }
    }

    /**
     * description Asynchronously persist QUERY_LOG table data
     * param [logList]
     * return void
     * createTime 2021-1-12 15:17
     **/
    // NOTE: @Async and @Transactional have no effect here. Spring applies these
    // annotations through proxies, which do not intercept private methods invoked
    // from within the same bean (self-invocation). Moving this logic into a
    // separate @Service bean would make the annotations effective.
    @Async
    @Transactional
    private void queueQueryLogSave(final List<List<QueryLogEntity>> logList) {
        if(CollectionUtils.isEmpty(logList)) {
            return;
        }
        String logSwitch = logSwitchRedis.opsForValue().get("logSwitch");
        if (RedisNameEnum.LOG_TO_DB_KAFKA_SWITCH_CLOSE.getKeyName().equals(logSwitch)) {
            return;
        }
        List<QueryLogEntity> saveLogRecordList = Collections.synchronizedList(new LinkedList<>());
        for (List<QueryLogEntity> list : logList) {
            saveLogRecordList.addAll(list);
        }
        logList.clear();

        if (!CollectionUtils.isEmpty(saveLogRecordList)) {
            if (saveLogRecordList.size() >= 100) {
                List<List<QueryLogEntity>> tempsQueryLogEntityList = split(saveLogRecordList, 100);
                for (List<QueryLogEntity> queryLogEntity : tempsQueryLogEntityList) {
                    queryLogEntityMapper.insertQueryLogEntityByBatch(queryLogEntity);
                }
                return;
            }
            queryLogEntityMapper.insertQueryLogEntityByBatch(saveLogRecordList);
        }

    }

    /**
     * description Asynchronously persist DETAIL_LOG table data
     * param [logList]
     * return void
     * createTime 2021-1-12 15:17
     **/
    // NOTE: as above, @Async/@Transactional do not apply to private self-invoked methods.
    @Async
    @Transactional
    private void queueDetailLogSave(final List<List<DetailLogEntity>> logList) {
        if(CollectionUtils.isEmpty(logList)) {
            return;
        }
        String logSwitch = logSwitchRedis.opsForValue().get("logSwitch");
        if (RedisNameEnum.LOG_TO_DB_KAFKA_SWITCH_CLOSE.getKeyName().equals(logSwitch)) {
            return;
        }
        List<DetailLogEntity> saveLogRecordList = Collections.synchronizedList(new LinkedList<>());
        for (List<DetailLogEntity> list : logList) {
            saveLogRecordList.addAll(list);
        }
        logList.clear();

        if (!CollectionUtils.isEmpty(saveLogRecordList)) {
            if (saveLogRecordList.size() >= 100) {
                List<List<DetailLogEntity>> tempsQueryLogEntityList = split(saveLogRecordList, 100);
                for (List<DetailLogEntity> detailLogEntity : tempsQueryLogEntityList) {
                    detailLogEntityMapper.insertDetailLogEntityByBatch(detailLogEntity);
                }
                return;
            }
            detailLogEntityMapper.insertDetailLogEntityByBatch(saveLogRecordList);
        }

    }

    /**
     * description Assemble QUERY_LOG rows from a Kafka record
     * param [queryLogEntityList, record]
     * return java.util.List<com.aicrs.engine.entity.QueryLogEntity>
     * createTime 2021-1-12 15:15
     **/
    private List<QueryLogEntity> makeupQueryLogEntityList(List<QueryLogEntity> queryLogEntityList, ConsumerRecord<String, String> record) {
        if(LogInfo.KEY_QUERY_LOG.equals(record.key())){//QUERY_LOG表
            String[] kafakaReturnrecordArr = record.value().split(LogInfo.EXCISION,-1);
            queryLogEntityList = Collections.synchronizedList(new LinkedList<>());
            QueryLogEntity queryLogEntity = new QueryLogEntity();
            queryLogEntity.setRequestUuid(kafakaReturnrecordArr[0]);
            queryLogEntity.setQueryTime(DateUtil.stringToDate(kafakaReturnrecordArr[1]));
            queryLogEntity.setCustomer(kafakaReturnrecordArr[2]);
            queryLogEntity.setReqCompany(kafakaReturnrecordArr[3]);
            queryLogEntity.setUserName(kafakaReturnrecordArr[4]);
            queryLogEntity.setErrorCode(kafakaReturnrecordArr[5]);
            queryLogEntity.setErrorMessage(kafakaReturnrecordArr[6]);
            queryLogEntityList.add(queryLogEntity);
        }
        return queryLogEntityList;
    }

    /**
     * description Assemble DETAIL_LOG rows from a Kafka record
     * param [detailLogEntityList, record]
     * return java.util.List<com.aicrs.engine.entity.DetailLogEntity>
     * createTime 2021-1-12 15:16
     **/
    private List<DetailLogEntity> makeupDetailLogEntityList(List<DetailLogEntity> detailLogEntityList, ConsumerRecord<String, String> record) {
        if(LogInfo.KEY_DETAIL_LOG.equals(record.key())){//DETAIL_LOG表
            String[] kafakaReturnrecordArr = record.value().split(LogInfo.EXCISION,-1);
            detailLogEntityList = Collections.synchronizedList(new LinkedList<>());
            DetailLogEntity detailLogEntity = new DetailLogEntity();
            detailLogEntity.setRequestUuid(kafakaReturnrecordArr[0]);
            detailLogEntity.setIsIn(kafakaReturnrecordArr[1]);
            detailLogEntity.setCpname(kafakaReturnrecordArr[2]);
            detailLogEntity.setUniteCode(kafakaReturnrecordArr[3]);
            detailLogEntity.setOrgCode(kafakaReturnrecordArr[4]);
            detailLogEntity.setHitCode(kafakaReturnrecordArr[5]);
            detailLogEntity.setHitColumn(kafakaReturnrecordArr[6]);
            detailLogEntity.setIdAicrsAppBlackList(kafakaReturnrecordArr[7]);
            detailLogEntity.setCpblackflag(kafakaReturnrecordArr[8]);
            detailLogEntity.setIsOwner(kafakaReturnrecordArr[9]);
            detailLogEntity.setFirstType(kafakaReturnrecordArr[10]);
            detailLogEntity.setSecondType(kafakaReturnrecordArr[11]);
            detailLogEntity.setIsMajorWaring(kafakaReturnrecordArr[12]);
            detailLogEntity.setMajorWarningDate(kafakaReturnrecordArr[13]);
            detailLogEntity.setTag(kafakaReturnrecordArr[14]);
            detailLogEntity.setReason(kafakaReturnrecordArr[15]);
            detailLogEntityList.add(detailLogEntity);
        }
        return detailLogEntityList;
    }

    /**
     * description Split a list into sublists of at most subListLength elements for batched inserts
     * param [resList, subListLength]
     * return java.util.List<java.util.List<T>>
     * createTime 2021-1-11 15:38
     **/
    public static <T> List<List<T>> split(List<T> resList, int subListLength) {
        if (CollectionUtils.isEmpty(resList) || subListLength <= 0) {
            return new ArrayList<>();
        }
        List<List<T>> ret = new ArrayList<>();
        int size = resList.size();
        if (size <= subListLength) {
            // fewer elements than subListLength: keep the whole list as a single batch
            ret.add(resList);
        } else {
            int pre = size / subListLength;
            int last = size % subListLength;
            // the first pre sublists each contain exactly subListLength elements
            for (int i = 0; i < pre; i++) {
                List<T> itemList = new ArrayList<>();
                for (int j = 0; j < subListLength; j++) {
                    itemList.add(resList.get(i * subListLength + j));
                }
                ret.add(itemList);
            }
            // collect the remaining last elements into a final, smaller sublist
            if (last > 0) {
                List<T> itemList = new ArrayList<>();
                for (int i = 0; i < last; i++) {
                    itemList.add(resList.get(pre * subListLength + i));
                }
                ret.add(itemList);
            }
        }
        return ret;
    }
}
```
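The manager above autowires a `KafkaConsumer<String, String>` bean that the post never defines. A minimal sketch of what such a bean definition might look like, assuming string-serialized records and externalized connection settings (the property keys and defaults below are hypothetical):

```java
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class KafkaConsumerConfig {

    // hypothetical property keys; adjust to your application.yml/properties
    @Value("${kafka.bootstrap-servers:localhost:9092}")
    private String bootstrapServers;

    @Value("${kafka.consumer.group-id:aicrs-log-consumer}")
    private String groupId;

    @Bean
    public KafkaConsumer<String, String> consumer() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        return new KafkaConsumer<>(props);
    }
}
```

Note that `KafkaConsumer` is not thread-safe, but the manager only touches it from its single polling thread, so a singleton bean is workable here.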
The DetailLogSingletonFactory used by the consumer above:

```java
import com.aicrs.engine.entity.DetailLogEntity;

import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

/**
 * description Lazily initialize the detail-log queue as a singleton (double-checked locking)
 * createTime 2021-1-12 16:18
 **/
public class DetailLogSingletonFactory {
    private static volatile BlockingQueue<List<DetailLogEntity>> detailLogSingleton;

    // private constructor prevents instantiation of the singleton holder
    private DetailLogSingletonFactory() {
    }

    public static BlockingQueue<List<DetailLogEntity>> getInstance() {
        if (detailLogSingleton == null) {
            synchronized (DetailLogSingletonFactory.class) { // lock this class, not QueryLogSingletonFactory
                if (detailLogSingleton == null) {
                    detailLogSingleton = new LinkedBlockingQueue<>(5000);
                }
            }
        }
        return detailLogSingleton;
    }
}
```
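The consumer also relies on `QueryLogSingletonFactory.getInstance()`, which the post omits. Assuming it mirrors the detail-log factory above (including the 5000-element capacity), a sketch:

```java
import com.aicrs.engine.entity.QueryLogEntity;

import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class QueryLogSingletonFactory {
    // volatile so the double-checked locking below is safe
    private static volatile BlockingQueue<List<QueryLogEntity>> queryLogSingleton;

    private QueryLogSingletonFactory() {
    }

    public static BlockingQueue<List<QueryLogEntity>> getInstance() {
        if (queryLogSingleton == null) {
            synchronized (QueryLogSingletonFactory.class) {
                if (queryLogSingleton == null) {
                    // bounded queue: offer() fails fast instead of blocking when full
                    queryLogSingleton = new LinkedBlockingQueue<>(5000);
                }
            }
        }
        return queryLogSingleton;
    }
}
```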
The aspect that publishes log data when the target service method returns:

```java
// NOTE: the original post omits the next three imports; the packages shown are
// assumptions based on the surrounding code (JSONObject is assumed to be fastjson's).
import com.aicrs.engine.common.DataResponse;
import com.aicrs.engine.kafaka.CommonSaveLog;
import com.alibaba.fastjson.JSONObject;

import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.AfterReturning;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.Map;
import java.util.UUID;


/**
 * description Send a log message to Kafka each time the pointcut method is called
 * createTime 2021-1-5 15:51
 **/
@Aspect
@Component
public class KafkaLogAspect {

    private static final Logger logger = LoggerFactory.getLogger(KafkaLogAspect.class);
    @Autowired
    private CommonSaveLog commonSaveLog;

    @Pointcut("execution(* com.aicrs.engine..**.BlackInfoServiceImpl.queryBlackInfos(..))")
    public void log() {
    }

    @AfterReturning(returning = "ret", pointcut = "log()")
    public void doAfterReturning(JoinPoint joinPoint, Object ret) throws Throwable {
        Object[] obj = joinPoint.getArgs();
        if (obj != null && obj.length > 0) {
            if (obj[0] instanceof JSONObject) {
                // queryBlackInfos returns a map bundling the response and the log parameters
                Map<String, Object> responseMap = (Map<String, Object>) ret;
                DataResponse dataResponse = (DataResponse) responseMap.get("dataResponse");
                List<Map<String, Object>> logParamMapList = (List<Map<String, Object>>) responseMap.get("logParamMap");
                JSONObject dataRequestEntity = (JSONObject) obj[0];
                String uuid = UUID.randomUUID().toString().replaceAll("-", "").toUpperCase();
                commonSaveLog.saveLog(dataRequestEntity, dataResponse, logParamMapList, uuid);
            }
        }
    }

}
```
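`CommonSaveLog.saveLog(...)` itself is not shown in the post. Judging by the consumer, the producer side joins the log fields with `LogInfo.EXCISION` and sends them keyed by `LogInfo.KEY_QUERY_LOG` or `LogInfo.KEY_DETAIL_LOG`. A hypothetical sketch of such a sender using `KafkaTemplate` (the class `KafkaLogSender` and its method are invented for illustration; the real `CommonSaveLog` presumably does something similar internally):

```java
import com.aicrs.engine.common.LogInfo;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

// Hypothetical producer-side counterpart to the consumer above.
@Component
public class KafkaLogSender {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void sendQueryLog(String requestUuid, String queryTime, String customer,
                             String reqCompany, String userName,
                             String errorCode, String errorMessage) {
        // Field order must match makeupQueryLogEntityList in the consumer; joining
        // with LogInfo.EXCISION assumes the delimiter is a literal string, which is
        // inferred from the consumer's split call.
        String value = String.join(LogInfo.EXCISION,
                requestUuid, queryTime, customer, reqCompany,
                userName, errorCode, errorMessage);
        // The record key routes the message to the QUERY_LOG branch of the consumer.
        kafkaTemplate.send(LogInfo.KAFAKA_TOPIC, LogInfo.KEY_QUERY_LOG, value);
    }
}
```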
Spring Boot integrates with Kafka easily through the spring-kafka dependency and a handful of annotations. First, add the Kafka dependency to your application's pom.xml:

```xml
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
```

Next, create a Kafka configuration class containing the Kafka connection settings, such as the broker address and port. Mark it with `@Configuration` and enable Kafka support with `@EnableKafka`.

Then create a message listener to process records received from a Kafka topic: annotate the handler method with `@KafkaListener` and specify the topic(s) to listen to.

Finally, you can send or receive messages anywhere in the application. To send messages, inject a `KafkaTemplate` instance with `@Autowired` and call its send methods.

Those are the basic steps for integrating Kafka as a message queue in a Spring Boot application.
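A minimal end-to-end sketch of those steps, assuming Spring Boot's auto-configured `KafkaTemplate` (the topic name "demo-topic", group id "demo-group", and class names are placeholders):

```java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

// Enables @KafkaListener processing; broker settings come from the standard
// spring.kafka.* properties in application.yml / application.properties.
@Configuration
@EnableKafka
class KafkaDemoConfig {
}

@Component
class KafkaDemoMessaging {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    // Producer side: publish a message to the (placeholder) topic.
    public void send(String message) {
        kafkaTemplate.send("demo-topic", message);
    }

    // Consumer side: invoked for every record that arrives on the topic.
    @KafkaListener(topics = "demo-topic", groupId = "demo-group")
    public void listen(String message) {
        System.out.println("Received: " + message);
    }
}
```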