Getting Started with Apache Kafka Concepts

Introduction

Apache Kafka® is a distributed streaming platform.

Development steps

  • Add the dependency (spring-kafka 2.2.x is the line managed by Spring Boot 2.1.x)
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.2.4.RELEASE</version>
</dependency>
  • Producer configuration
# Kafka producer
spring.kafka.bootstrap-servers=IP1:9092,IP2:9092,IP3:9092
# acks=1: the partition leader acknowledges the write without waiting for replicas
spring.kafka.producer.acks=1
# Batch size in bytes (16 KB)
spring.kafka.producer.batch-size=16384
# Do not retry failed sends
spring.kafka.producer.retries=0
# Total memory (32 MB) available for buffering records awaiting transmission
spring.kafka.producer.buffer-memory=33554432
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
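
The same settings can also be expressed in code. Below is a minimal programmatic sketch (the class and method names are illustrative, not from the original project); the property file above remains what the rest of this post assumes.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.ProducerFactory;

// Illustrative configuration class mirroring the properties above.
@Configuration
public class KafkaProducerConfig {

    @Bean
    public ProducerFactory<String, Object> producerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "IP1:9092,IP2:9092,IP3:9092");
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return new DefaultKafkaProducerFactory<>(props);
    }
}

A KafkaTemplate built from this factory appears in the global-listener section below.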

Producing messages: asynchronous send

Asynchronous sending is fast, but it can push duplicate data: a record may not have been deleted yet when the next query runs, so the same record is picked up and pushed again. (The send is fire-and-forget, and the handling registered in the callbacks has no ordering guarantee.)

@Autowired
private KafkaTemplate<String, Object> kafkaTemplate;

// Illustrative wrapper method; the loop mirrors the synchronous handleData(...) below.
protected void handleDataAsync(String dSource, List<OrderDetail> list) {
    for (OrderDetail orderDetail : list) {
        // Single-element arrays let the callbacks mutate these flags. Note the
        // callbacks run on the producer's I/O thread, so visibility of writes
        // made on other threads is not strictly guaranteed.
        final boolean[] lowFlag = {false};
        final boolean[] ownerFlag = {false};

        // Only send to Kafka when the cleansing level is "OCR update"
        if (Objects.equals(orderDetail.getCleanPrecision(), CleanPrecisionEnum.OCR_UPDATE.getCode())) {
            // Asynchronously send to our own Kafka topic
            ListenableFuture<SendResult<String, Object>> ownerFuture =
                    kafkaTemplate.send(OrderConstants.OWNER_ORDER_TOPIC, JsonUtil.toJson(orderDetail));
            ownerFuture.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
                @Override
                public void onSuccess(SendResult<String, Object> result) {
                    ownerFlag[0] = true;
                    log.info("ownerFuture sent to Kafka, payload: " + result.getProducerRecord().value());

                    // Delete the row only after both topics have been sent successfully
                    if (lowFlag[0] && ownerFlag[0]) {
                        int num = nabsOrderDetailService.deleteOrderDetail(orderDetail, dSource);
                        log.info("ownerFuture deleted rows: " + num);
                    }
                }

                @Override
                public void onFailure(Throwable throwable) {
                    ownerFlag[0] = false;
                    log.error("ownerFuture failed to send to Kafka", throwable);
                }
            });

        } else {
            ownerFlag[0] = true;
        }

        // Asynchronously send to the downstream Kafka topic
        OrderDetailDto dto = BeanUtil.copy(orderDetail, OrderDetailDto.class);
        ListenableFuture<SendResult<String, Object>> lowFuture =
                kafkaTemplate.send(OrderConstants.LOWER_REACHES_ORDER_TOPIC, JsonUtil.toJson(dto));
        lowFuture.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
            @Override
            public void onSuccess(SendResult<String, Object> result) {
                lowFlag[0] = true;
                log.info("lowFuture sent to Kafka, payload: " + result.getProducerRecord().value());

                // Delete the row only after both topics have been sent successfully
                if (lowFlag[0] && ownerFlag[0]) {
                    int num = nabsOrderDetailService.deleteOrderDetail(orderDetail, dSource);
                    log.info("lowFuture deleted rows: " + num);
                }
            }

            @Override
            public void onFailure(Throwable throwable) {
                lowFlag[0] = false;
                log.error("lowFuture failed to send to Kafka", throwable);
            }
        });
    }
}
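
Because the two callbacks communicate through mutable boolean arrays, the delete fires in whichever callback happens to run last, and the flag writes are not synchronized across threads. If the project is on Spring Framework 5.0 or later, a cleaner sketch (illustrative, and ignoring the OCR_UPDATE branch for brevity) is to combine the two futures and delete exactly once when both succeed:

// Inside the same loop over list; assumes Spring Framework 5.0+ for completable().
OrderDetailDto dto = BeanUtil.copy(orderDetail, OrderDetailDto.class);

CompletableFuture<SendResult<String, Object>> ownerCf =
        kafkaTemplate.send(OrderConstants.OWNER_ORDER_TOPIC, JsonUtil.toJson(orderDetail)).completable();
CompletableFuture<SendResult<String, Object>> lowCf =
        kafkaTemplate.send(OrderConstants.LOWER_REACHES_ORDER_TOPIC, JsonUtil.toJson(dto)).completable();

// Runs once, only after BOTH sends are acknowledged; any failure routes to exceptionally().
CompletableFuture.allOf(ownerCf, lowCf)
        .thenRun(() -> {
            int num = nabsOrderDetailService.deleteOrderDetail(orderDetail, dSource);
            log.info("both topics acknowledged, deleted rows: " + num);
        })
        .exceptionally(ex -> {
            log.error("at least one send failed; row kept for the next pass", ex);
            return null;
        });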

Producing messages: synchronous send

Synchronous sending is slower: each call blocks until the send result is returned, which guarantees the sends execute in order.

protected void handleData(String dSource, List<OrderDetail> list) {
    long startTime = System.currentTimeMillis();

    for (OrderDetail orderDetail : list) {
        boolean ownFlag = true;
        boolean lowFlag = true;

        try {
            // Only send to Kafka when the cleansing level is "OCR update"
            if (Objects.equals(orderDetail.getCleanPrecision(), CleanPrecisionEnum.OCR_UPDATE.getCode())) {
                // Synchronously send to our own Kafka topic; get() blocks until the broker acknowledges
                SendResult<String, Object> ownSendResult =
                        kafkaTemplate.send(OrderConstants.OWNER_ORDER_TOPIC, JsonUtil.toJson(orderDetail)).get();
                log.info("ownSendResult sent to Kafka, payload: " + ownSendResult.getProducerRecord().value());
            }
        } catch (Exception e) {
            ownFlag = false;
            log.error("ownSendResult failed to send to Kafka", e);
        }

        try {
            // Synchronously send to the downstream Kafka topic
            OrderDetailDto dto = BeanUtil.copy(orderDetail, OrderDetailDto.class);
            SendResult<String, Object> lowSendResult =
                    kafkaTemplate.send(OrderConstants.LOWER_REACHES_ORDER_TOPIC, JsonUtil.toJson(dto)).get();
            log.info("lowSendResult sent to Kafka, payload: " + lowSendResult.getProducerRecord().value());
        } catch (Exception e) {
            lowFlag = false;
            log.error("lowSendResult failed to send to Kafka", e);
        }

        // Delete the row only if both sends succeeded
        if (ownFlag && lowFlag) {
            int num = nabsOrderDetailService.deleteOrderDetail(orderDetail, dSource);
            log.info("deleted rows: " + num);
        }
    }

    log.info("data source [" + dSource + "], result set size: " + list.size()
            + ", elapsed: " + (System.currentTimeMillis() - startTime) + " ms");
}
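
One caveat with the bare .get(): if the broker is unreachable it can block for a long time while the producer keeps retrying. A common hardening step is to bound the wait (a sketch; the 10-second timeout is an arbitrary choice):

// Bounded synchronous send; requires java.util.concurrent.TimeUnit and
// handling of TimeoutException in the surrounding catch block.
SendResult<String, Object> ownSendResult = kafkaTemplate
        .send(OrderConstants.OWNER_ORDER_TOPIC, JsonUtil.toJson(orderDetail))
        .get(10, TimeUnit.SECONDS); // throws TimeoutException if no acknowledgement in time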

  • Global producer listener
package cn.yto.kafka;

import cn.yto.nabs.common.entity.OrderDetail;
import cn.yto.nabs.common.util.JsonUtils;
import cn.yto.service.NabsOrderDetailService;
import cn.yto.utils.StringUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.stereotype.Component;

/**
 * Producer listener invoked for every send on the KafkaTemplate it is attached to.
 *
 * @Author yanyg
 * @Date 2020/7/17 13:41
 * @Description admin
 */
@Component
@Slf4j
public class KafkaSendResultHandler implements ProducerListener {
    @Autowired
    private NabsOrderDetailService nabsOrderDetailService;

    @Override
    public void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata) {
        log.info("Message send success: " + producerRecord.toString());
        if (StringUtil.isBlank((String) producerRecord.value())) {
            return;
        }

        // The payload was serialized with JsonUtil.toJson, so parse it back and delete the source row
        OrderDetail orderDetail = JsonUtils.json2Object((String) producerRecord.value(), OrderDetail.class);
        int num = nabsOrderDetailService.deleteOrderDetail(orderDetail);
        log.info("deleted rows: " + num);
    }

    @Override
    public void onError(ProducerRecord producerRecord, Exception exception) {
        log.error("Message send error: " + producerRecord.toString(), exception);
    }
}
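
Defining the listener as a bean is only half the story: it must be attached to the KafkaTemplate for onSuccess/onError to fire. Spring Boot's auto-configuration typically injects a single ProducerListener bean into the template it creates; if you build the template by hand, register the listener explicitly. A minimal sketch, reusing the illustrative producerFactory bean from the producer-configuration section:

// Only needed when constructing the template yourself; Spring Boot's
// auto-configured KafkaTemplate usually picks up a ProducerListener bean on its own.
@Bean
public KafkaTemplate<String, Object> kafkaTemplate(ProducerFactory<String, Object> producerFactory,
                                                   KafkaSendResultHandler sendResultHandler) {
    KafkaTemplate<String, Object> template = new KafkaTemplate<>(producerFactory);
    template.setProducerListener(sendResultHandler); // invoke onSuccess/onError for every send
    return template;
}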

  • Consumer configuration
# Kafka consumer
spring.kafka.bootstrap-servers=IP1:9092,IP2:9092,IP3:9092
# Consumer group id
spring.kafka.consumer.group-id=owner-group
# Commit offsets automatically after messages are consumed, so consumption resumes from there next time
spring.kafka.consumer.enable-auto-commit=true
# When no committed offset exists, start consuming from the latest records
spring.kafka.consumer.auto-offset-reset=latest
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
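
With enable-auto-commit=true, offsets are committed on a timer, so a crash after a commit but before processing finishes can lose messages, while a crash before a commit replays them. If commits need to be tied to successful processing, a common alternative (a sketch, not the configuration used above) is manual acknowledgment:

// Sketch: manual offset commit. Assumes these property changes:
//   spring.kafka.consumer.enable-auto-commit=false
//   spring.kafka.listener.ack-mode=manual_immediate
// Acknowledgment is org.springframework.kafka.support.Acknowledgment.
@KafkaListener(topics = {"owner-topic"})
public void listenWithManualAck(ConsumerRecord<String, String> record, Acknowledgment ack) {
    try {
        // ... process record.value() ...
        ack.acknowledge(); // commit the offset only after processing succeeds
    } catch (Exception e) {
        // No acknowledge: the record will be redelivered after a restart/rebalance
        log.error("processing failed, offset not committed", e);
    }
}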

  • Consuming messages
@Component
@Slf4j
public class KafkaMessageConsumer {

    @KafkaListener(topics = {"owner-topic"})
    public void listen(ConsumerRecord<String, String> record) {
        String data = record.value();
        int partition = record.partition();
        long timestamp = record.timestamp();
        long offset = record.offset();
        log.info("kafka consumer message:{};partition:{};offset:{};timestamp:{}", data, partition, offset, timestamp);
    }

}
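
Since the producers above publish JSON strings, a real consumer will usually parse the payload back into its domain object. A sketch using the JsonUtils helper already seen in KafkaSendResultHandler (its null-on-failure behavior is an assumption here):

@KafkaListener(topics = {"owner-topic"})
public void listenOrderDetail(ConsumerRecord<String, String> record) {
    // The producer side sent JsonUtil.toJson(orderDetail), so parse it back.
    OrderDetail orderDetail = JsonUtils.json2Object(record.value(), OrderDetail.class);
    if (orderDetail == null) { // assumption: json2Object returns null on bad input
        log.error("unparseable payload at offset {}: {}", record.offset(), record.value());
        return;
    }
    // ... business processing on orderDetail ...
}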