Kafka Middleware

Configuration in application.yml (bound onto the KafkaConfig bean shown below)

kafka:
  consumerTopic: Java_Pic
  consumerConfig: kafka_consumer.properties
  consumerCommit: true
  producerTopic: Java_Pic
  producerConfig: kafka_producer.properties
  producerEnable: true

Consumer configuration file (kafka_consumer.properties)

# Consumer group id
group.id=GP_STRUCT_K2GBCL_01
# Broker list (truncated in the original post)
bootstrap.servers=10.20
# Legacy setting for the old Scala consumer; the new Java consumer ignores it (truncated in the original post)
zookeeper.connect=10
# Offsets are committed manually via commitSync() in the service
enable.auto.commit=false
# Only takes effect when enable.auto.commit=true
auto.commit.interval.ms=1000
# Start from the earliest available offset when the group has no committed offset
auto.offset.reset=earliest
# At most 100 records per poll()
max.poll.records=100
# Consumer is considered dead after 100 s without heartbeats
session.timeout.ms=100000
# Producer-side setting (100 MB); the consumer ignores it
max.request.size=104857600

Producer configuration file (kafka_producer.properties)

# Broker list (left blank in the original post)
bootstrap.servers=
# The partition leader must acknowledge the write; replicas are not awaited
acks=1
# Maximum size of a single request, here 100 MB
max.request.size=104857600
# Memory available for buffering unsent records, here 100 MB
buffer.memory=104857600
# Retry transient send failures up to 3 times
retries=3

KafkaConfig

package cn.com.wind.fm.day7.config;

import java.util.LinkedList;
import java.util.List;

import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

@Component
@ConfigurationProperties(prefix = "kafka")
public class KafkaConfig {
    //Topic(s) the consumer subscribes to
    private String consumerTopic;
    //Path of the consumer properties file
    private String consumerConfig;
    //Whether the consumer commits offsets after processing
    private boolean consumerCommit;
    //Topic the producer writes to
    private String producerTopic;
    //Path of the producer properties file
    private String producerConfig;
    //On/off switch for the producer
    private boolean producerEnable;

    public String getConsumerTopic() {
        return consumerTopic;
    }

    public void setConsumerTopic(String consumerTopic) {
        this.consumerTopic = consumerTopic;
    }

    public String getConsumerConfig() {
        return consumerConfig;
    }

    public void setConsumerConfig(String consumerConfig) {
        this.consumerConfig = consumerConfig;
    }

    public boolean isConsumerCommit() {
        return consumerCommit;
    }

    public void setConsumerCommit(boolean consumerCommit) {
        this.consumerCommit = consumerCommit;
    }

    public String getProducerTopic() {
        return producerTopic;
    }

    public void setProducerTopic(String producerTopic) {
        this.producerTopic = producerTopic;
    }

    public String getProducerConfig() {
        return producerConfig;
    }

    public void setProducerConfig(String producerConfig) {
        this.producerConfig = producerConfig;
    }

    public boolean isProducerEnable() {
        return producerEnable;
    }

    public void setProducerEnable(boolean producerEnable) {
        this.producerEnable = producerEnable;
    }

    /**
    *@Description Splits a comma-separated topic string into a list
    *@Param [topics] comma-separated topic names
    *@Return java.util.List<java.lang.String> the topic list
    *@Author wsun.Frank
    *@Date 2020/8/12
    */
    public static List<String> getTopicLists(String topics) {
        String[] items = topics.split(",");
        List<String> result = new LinkedList<>();
        for (String item : items) {
            if (!StringUtils.isBlank(item)) {
                result.add(StringUtils.trim(item));
            }
        }
        return result;
    }
}
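
Because getTopicLists is static and skips blank entries, the consumerTopic value in application.yml can list several topics. A quick illustration of its behavior (the second topic name here is made up for the example):

// With kafka.consumerTopic set to "Java_Pic, Other_Topic, " (Other_Topic is hypothetical):
List<String> topics = KafkaConfig.getTopicLists("Java_Pic, Other_Topic, ");
// topics -> [Java_Pic, Other_Topic]; blank entries are dropped and names are trimmed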

The Kafka service implementation
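
The KafkaService interface itself is not included in the post; judging from the @Override methods in the implementation, a minimal sketch of it would be:

package cn.com.wind.fm.day7.service;

public interface KafkaService {
    // Load the properties files and create the consumer and producer
    void init();
    // Send one key/value message to the configured producer topic
    void produce(String key, String value);
    // Blocking poll-and-process loop; runs until reqClose() is called
    void consume();
    // Rewind the group's offsets on the consumer topics
    void resetOffset();
}

The implementation follows: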

package cn.com.wind.fm.day7.service;

import cn.com.wind.fm.day7.config.KafkaConfig;
import cn.com.wind.fm.day7.util.PropertyFileUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Future;

@Service
public class KafkaServiceImpl implements KafkaService {
    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaServiceImpl.class);
    @Autowired
    private KafkaConfig config;

    private volatile boolean reqClose;
    private volatile boolean closed;

    private Consumer<String, String> messageConsumer;
    private Producer<String, String> messageProducer;

    /**
    *@Description Initializes the Kafka clients: loads the properties files and creates the consumer and producer
    *@Param []
    *@Return void
    *@Author wsun.Frank
    *@Date 2020/8/12
    */
    @Override
    public void init() {
        LOGGER.info("初始化生产者..");
        if (messageProducer == null && config.isProducerEnable()) {
            messageProducer = new KafkaProducer<>(PropertyFileUtil.load(config.getProducerConfig()),
                    new StringSerializer(), new StringSerializer());
        }


        LOGGER.info("初始化消费者..");
        if (messageConsumer == null) {
            messageConsumer = new KafkaConsumer<>(PropertyFileUtil.load(config.getConsumerConfig()),
                    new StringDeserializer(), new StringDeserializer());
            messageConsumer.subscribe(config.getTopicLists(config.getConsumerTopic()));
        }
    }

    /**
    *@Description Demonstrates the three ways to send a Kafka message
    *@Param [key, value] the message key and the message value
    *@Return void
    *@Author wsun.Frank
    *@Date 2020/8/12
    */
    @Override
    public void produce(String key, String value) {
        // Skip empty values
        if (StringUtils.isBlank(value)) {
            return;
        }
        if (messageProducer != null) {
            String topic = config.getProducerTopic();

            // 1. Fire-and-forget: send without checking the result
            messageProducer.send(new ProducerRecord<>(topic, key, value));

            // 2. Synchronous send: block on the Future until the broker responds.
            // Read the metadata inside the try block; on failure future.get() throws
            // and there is no metadata to read.
            Future<RecordMetadata> future = messageProducer.send(new ProducerRecord<>(topic, key, value));
            try {
                RecordMetadata recordMetadata = future.get();
                // The returned metadata carries details such as partition and offset
                long offset = recordMetadata.offset();
                int partition = recordMetadata.partition();
                LOGGER.info(partition + "_" + offset);
            } catch (Exception e) {
                LOGGER.error("Producer send failed:", e);
            }

            // 3. Asynchronous send with a callback (non-blocking), as in the official example.
            // The Java producer's send() returns a Future the caller can query later;
            // the callback is the push-style alternative.
            // RecordMetadata and Exception are never both null:
            // on success the exception is null, on failure the metadata is null.
            messageProducer.send(
                    new ProducerRecord<>(topic, key, value),
                    new Callback() {
                        @Override
                        public void onCompletion(RecordMetadata metadata, Exception ex) {
                            if (ex != null) {
                                LOGGER.error("Producer send failed:", ex);
                            } else {
                                // Send succeeded
                                LOGGER.info(metadata.partition() + "_" + metadata.offset());
                            }
                        }
                    });
            LOGGER.info("send() calls issued");
        }
    }

    /**
    *@Description The consumer's poll-and-process loop
    *@Param []
    *@Return void
    *@Author wsun.Frank
    *@Date 2020/8/12
    */
    @Override
    public void consume() {
        // Error counter used to throttle logging
        long retry = 0;
        while (!reqClose) {
            try {
                // poll() pulls the next batch of records
                ConsumerRecords<String, String> records = messageConsumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    if (reqClose) {
                        LOGGER.info("Leaving the consume loop..");
                        break;
                    }
                    // Hand each record to the business handler
                    consumeRecord(record.key(), record.value());
                }
                if (config.isConsumerCommit()) {
                    messageConsumer.commitSync();
                }
            } catch (Exception ex) {
                // When Kafka is unreachable, errors repeat on every poll; with the 1 s
                // poll timeout, logging every 60th failure gives roughly one log per minute
                if (retry % 60 == 0) {
                    LOGGER.error("Kafka Operation Failed", ex);
                }
                retry++;
            }
        }
        // Release resources
        this.close();
    }

    /**
    *@Description Business handler for a consumed message; a stub for now
    *@Param [msg] the Kafka message
    *@Return void
    *@Author wsun.Frank
    *@Date 2020/8/12
    */
    public void consumeRecord(String key, String msg) {
        LOGGER.info("Received Kafka message, key: {}, value: {}", key, msg);
    }

    
    /**
    *@Description Rewinds the consumer group's offsets
    *@Param []
    *@Return void
    *@Author wsun.Frank
    *@Date 2020/8/13
    */
    @Override
    public void resetOffset() {
        Consumer<String, String> consumer = new KafkaConsumer<>(PropertyFileUtil.load(config.getConsumerConfig()),
                new StringDeserializer(), new StringDeserializer());
        List<String> topics = KafkaConfig.getTopicLists(config.getConsumerTopic());
        for (String topic : topics) {
            consumer.subscribe(Arrays.asList(topic));
            // poll() once so the group coordinator assigns partitions to this consumer
            consumer.poll(2000);

            Set<TopicPartition> assigned = consumer.assignment();
            Map<TopicPartition, Long> endMap = consumer.endOffsets(assigned);
            Map<TopicPartition, Long> beginMap = consumer.beginningOffsets(assigned);

            // Rewind each partition to 1,300,000 records before its end offset,
            // clamped to the partition's beginning offset
            long singleTpLagSize = 1300000;
            for (TopicPartition tp : assigned) {
                long endOffset = endMap.get(tp);
                long beginOffset = beginMap.get(tp);
                long aimOffset = endOffset - singleTpLagSize;
                if (aimOffset > 0 && aimOffset >= beginOffset) {
                    consumer.seek(tp, aimOffset);
                } else {
                    consumer.seek(tp, beginOffset);
                }
            }
            // Commit the rewound positions for the group
            consumer.commitSync();
        }
        consumer.close();
    }

    public boolean isClosed() {
        return closed;
    }

    private void close() {
        try {
            if (messageConsumer != null) {
                messageConsumer.close();
            }
            if (messageProducer != null) {
                messageProducer.close();
            }
        } catch (Exception ex) {
            LOGGER.error("Failed to close Kafka clients", ex);
        }
        closed = true;
        LOGGER.info("Kafka resources released!");
    }

    public void reqClose() {
        this.reqClose = true;
    }
}
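
PropertyFileUtil is the author's own helper and its source is not shown in the post. A minimal sketch of what its load method would need to do, assuming the configured paths (kafka_consumer.properties, kafka_producer.properties) sit on the classpath:

package cn.com.wind.fm.day7.util;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class PropertyFileUtil {

    // Loads a .properties file from the classpath into a Properties object
    public static Properties load(String path) {
        Properties props = new Properties();
        try (InputStream in = PropertyFileUtil.class.getClassLoader().getResourceAsStream(path)) {
            if (in == null) {
                throw new IllegalArgumentException("Config file not found on classpath: " + path);
            }
            props.load(in);
        } catch (IOException e) {
            throw new IllegalStateException("Failed to load config file: " + path, e);
        }
        return props;
    }
}

The post also never shows how the service is started. One hypothetical way to wire it into a Spring Boot application (the KafkaRunner class below is not from the original): call init(), run the blocking consume() loop on its own thread, and request a clean shutdown via reqClose() when the JVM exits:

package cn.com.wind.fm.day7;

import org.springframework.boot.CommandLineRunner;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import cn.com.wind.fm.day7.service.KafkaServiceImpl;

@Configuration
public class KafkaRunner {

    @Bean
    public CommandLineRunner kafkaStarter(KafkaServiceImpl kafkaService) {
        return args -> {
            kafkaService.init();
            // consume() blocks, so run it on a dedicated thread
            new Thread(kafkaService::consume, "kafka-consumer").start();
            // Ask the loop to exit on JVM shutdown; close() then runs inside consume()
            Runtime.getRuntime().addShutdownHook(new Thread(kafkaService::reqClose));
        };
    }
}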
