kafka消费者的共用实现逻辑

这个是在springboot下用的哦。

先定义一个接口

/**
 * Contract for Kafka message consumers: every consumer implements this
 * interface, which is the single entry point through which raw message
 * payloads are delivered.
 */
@FunctionalInterface
public interface KafkaMessageProcess {

	/**
	 * Handles one raw message value pulled from Kafka.
	 *
	 * @param message the message payload as a string
	 * @throws Exception if the message cannot be processed; the caller decides
	 *                   whether to log, retry or discard
	 */
	void process(String message) throws Exception;

}

加载kafka配置文件

package com.sea.kafka.receiver;

import java.io.InputStream;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import javax.annotation.PostConstruct;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import com.amarsoft.sea.kafka.KafkaConsumerProcess;
import com.amarsoft.sea.kafka.MessageProcessor;

/**
 */
/**
 * Bootstraps the Kafka consumer for the "yszx" enterprise IC-alteration topic:
 * loads the consumer properties from the classpath, builds a
 * {@link KafkaConsumerProcess} and starts it on a dedicated polling thread.
 */
@Component
public class YszxEntICAlterMessageRecevier {

    private static final Logger LOGGER = LoggerFactory.getLogger(YszxEntICAlterMessageRecevier.class);

	/** Classpath location of the Kafka consumer properties file. */
	@Value("${kafka.configpath:/kafka.properties}")
	private String configPath;

	/** Configured thread count — currently informational only; a single polling thread is started. */
	@Value("${kafka.threadnum:5}")
	private int threadNum;

	/** Topic carrying the IC-alteration messages. */
	@Value("${kafka.yszxtopic:hubservice_yszx_ic_alter}")
	private String saveDataTopic;

    @Autowired
    private MessageProcessor messageProcessor;

    /** Background thread running the polling loop; kept so it can be inspected later. */
    private Thread thread;

	/**
	 * Loads the Kafka properties, creates the consumer and starts the polling
	 * thread. Invoked once by Spring after dependency injection.
	 *
	 * @throws Exception if the configuration resource is missing or unreadable
	 */
	@PostConstruct
	public void init() throws Exception {
		Properties props = new Properties();
		try (InputStream in = YszxEntICAlterMessageRecevier.class.getResourceAsStream(configPath)) {
			// getResourceAsStream returns null (rather than throwing) when the
			// resource is absent — fail fast with a clear message instead of an
			// obscure NullPointerException from props.load(null).
			if (in == null) {
				throw new IllegalStateException("kafka config not found on classpath: " + configPath);
			}
			props.load(in);
			LOGGER.info("加载YszxEntICAlterMessageRecevier的kafka配置文件成功:{}", props);
		} catch (Exception e) {
			LOGGER.error("加载YszxEntICAlterMessageRecevier的kafka配置文件失败," + e.getMessage(), e);
			throw e;
		}
		LOGGER.info("kafka消费者初始化,consumers=YszxEntICAlterMessageRecevier,threadnum={},topic={}", threadNum, saveDataTopic);
		KafkaConsumerProcess listener = new KafkaConsumerProcess(saveDataTopic, new KafkaConsumer<>(props), messageProcessor);
		thread = new Thread(listener, "kafka-consumer-" + saveDataTopic);
		thread.start();
		LOGGER.info("YszxEntICAlterMessageRecevier完成kafka消费者的初始化...");
	}
}

再来一个接收kafka的客户端,先将消息一次性接收,接收完成放到队列即为消费成功,但是其实一般消费者不需要放到队列,来了就直接消费了

package com.sea.kafka;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * 监听kafka的消息,指定topic和消费者
 *
 */
/**
 * Runnable polling loop: subscribes to a single topic and forwards every
 * record value to a {@link MessageProcessor} queue.
 *
 * <p>Offsets are committed manually, one partition at a time, after the
 * partition's batch has been handed off — so a record counts as "consumed"
 * once it has been enqueued.
 */
public class KafkaConsumerProcess implements Runnable {

	private static final Logger LOGGER = LoggerFactory.getLogger(KafkaConsumerProcess.class);

	private final KafkaConsumer<String, String> consumer;

	private String topic;

	private final MessageProcessor messageProcessor;

	/** Loop guard; flipped to false by {@link #shutdown()} to stop polling gracefully. */
	private volatile boolean running = true;

	public KafkaConsumerProcess(String topic, KafkaConsumer<String, String> consumer, MessageProcessor messageProcessor) {
		this.topic = topic;
		this.consumer = consumer;
		this.messageProcessor = messageProcessor;
	}

	public String getTopic() {
		return topic;
	}

	public void setTopic(String topic) {
		this.topic = topic;
	}

	/**
	 * Requests a graceful stop: the loop exits after the current poll returns
	 * (at most ~1s, the poll timeout) and the consumer is closed in run()'s
	 * finally block.
	 */
	public void shutdown() {
		running = false;
	}

	@Override
	public void run() {
		try {
			consumer.subscribe(Arrays.asList(topic));
			while (running) {
				process(consumer);
			}
		} catch (Exception e) {
			LOGGER.error(e.getMessage(), e);
		} finally {
			// Always release the consumer's TCP connections and group membership.
			if (this.consumer != null) {
				this.consumer.close();
			}
			LOGGER.info("kafka-consumer-Thread-shutdown success.....");
		}
	}

	/**
	 * Drains one poll batch: hands every record value to the message processor,
	 * then commits the advanced offset for each partition individually.
	 *
	 * @param consumer the consumer to poll; seeks and commits go through the same instance
	 */
	private void process(KafkaConsumer<String, String> consumer) {
		ConsumerRecords<String, String> records = consumer.poll(1000);
		Set<TopicPartition> partitions = records.partitions();
		if (partitions == null || partitions.isEmpty()) {
			return;
		}
		for (TopicPartition partition : partitions) {
			List<ConsumerRecord<String, String>> batch = records.records(partition);
			if (batch == null || batch.isEmpty()) {
				continue;
			}
			long nextOffset = 0; // offset of the record following the last one consumed
			for (ConsumerRecord<String, String> record : batch) {
				nextOffset = record.offset() + 1;
				messageProcessor.add(record.value());
			}
			consumer.seek(partition, nextOffset); // pin the in-memory position explicitly
			// Commit only this partition's offset, not every partition returned by the poll.
			consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(nextOffset)));
		}
	}

}

然后再来一个消息监听线程,一旦有消息过来,就从消息队列拿消息,并且消费

package com.sea.kafka;

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import javax.annotation.PostConstruct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Component;

/**
 * In-memory hand-off between the Kafka polling thread and the actual message
 * handler: payloads are appended to an unbounded queue and drained by one
 * dedicated worker thread.
 *
 * <p>NOTE(review): the queue is unbounded, so a slow handler lets it grow
 * without limit — consider a bounded queue if back-pressure is needed.
 */
@Component
public class MessageProcessor {

	private static final Logger LOGGER = LoggerFactory.getLogger(MessageProcessor.class);

	/** Unbounded FIFO of raw message payloads awaiting processing. */
	private final BlockingQueue<String> queue = new LinkedBlockingQueue<>();

	@Autowired
    @Qualifier("yszxEntICAlterMessageHandler")
    private KafkaMessageProcess messageHandler;

	/** The single worker thread that drains the queue. */
	private Thread thread;

	/** Starts the worker thread once Spring has finished injection. */
	@PostConstruct
	public void init() {
		thread = new Thread(new Work(), "消息处理线程");
		thread.start();
	}

	/**
	 * Enqueues one message for asynchronous processing.
	 *
	 * @param message the raw Kafka record value
	 */
	public void add(String message) {
		queue.add(message);
	}

	/** Worker loop: blocks on the queue and feeds each message to the handler. */
	private class Work implements Runnable {

		@Override
		public void run() {
			while (true) {
				String message;
				try {
					message = queue.take();
				} catch (InterruptedException ie) {
					// Restore the interrupt flag and exit, so the worker thread
					// can actually be stopped (the old catch-all swallowed this).
					Thread.currentThread().interrupt();
					LOGGER.info("message worker interrupted, exiting");
					return;
				}
				try {
					messageHandler.process(message);
				} catch (Exception e) {
					// One bad message must not kill the worker; log and move on.
					LOGGER.error("failed to process message", e);
				}
			}
		}
	}

}

最后定义一个消费者实现类,来指定这波消息要怎么操作,保存还是什么.

package com.sea.kafka.handler;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import com.amarsoft.sea.kafka.KafkaMessageProcess;
import com.amarsoft.sea.processor.icdata.icalter.yszx.EntICAlterYszx;

/**
 * 解析消息处理器
 */
/**
 * Message handler for the "yszx" enterprise IC-alteration topic: delegates
 * each raw payload to {@link EntICAlterYszx} for parsing and persistence.
 */
@Component("yszxEntICAlterMessageHandler")
public class YszxEntICAlterMessageHandler implements KafkaMessageProcess {

	private static final Logger LOGGER = LoggerFactory.getLogger(YszxEntICAlterMessageHandler.class);

	@Autowired
	private EntICAlterYszx icAlter;

	/**
	 * Processes one message. Failures are deliberately logged and swallowed so
	 * that a single bad record does not stop the consuming pipeline.
	 *
	 * @param message raw Kafka record value
	 */
	@Override
	public void process(String message) throws Exception {
		try {
			icAlter.processMsg(message);
		} catch (Exception e) {
			LOGGER.error("消息处理出错!", e);
		}
	}
}

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
Kafka消费者代码实现key的过程如下: 1. 创建KafkaConsumer对象,指定key和value的反序列化器。 2. 调用subscribe()方法订阅主题。 3. 调用poll()方法获取消息记录,返回的是ConsumerRecords对象。 4. 遍历ConsumerRecords对象,获取每个消息记录的key和value。 以下是一个简单的Kafka消费者代码实现key的示例: ``` import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.serialization.StringDeserializer; import java.util.Collections; import java.util.Properties; public class KafkaConsumerExample { public static void main(String[] args) { Properties props = new Properties(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group"); props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props); consumer.subscribe(Collections.singletonList("test-topic")); while (true) { ConsumerRecords<String, String> records = consumer.poll(1000); for (ConsumerRecord<String, String> record : records) { String key = record.key(); String value = record.value(); System.out.println("key: " + key + ", value: " + value); } } } } ```
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值