I couldn't find a good demo for this online; everything I turned up was written for fairly old versions.
Our company is considering introducing Kafka as a message queue. Based on how we actually use it, I put a simple wrapper around the official demo. Since it has not yet run in production, there may still be problems.
It consists mainly of four classes: KafkaCallback, ZqKafkaConsumer, ZqKafkaProduct, and ZqKafkaUtil.
ZqKafkaProduct wraps the producer utilities. ZqKafkaConsumer is an abstract consumer base class.
KafkaCallback pairs with the producer to handle the callback of an asynchronous send; it merges the success and failure callbacks into one interface to keep usage simple.
ZqKafkaUtil is really just a configuration utility class.
InitWeb is the class that initializes the Kafka configuration.
The code follows:
ZqKafkaProduct.java
package com.zq.third.kafka;

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;

public class ZqKafkaProduct {

    private static KafkaTemplate<String, String> kafkaTemplate;

    /** Fire-and-forget send. */
    public static void send(String topic, String data) {
        kafkaTemplate.send(topic, data);
    }

    /** Fire-and-forget send with a message key. */
    public static void send(String topic, String key, String data) {
        kafkaTemplate.send(topic, key, data);
    }

    /** Keyed send with an asynchronous callback for both success and failure. */
    public static void send(String topic, String key, String data, KafkaCallback callback) {
        ListenableFuture<SendResult<String, String>> send = kafkaTemplate.send(topic, key, data);
        send.addCallback(callback, callback);
    }

    /** Unkeyed send with an asynchronous callback for both success and failure. */
    public static void send(String topic, String data, KafkaCallback callback) {
        ListenableFuture<SendResult<String, String>> send = kafkaTemplate.send(topic, data);
        send.addCallback(callback, callback);
    }

    /** Must be called once before any send (InitWeb below does this); autoFlush is enabled. */
    public static void init() {
        kafkaTemplate = new KafkaTemplate<>(ZqKafkaUtil.producerFactory(), true);
    }
}
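As a usage note: all of these methods assume ZqKafkaProduct.init() has already run (InitWeb, shown further down, does this at startup). A minimal sketch of the two fire-and-forget variants, with a made-up topic and payload; the callback variants are demonstrated after KafkaCallback below:

// Hypothetical usage sketch; init() must have been called first.
ZqKafkaProduct.send("test_1", "hello");          // unkeyed: partition chosen by the default partitioner
ZqKafkaProduct.send("test_1", "key-1", "hello"); // keyed: records with the same key land on the same partition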
ZqKafkaConsumer.java
package com.zq.third.kafka;

import org.apache.commons.lang3.StringUtils;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ErrorHandler;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.listener.config.ContainerProperties;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.SchedulingConfigurer;
import org.springframework.scheduling.config.ScheduledTaskRegistrar;

@Configuration
@EnableScheduling
public abstract class ZqKafkaConsumer implements MessageListener<String, String>, ErrorHandler, SchedulingConfigurer {

    private ConcurrentMessageListenerContainer<String, String> container;
    // Optional cron expressions that gate when the container consumes.
    private String startValidCron;
    private String stopValidCron;

    @Override
    public void configureTasks(ScheduledTaskRegistrar taskRegistrar) {
        if (StringUtils.isNotBlank(startValidCron) && StringUtils.isNotBlank(stopValidCron)) {
            // Scheduled consumer: start the container at the beginning of the valid window...
            taskRegistrar.addCronTask(new Runnable() {
                @Override
                public void run() {
                    ZqKafkaConsumer.this.start();
                }
            }, startValidCron);
            // ...and stop it at the end of the valid window.
            taskRegistrar.addCronTask(new Runnable() {
                @Override
                public void run() {
                    ZqKafkaConsumer.this.stop();
                }
            }, stopValidCron);
        } else {
            // Unscheduled consumer: start immediately and keep running.
            this.start();
        }
    }

    public ZqKafkaConsumer(String topic, String startValidCron, String stopValidCron) {
        ContainerProperties containerProps = new ContainerProperties(topic);
        containerProps.setMessageListener(this);
        containerProps.setGenericErrorHandler(this);
        this.container = new ConcurrentMessageListenerContainer<>(ZqKafkaUtil.consumerFactory(), containerProps);
        this.startValidCron = startValidCron;
        this.stopValidCron = stopValidCron;
    }

    public ZqKafkaConsumer(String topic) {
        this(topic, null, null);
    }

    public void start() {
        if (!container.isRunning()) {
            container.start();
        }
    }

    public void stop() {
        if (container.isRunning()) {
            container.stop();
        }
    }
}
KafkaCallback.java
package com.zq.third.kafka;

import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.FailureCallback;
import org.springframework.util.concurrent.SuccessCallback;

/** Merges the success and failure callbacks of an asynchronous send into a single interface. */
public interface KafkaCallback extends SuccessCallback<SendResult<String, String>>, FailureCallback {
}
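Because the two callback interfaces are merged, one anonymous class can handle both outcomes of a send. A minimal sketch, with a hypothetical topic, key, and log messages:

// Hypothetical callback usage: a single object receives both success and failure.
ZqKafkaProduct.send("test_1", "key-1", "hello", new KafkaCallback() {
    @Override
    public void onSuccess(SendResult<String, String> result) {
        // RecordMetadata carries the partition and offset assigned by the broker.
        System.out.println("sent: " + result.getRecordMetadata());
    }

    @Override
    public void onFailure(Throwable ex) {
        System.err.println("send failed: " + ex.getMessage());
    }
});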
ZqKafkaUtil.java
package com.zq.third.kafka;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.ProducerFactory;

public class ZqKafkaUtil {

    private static final Properties pro = new Properties();

    public static void set(String servers, String groupId, String sessionTimeout, String retries) {
        pro.put("servers", servers);
        pro.put("groupId", groupId);
        pro.put("sessionTimeout", sessionTimeout);
        pro.put("retries", retries);
    }

    public static ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    public static ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    private static Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put("bootstrap.servers", pro.get("servers")); // Kafka cluster connection string
        props.put("acks", "all");
        props.put("retries", pro.get("retries")); // producer-side retries on send failure; default 0
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("request.timeout.ms", 100);
        props.put("timeout.ms", 100);
        props.put("buffer.memory", 33554432); // message buffer pool size
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        return props;
    }

    public static Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put("bootstrap.servers", pro.get("servers"));
        props.put("fetch.min.bytes", "1"); // minimum bytes per fetch; the consumer waits until this much has accumulated. Default 1, i.e. fetch as soon as a single record is available
        props.put("max.partition.fetch.bytes", 1024 * 1024 * 1024); // maximum bytes fetched per partition per request; default 1 MB
        props.put("group.id", pro.get("groupId")); // consumers in the same group never receive the same message twice, while each group receives every message. Note: a group should not have more consumers than partitions
        props.put("enable.auto.commit", "true"); // auto-commit offsets of fetched messages. A committed offset marks the message as consumed; consumers in this group will not see it again (unless the offset is rewound manually). Default true
        props.put("auto.commit.interval.ms", "1000"); // auto-commit interval in ms; default 5000
        props.put("session.timeout.ms", pro.get("sessionTimeout")); // session expiry timeout
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return props;
    }
}
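One side note on the raw string keys used above: the kafka-clients jar provides constants for them (ProducerConfig, ConsumerConfig), which guards against typos. For example, the first few producer entries could equivalently be written as:

// Equivalent to the string literals above, but checked at compile time.
props.put(org.apache.kafka.clients.producer.ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, pro.get("servers"));
props.put(org.apache.kafka.clients.producer.ProducerConfig.ACKS_CONFIG, "all");
props.put(org.apache.kafka.clients.producer.ProducerConfig.RETRIES_CONFIG, pro.get("retries"));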
InitWeb.java
package com.zq.common;

import javax.annotation.Resource;

import org.springframework.beans.factory.InitializingBean;
import org.springframework.context.support.ResourceBundleMessageSource;
import org.springframework.stereotype.Component;

import com.zq.third.kafka.ZqKafkaProduct;
import com.zq.third.kafka.ZqKafkaUtil;

@Component
public class InitWeb implements InitializingBean {

    @Resource
    private ResourceBundleMessageSource messageSource;

    @Override
    public void afterPropertiesSet() throws Exception {
        // Pull the Kafka settings out of the message bundle, then build the producer.
        String servers = messageSource.getMessage("bootstrap.servers", null, null);
        String groupId = messageSource.getMessage("group.id", null, null);
        String sessionTimeout = messageSource.getMessage("session.timeout.ms", null, null);
        String retries = messageSource.getMessage("retries", null, null);
        ZqKafkaUtil.set(servers, groupId, sessionTimeout, retries);
        ZqKafkaProduct.init();
    }
}
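InitWeb resolves the four settings through the application's ResourceBundleMessageSource, so they have to exist in the message bundle. A hypothetical set of bundle entries (the values are examples only; the keys must match the ones queried above):

bootstrap.servers=192.168.1.100:9092,192.168.1.101:9092
group.id=zq-demo-group
session.timeout.ms=30000
retries=3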
Below are the test consumer implementations.
MyTestConsumer.java
package com.zq.service;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.stereotype.Component;

import com.zq.third.kafka.ZqKafkaConsumer;

@Component
public class MyTestConsumer extends ZqKafkaConsumer {

    public MyTestConsumer() {
        super("test_1");
    }

    @Override
    public void onMessage(ConsumerRecord<String, String> record) {
        System.out.println(record);
    }

    @Override
    public void handle(Exception thrownException, ConsumerRecord<?, ?> record) {
        // Handle consumption errors here.
    }
}
This one has no scheduler; it consumes the topic test_1.
MyTest2Consumer.java
package com.zq.service;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.stereotype.Component;

import com.zq.third.kafka.ZqKafkaConsumer;

@Component
public class MyTest2Consumer extends ZqKafkaConsumer {

    public MyTest2Consumer() {
        super("test_1", "0 0 8-18 * * ?", "10 0 18 * * ?");
    }

    @Override
    public void onMessage(ConsumerRecord<String, String> data) {
        System.out.println("I am the second consumer: " + data);
    }

    @Override
    public void handle(Exception thrownException, ConsumerRecord<?, ?> data) {
        // Handle consumption errors here.
    }
}
This one only consumes between 8:00 and 18:00: the start cron ("0 0 8-18 * * ?") fires on the hour from 8:00 through 18:00 (start() is a no-op when the container is already running), and the stop cron ("10 0 18 * * ?") stops consumption at 10 seconds past 18:00.