Using InitializingBean's afterPropertiesSet method to initialize a Kafka consumer
package com.suning.fsp.common.kfk.lcbas;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Value;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
/**
 * Consumer for aggregated-data feedback (receipt) messages.
 *
 * @author 16041099
 */
public abstract class BasAggregationDataFeedbackConsumer implements InitializingBean, DisposableBean {
private static final Logger LOGGER = LoggerFactory.getLogger(BasAggregationDataFeedbackConsumer.class);
/**
* zkConnect
*/
@Value("${lcbas.zkConnect:#{null}}")
private String zkConnect;
/**
* sessionTimeOut
*/
@Value("${lcbas.sessionTimeOut:#{null}}")
private String sessionTimeOut;
/**
* syncTime
*/
@Value("${lcbas.syncTime:#{null}}")
private String syncTime;
/**
* commitInterval
*/
@Value("${lcbas.commitInterval:#{null}}")
private String commitInterval;
/**
* offsetReset
*/
@Value("${lcbas.offsetReset:#{null}}")
private String offsetReset;
/**
 * topic, set by the concrete subclass
 */
private String topic;
/**
 * number of consumer threads, set by the concrete subclass
 */
private Integer countThread;
/**
 * groupId, may be overridden by the concrete subclass
 */
@Value("${lcbas.groupId:#{null}}")
private String groupId;
/**
 * thread pool for the consumer threads
 */
private ExecutorService executor;
/**
 * consumer configuration properties
 */
private Properties props;
/**
 * consumer connector
 */
private ConsumerConnector consumerConnector;
@Override
public void afterPropertiesSet() throws Exception {
// initialize the configuration properties
initProperties();
// listen to Kafka and start fetching messages
getKafkaMsg();
}
/**
 * Fetches messages from Kafka: creates the consumer connector and starts one
 * worker thread per stream.
 */
public void getKafkaMsg() {
// consumerConfig
if (null == props) {
throw new ExceptionInInitializerError("init props error!");
}
ConsumerConfig consumerConfig = new ConsumerConfig(props);
consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
HashMap<String, Integer> map = new HashMap<String, Integer>();
map.put(topic, countThread);
Map<String, List<KafkaStream<byte[], byte[]>>> topicMessageStreams = consumerConnector
.createMessageStreams(map);
executor = Executors.newFixedThreadPool(countThread);
for (List<KafkaStream<byte[], byte[]>> streams : topicMessageStreams.values()) {
for (final KafkaStream<byte[], byte[]> stream : streams) {
executor.submit(new Runnable() {
public void run() {
ConsumerIterator<byte[], byte[]> it = stream.iterator();
while (it.hasNext()) {
MessageAndMetadata<byte[], byte[]> data = it.next();
String kafkaMsg = new String(data.message(), StandardCharsets.UTF_8); // decode with an explicit charset instead of the platform default
LOGGER.info("From topic:{} msg :{}", topic, kafkaMsg);
try {
processMsg(kafkaMsg);
} catch (RuntimeException e) {
LOGGER.error("do topic msg error.", e);
}
}
}
});
}
}
}
/**
 * Initializes the consumer configuration properties.
 */
public void initProperties() {
if (null == props) {
props = new Properties();
props.put("zookeeper.connect", zkConnect);
LOGGER.debug("zookeeper.connect = {}", zkConnect);
// group.id identifies the consumer group
props.put("group.id", this.groupId);
// ZooKeeper session timeout
props.put("zookeeper.session.timeout.ms", sessionTimeOut);
props.put("zookeeper.sync.time.ms", syncTime);
props.put("auto.commit.interval.ms", commitInterval);
props.put("auto.offset.reset", offsetReset);
// serializer class
props.put("serializer.class", "kafka.serializer.StringEncoder");
props.put("rebalance.max.retries", "5");
props.put("rebalance.backoff.ms", "3100"); // backoff between consecutive retries when a rebalance occurs
}
}
/**
* @param zkConnect
* the zkConnect to set
*/
public void setZkConnect(String zkConnect) {
this.zkConnect = zkConnect;
}
/**
* @param sessionTimeOut
* the sessionTimeOut to set
*/
public void setSessionTimeOut(String sessionTimeOut) {
this.sessionTimeOut = sessionTimeOut;
}
/**
* @param syncTime
* the syncTime to set
*/
public void setSyncTime(String syncTime) {
this.syncTime = syncTime;
}
/**
* @param commitInterval
* the commitInterval to set
*/
public void setCommitInterval(String commitInterval) {
this.commitInterval = commitInterval;
}
/**
* @param offsetReset
* the offsetReset to set
*/
public void setOffsetReset(String offsetReset) {
this.offsetReset = offsetReset;
}
/**
* @param countThread
* the countThread to set
*/
public void setCountThread(Integer countThread) {
this.countThread = countThread;
}
/**
* @param topic
* the topic to set
*/
public void setTopic(String topic) {
this.topic = topic;
}
/**
* @param groupId
* the groupId to set
*/
public void setGroupId(String groupId) {
this.groupId = groupId;
}
/**
 * Business-specific message handling, implemented by the concrete subclass.
 *
 * @param kafkaMsg the raw message payload
 */
public abstract void processMsg(String kafkaMsg);
/**
 * Destroy callback: shuts down the consumer connector and the thread pool.
 */
@Override
public void destroy() throws Exception {
if (null != consumerConnector) {
try {
consumerConnector.shutdown();
LOGGER.debug("停止消费线程组...");
} catch (RuntimeException e) {
LOGGER.error("停止消费线程组异常.", e);
}
}
if (null != executor) {
executor.shutdownNow();
LOGGER.debug("停止线程池...");
}
}
}
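In a real deployment the lcbas.* placeholders above are resolved from an externalized properties file, and the topic and thread count are supplied by a concrete subclass (shown next). Purely for illustration, the base class can also be wired up by hand through its public setters. The following sketch is not part of the original code, and every concrete value in it (ZooKeeper address, group id, topic name, timeouts) is an assumption:
// Illustrative manual wiring of the consumer, outside of Spring. All values below are assumptions.
import com.suning.fsp.common.kfk.lcbas.BasAggregationDataFeedbackConsumer;

public class ConsumerSmokeTest {
    public static void main(String[] args) throws Exception {
        BasAggregationDataFeedbackConsumer consumer = new BasAggregationDataFeedbackConsumer() {
            @Override
            public void processMsg(String kafkaMsg) {
                // business handling would go here
                System.out.println("received: " + kafkaMsg);
            }
        };
        consumer.setZkConnect("127.0.0.1:2181");   // assumed local ZooKeeper
        consumer.setGroupId("lcbas-demo-group");   // assumed consumer group
        consumer.setSessionTimeOut("4000");
        consumer.setSyncTime("200");
        consumer.setCommitInterval("1000");
        consumer.setOffsetReset("smallest");       // the old high-level consumer accepts smallest/largest
        consumer.setTopic("lcbas-demo-topic");     // assumed topic name
        consumer.setCountThread(2);

        consumer.afterPropertiesSet();             // runs initProperties() and getKafkaMsg()
        Thread.sleep(60000L);                      // let the consumer threads run for a while
        consumer.destroy();                        // shuts down the connector and the thread pool
    }
}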
Implementation of the consumption logic
package com.suning.fsp.fund.aggreation.impl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import com.suning.fsp.business.lcbas.LcbasResponseCode;
import com.suning.fsp.common.kfk.lcbas.BasAggregationDataFeedbackConsumer;
import com.suning.fsp.common.kfk.lcbas.dto.BasAggregationDataFeedbackDto;
import com.suning.fsp.common.utils.LcbasUtil;
import com.suning.fsp.fund.aggreation.common.AggrConstants.FeedBackTopic;
import com.suning.fsp.fund.aggreation.dao.FundAggreationPushRecordDao;
import com.suning.fsp.fund.aggreation.intf.AggregationDataFeedbackConsumer;
/**
 * Service implementation that handles batch feedback (receipt) messages for bill aggregation.
 *
 * @author liu ao (17020412)
 * @created 2017-12-05 09:57:30
 */
@Service("batchFeedbackConsumer")
public class AggrBatchFeedbackConsumerImpl extends
BasAggregationDataFeedbackConsumer implements AggregationDataFeedbackConsumer{
private static final Logger LOG = LoggerFactory.getLogger(AggrBatchFeedbackConsumerImpl.class);
/**
 * number of consumer threads for receiving batch feedback messages
 */
@Value("${lcbas.batch.countThread}")
private Integer countThread;
@Autowired
private FundAggreationPushRecordDao aggreationPushRecordDao;
@Override
public void afterPropertiesSet() throws Exception {
super.setCountThread(countThread); // number of consumer threads for the batch feedback topic
super.setTopic(FeedBackTopic.BATCH); // batch feedback topic
// initialize the configuration properties
initProperties();
// listen to Kafka and start fetching messages
getKafkaMsg();
}
// Note: the topic and thread count are set in afterPropertiesSet rather than in a
// constructor, because the @Value-injected countThread is still null at construction time.
@Override
public void processMsg(String kafkaMsg) {
LOG.info("收到账单聚合批量交易回执:{}", kafkaMsg);
try {
BasAggregationDataFeedbackDto dto = LcbasUtil.buildFeedback(kafkaMsg);
if(dto == null) {
return;
}
//server-side error, ignore it
if(LcbasResponseCode.SERVER_ERROR.equals(dto.getRespCode())) {
return;
}
if(aggreationPushRecordDao.deleteAggerationPushRecordBySerialNo(dto.getSerialNo()) == 0){
LOG.info("Batch feedback message: failed to delete the temporary push record, serialNo={}", dto.getSerialNo());
}
} catch (Exception e) {
LOG.error("Error while processing batch aggregation feedback.", e);
}
}
}
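The FeedBackTopic constants and the AggregationDataFeedbackConsumer interface referenced above are not shown in the post. A plausible shape of the constants class, with the actual topic name being a pure assumption, would be:
// Hypothetical sketch of the constants class referenced via FeedBackTopic.BATCH; not from the original source.
package com.suning.fsp.fund.aggreation.common;

public final class AggrConstants {

    private AggrConstants() {
    }

    /** Kafka topics for aggregation feedback messages. */
    public static final class FeedBackTopic {

        // the real topic name is defined elsewhere in the project; this value is an assumption
        public static final String BATCH = "lcbas_aggr_batch_feedback";

        private FeedBackTopic() {
        }
    }
}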
Summary:
afterPropertiesSet is called after all of the bean's properties have been populated, and before any custom init-method.
The important caveat is lazy initialization: for a lazily initialized bean the callback only runs when the bean is first requested, not when the container starts.
To make sure the consumer starts as soon as the Spring container initializes, mark the class as non-lazy:
import org.springframework.context.annotation.Lazy;
@Lazy(false)
With that in place, afterPropertiesSet is invoked during container initialization.
All the class needs to do is implement the InitializingBean interface; the Kafka consumer is then started from afterPropertiesSet and begins listening for messages.
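A stripped-down sketch of the pattern, independent of the Kafka details (the class name and its body are illustrative only):
// Minimal illustration: a non-lazy Spring bean whose afterPropertiesSet callback
// runs as soon as the container has populated its properties.
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Component;

@Component
@Lazy(false) // force eager initialization so the callback fires at container startup
public class StartupWorker implements InitializingBean, DisposableBean {

    @Override
    public void afterPropertiesSet() {
        // called after property injection and before any custom init-method:
        // start listeners, thread pools or consumers here
    }

    @Override
    public void destroy() {
        // container shutdown: release whatever afterPropertiesSet started
    }
}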