前言
平常我们在使用RocketMQ消息队列消费时,基本使用的都是push模式,有些时候我们也需要通过pull方式来实现我们的功能。我们都知道RocketMQ的push模式底层就是使用pull模式来实现的,所以我们其实可以自己来实现一些push的功能。废话不多说,直接上代码。
亮点
1.实现了push模式的offset维护。如果不实现这个功能,我们就需要自己手动去维护offset。我想要一个一劳永逸的办法,不想自己去维护。我们知道push模式下RocketMQ会自己帮我们维护offset,于是我通过阅读源码,调用了push模式更新offset的方法。
2.实现了集群下消息队列的负载均衡,原理和源码里的实现差不多。默认使用的是平均负载策略。
代码
import lombok.extern.slf4j.Slf4j;
import org.apache.rocketmq.client.consumer.DefaultMQPullConsumer;
import org.apache.rocketmq.client.consumer.PullResult;
import org.apache.rocketmq.client.consumer.PullStatus;
import org.apache.rocketmq.client.consumer.store.OffsetStore;
import org.apache.rocketmq.client.consumer.store.ReadOffsetType;
import org.apache.rocketmq.client.consumer.store.RemoteBrokerOffsetStore;
import org.apache.rocketmq.client.exception.MQClientException;
import org.apache.rocketmq.client.impl.FindBrokerResult;
import org.apache.rocketmq.client.impl.MQClientManager;
import org.apache.rocketmq.client.impl.factory.MQClientInstance;
import org.apache.rocketmq.common.message.MessageExt;
import org.apache.rocketmq.common.message.MessageQueue;
import org.apache.rocketmq.common.protocol.header.UpdateConsumerOffsetRequestHeader;
import org.apache.rocketmq.common.protocol.heartbeat.MessageModel;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Configuration;
import org.springframework.util.CollectionUtils;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
* @author niezhiliang
* @date 2021/4/19 18:41
*/
@Configuration
@Slf4j
public class PullConsumerConfiguration {
/**
* nameServer地址
*/
@Value("${rocketmq.name-server}")
private String nameServer;
/**
* 设备在离线状态
*/
public static final String TOPIC = "iot-up-comm-status";
/**
* 消费者组
*/
public static final String GROUPID = "energy-assert-group-test";
/**
* MQ客户端
*/
protected MQClientInstance mQClientFactory = null;
/**
* 消费者
*/
private DefaultMQPullConsumer consumer;
/**
* 负载后的队列,防止topic的队列数变更。
* 如果存在consumer集群会进行负载均衡
*/
private List<MessageQueue> allocateResult = null;
/**
* 拉取最新的offset
*/
private OffsetStore offsetStore;
/**
* 一次最多拉去多少条消息
*/
private static final Integer MAX_MESSAGE_SIZE = 1000;
/**
* 消息拉取间隔(单位秒)
*/
private static final Integer PULL_PERIO = 30;
private static final String TAG = "*";
/**
* 更新负载队列定时器
*/
private final Timer timer = new Timer("DefaultMQPullConsumer-Rebalance-Timer", true);
/**
* 消费消息
*/
public void consumerMessage() throws InterruptedException {
while (true) {
try {
//只有负载时分到了队列才走下面的
if (!CollectionUtils.isEmpty(allocateResult)) {
//处理负载队列的所有消息体
List<MessageExt> messageExts = new ArrayList<>(500);
for (MessageQueue queue : allocateResult) {
//从内存中直接获取当前队列的偏移量 取不到通过请求broker获取
long curOffset = offsetStore.readOffset(queue, ReadOffsetType.MEMORY_FIRST_THEN_STORE);
//消息拉取
PullResult result = consumer.pull(queue, TAG, curOffset, MAX_MESSAGE_SIZE);
//拉取到了消息,进行后续处理
if (result.getPullStatus().equals(PullStatus.FOUND)) {
//将所有的消息放入一个集合中,统一处理
messageExts.addAll(result.getMsgFoundList());
// 调用此方法进行偏移量更新到broker
updateConsumerOffset(queue,result.getNextBeginOffset());
}
}
//打印得到的消息
if (!CollectionUtils.isEmpty(messageExts)) {
for (MessageExt messageExt : messageExts) {
System.out.println(new String(messageExt.getBody()));
}
}
}
} catch (Exception e) {
log.error("消费消息异常:",e);
}
//线程睡30s 再拉取消息
TimeUnit.SECONDS.sleep(PULL_PERIO);
}
}
/**
* 修改消费offset
* @param mq
* @param offset
* @throws Exception
*/
public void updateConsumerOffset(MessageQueue mq,Long offset) throws Exception {
//通过broker缓存中取broker地址
FindBrokerResult findBrokerResult = this.mQClientFactory.findBrokerAddressInAdmin(mq.getBrokerName());
//取不到取nameserver取
if (null == findBrokerResult) {
this.mQClientFactory.updateTopicRouteInfoFromNameServer(mq.getTopic());
findBrokerResult = this.mQClientFactory.findBrokerAddressInAdmin(mq.getBrokerName());
}
if (findBrokerResult != null) {
UpdateConsumerOffsetRequestHeader requestHeader = new UpdateConsumerOffsetRequestHeader();
requestHeader.setTopic(mq.getTopic());
requestHeader.setConsumerGroup(this.GROUPID);
requestHeader.setQueueId(mq.getQueueId());
requestHeader.setCommitOffset(offset);
//更新内存中的offset
this.offsetStore.updateOffset(mq,offset,false);
//请求broker更新offset
this.mQClientFactory.getMQClientAPIImpl().updateConsumerOffsetOneway(findBrokerResult.getBrokerAddr(), requestHeader, 1000 * 5);
} else {
log.error("The broker[" + mq.getBrokerName() + "] not exist");
}
}
/**
* consumer初始化
* 启动消费者、启动负载定时任务
* @throws Exception
*/
@PostConstruct
public void init() throws Exception {
consumer = new DefaultMQPullConsumer(GROUPID);
consumer.setNamesrvAddr(nameServer);
//集群消费
consumer.setMessageModel(MessageModel.CLUSTERING);
//启动消费者
consumer.start();
//客户端实例化
mQClientFactory = MQClientManager.getInstance().getAndCreateMQClientInstance(consumer);
//实例化offsetStore(broker拉取当前消费offset)
this.offsetStore = new RemoteBrokerOffsetStore(this.mQClientFactory, GROUPID);
//20手动负载队列
timer.schedule(doRebalanceTask(), new Date(), 20000);
//消息消费
consumerMessage();
}
/**
* 容器关闭时,将消费者关闭
*/
@PreDestroy
public void destory() {
consumer.shutdown();
}
/**
* 消息队列的重新负载
*/
public void doRebalance() {
//同一个group中,监听该topic的consumer ip@port
List<String> cidAll = this.mQClientFactory.findConsumerIdList(TOPIC, GROUPID);
//对MessageQueue排序
Set<MessageQueue> messageQueues = new HashSet<>();
try {
messageQueues = consumer.fetchSubscribeMessageQueues(TOPIC);
} catch (MQClientException e) {
log.error("手动拉取队列异常:",e);
}
List<MessageQueue> mqAll = new ArrayList<>(messageQueues);
//排序
Collections.sort(mqAll);
//都为空无需负载
if (CollectionUtils.isEmpty(mqAll) || CollectionUtils.isEmpty(cidAll)) {
return;
}
try {
//根据负载策略得到消费的MessageQueues,默认使用的是平均负载策略
allocateResult = consumer.getAllocateMessageQueueStrategy().allocate(GROUPID, this.mQClientFactory.getClientId(),mqAll, cidAll);
} catch (Throwable e) {
log.error("负载异常:",e);
}
}
/**
* 定时更新队列的负载信息,
* 程序运行中,队列数是可能发生变化的
* @return
*/
private TimerTask doRebalanceTask() {
return new TimerTask() {
@Override
public void run() {
log.info("messgeQueue doRebalance begin.....");
try {
doRebalance();
} catch (Throwable e) {
log.error("doRebalance begin error:{}", e);
}
}
};
}
}