Usage
Register a message listener when the consumer is initialized, and implement the message-processing logic inside that listener. The RocketMQ SDK then fetches messages, invokes the listener, and handles message retries in the background.
//Consumption example: consume normal messages with a PushConsumer.
ClientServiceProvider provider = ClientServiceProvider.loadService();
String topic = "YourTopic";
FilterExpression filterExpression = new FilterExpression("YourFilterTag", FilterExpressionType.TAG);
PushConsumer pushConsumer = provider.newPushConsumerBuilder()
//Set the consumer group.
.setConsumerGroup("YourConsumerGroup")
//Set the endpoint.
.setClientConfiguration(ClientConfiguration.newBuilder().setEndpoints("YourEndpoint").build())
//Set the pre-bound subscription (topic and filter expression).
.setSubscriptionExpressions(Collections.singletonMap(topic, filterExpression))
//Set the message listener.
.setMessageListener(new MessageListener() {
@Override
public ConsumeResult consume(MessageView messageView) {
//Consume the message and return the processing result.
return ConsumeResult.SUCCESS;
}
})
.build();
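After build() returns, the consumer is already started. The upstream rocketmq-clients examples simply keep the process alive and close the consumer on exit; a minimal sketch of that tail (assuming the enclosing method declares the checked exceptions):
// Block the main thread; only needed in a demo where nothing else keeps the process alive.
Thread.sleep(Long.MAX_VALUE);
// Close the push consumer once it is no longer needed.
pushConsumer.close();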
A PushConsumer's message listener can finish in one of three ways:
- Consumption succeeds: in the Java SDK, return ConsumeResult.SUCCESS. The message is considered processed, and the server advances the consumption progress accordingly.
- Consumption fails: in the Java SDK, return ConsumeResult.FAILURE. The message is considered failed, and the retry logic decides whether it is consumed again.
- Unexpected failure: for example, the listener throws an exception. This is treated the same as a consumption failure and also goes through the retry logic.
While a PushConsumer is consuming, if the processing logic blocks unexpectedly and the message can never be handled successfully, the SDK treats it as a consumption timeout, forcibly reports a failure result, and hands the message to the retry logic.
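For illustration, a minimal listener sketch that makes these outcomes explicit (handleMessage is a hypothetical business handler, not an SDK API):
MessageListener listener = new MessageListener() {
    @Override
    public ConsumeResult consume(MessageView messageView) {
        try {
            // Hypothetical business logic; replace with real processing.
            handleMessage(messageView);
            return ConsumeResult.SUCCESS;
        } catch (Exception e) {
            // An explicit FAILURE and an uncaught exception both count as a failed
            // consumption and are handled by the retry logic described above.
            return ConsumeResult.FAILURE;
        }
    }
};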
Consumption Retry
PushConsumer Initialization
startUp(): instance initialization method
protected void startUp() throws Exception {
try {
log.info("Begin to start the rocketmq push consumer, clientId={}", clientId);
GaugeObserver gaugeObserver = new ProcessQueueGaugeObserver(processQueueTable, clientId, consumerGroup);
this.clientMeterManager.setGaugeObserver(gaugeObserver);
// Common client startup logic. Producers and SimpleConsumer also call super.startUp(); see the link at the top for the full process.
super.startUp();
final ScheduledExecutorService scheduler = this.getClientManager().getScheduler();
// Create the ConsumeService, which holds the user's MessageListener for message consumption
this.consumeService = createConsumeService();
// Periodically scan queue assignments, which drives message fetching and consumption
scanAssignmentsFuture = scheduler.scheduleWithFixedDelay(() -> {
try {
scanAssignments();
} catch (Throwable t) {
log.error("Exception raised while scanning the load assignments, clientId={}", clientId, t);
}
}, 1, 5, TimeUnit.SECONDS);
log.info("The rocketmq push consumer starts successfully, clientId={}", clientId);
} catch (Throwable t) {
log.error("Exception raised while starting the rocketmq push consumer, clientId={}", clientId, t);
shutDown();
throw t;
}
}
ConsumeService createConsumeService()
private ConsumeService createConsumeService() {
final ScheduledExecutorService scheduler = this.getClientManager().getScheduler();
if (pushSubscriptionSettings.isFifo()) {
log.info("Create FIFO consume service, consumerGroup={}, clientId={}", consumerGroup, clientId);
return new FifoConsumeService(clientId, messageListener, consumptionExecutor, this, scheduler);
}
log.info("Create standard consume service, consumerGroup={}, clientId={}", consumerGroup, clientId);
return new StandardConsumeService(clientId, messageListener, consumptionExecutor, this, scheduler);
}
void scanAssignments(): scan queue assignments and trigger message fetching and consumption
void scanAssignments() {
try {
log.debug("Start to scan assignments periodically, clientId={}", clientId);
// subscriptionExpressions: map of <topic, filter expression>
for (Map.Entry<String, FilterExpression> entry : subscriptionExpressions.entrySet()) {
final String topic = entry.getKey();
final FilterExpression filterExpression = entry.getValue();
// Query the MessageQueue set for this topic; Assignments is a wrapper around that MessageQueue collection
final Assignments existed = cacheAssignments.get(topic);
final ListenableFuture<Assignments> future = queryAssignment(topic);
// On success, syncProcessQueue is called
Futures.addCallback(future, new FutureCallback<Assignments>() {
@Override
public void onSuccess(Assignments latest) {
if (latest.getAssignmentList().isEmpty()) {
if (null == existed || existed.getAssignmentList().isEmpty()) {
log.info("Acquired empty assignments from remote, would scan later, topic={}, "
+ "clientId={}", topic, clientId);
return;
}
log.info("Attention!!! acquired empty assignments from remote, but existed assignments"
+ " is not empty, topic={}, clientId={}", topic, clientId);
}
if (!latest.equals(existed)) {
log.info("Assignments of topic={} has changed, {} => {}, clientId={}", topic, existed,
latest, clientId);
syncProcessQueue(topic, latest, filterExpression);
cacheAssignments.put(topic, latest);
return;
}
log.debug("Assignments of topic={} remains the same, assignments={}, clientId={}", topic,
existed, clientId);
// Process queue may be dropped, need to be synchronized anyway.
syncProcessQueue(topic, latest, filterExpression);
}
@Override
public void onFailure(Throwable t) {
log.error("Exception raised while scanning the assignments, topic={}, clientId={}", topic,
clientId, t);
}
}, MoreExecutors.directExecutor());
}
} catch (Throwable t) {
log.error("Exception raised while scanning the assignments for all topics, clientId={}", clientId, t);
}
}
ListenableFuture<Assignments> queryAssignment(final String topic)
ListenableFuture<Assignments> queryAssignment(final String topic) {
// Pick one broker's endpoints for this topic
final ListenableFuture<Endpoints> future0 = pickEndpointsToQueryAssignments(topic);
return Futures.transformAsync(future0, endpoints -> {
// Build the query-assignment request
final QueryAssignmentRequest request = wrapQueryAssignmentRequest(topic);
// Request timeout
final Duration requestTimeout = clientConfiguration.getRequestTimeout();
// Send the request to the chosen endpoints
final RpcFuture<QueryAssignmentRequest, QueryAssignmentResponse> future1 =
this.getClientManager().queryAssignment(endpoints, request, requestTimeout);
return Futures.transformAsync(future1, response -> {
final Status status = response.getStatus();
StatusChecker.check(status, future1);
// Assignment is a wrapper around a MessageQueue
final List<Assignment> assignmentList = response.getAssignmentsList().stream().map(assignment ->
new Assignment(new MessageQueueImpl(assignment.getMessageQueue()))).collect(Collectors.toList());
final Assignments assignments = new Assignments(assignmentList);
return Futures.immediateFuture(assignments);
}, MoreExecutors.directExecutor());
}, MoreExecutors.directExecutor());
}
private ListenableFuture<Endpoints> pickEndpointsToQueryAssignments(String topic)
Picks the endpoints of one broker among the brokers that serve the topic.
private ListenableFuture<Endpoints> pickEndpointsToQueryAssignments(String topic) {
// Get the route data for the topic: return it directly if it is already in topicRouteCache, otherwise request it from the Proxy
final ListenableFuture<TopicRouteData> future = getRouteData(topic);
return Futures.transformAsync(future, topicRouteData -> {
Endpoints endpoints = topicRouteData.pickEndpointsToQueryAssignments();
return Futures.immediateFuture(endpoints);
}, MoreExecutors.directExecutor());
}
public Endpoints pickEndpointsToQueryAssignments()
Picks a broker's endpoints in a round-robin fashion.
public Endpoints pickEndpointsToQueryAssignments() throws NotFoundException {
int nextIndex = index.getAndIncrement();
for (int i = 0; i < messageQueueImpls.size(); i++) {
final MessageQueueImpl messageQueueImpl = messageQueueImpls.get(IntMath.mod(nextIndex++,
messageQueueImpls.size()));
final Broker broker = messageQueueImpl.getBroker();
if (Utilities.MASTER_BROKER_ID != broker.getId()) {
continue;
}
if (Permission.NONE.equals(messageQueueImpl.getPermission())) {
continue;
}
return broker.getEndpoints();
}
throw new NotFoundException("Failed to pick endpoints to query assignment");
}
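A side note on the round robin: the shared index keeps growing and will eventually overflow into negative values. Guava's IntMath.mod always returns a non-negative remainder, unlike Java's % operator, so the computed position stays usable as a list index. A minimal standalone illustration (not SDK code):
import com.google.common.math.IntMath;

public class ModDemo {
    public static void main(String[] args) {
        int size = 3;
        // A value the shared index could produce after wrapping around Integer.MAX_VALUE.
        int overflowedIndex = Integer.MIN_VALUE + 1;
        System.out.println(overflowedIndex % size);              // negative, unusable as a list index
        System.out.println(IntMath.mod(overflowedIndex, size));  // always in [0, size)
    }
}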
RpcFuture<QueryAssignmentRequest, QueryAssignmentResponse> queryAssignment(…)
- Endpoints endpoints: endpoints of the broker to send the request to
- QueryAssignmentRequest request: the query-assignment request
- Duration duration: request timeout
public RpcFuture<QueryAssignmentRequest, QueryAssignmentResponse> queryAssignment(Endpoints endpoints,
QueryAssignmentRequest request, Duration duration) {
try {
final Metadata metadata = client.sign();
final Context context = new Context(endpoints, metadata);
// Get the RpcClient for these endpoints, cached in rpcClientTable; if absent, create one and add it to the cache (clients idle for a long time are evicted)
final RpcClient rpcClient = getRpcClient(endpoints);
// Send the request over gRPC
final ListenableFuture<QueryAssignmentResponse> future =
rpcClient.queryAssignment(metadata, request, asyncWorker, duration);
return new RpcFuture<>(context, request, future);
} catch (Throwable t) {
return new RpcFuture<>(t);
}
}
void syncProcessQueue(…)
Compares the MessageQueues obtained from the broker (assignments) with the locally cached ones. If a cached MessageQueue no longer appears in the assignments, or has expired, consumption of that queue is stopped. If the assignments contain a MessageQueue that is not cached locally, a new ProcessQueue is created to consume it.
- String topic: the topic
- Assignments assignments: the MessageQueue set for the topic
- FilterExpression filterExpression: the filter expression
void syncProcessQueue(String topic, Assignments assignments, FilterExpression filterExpression) {
Set<MessageQueueImpl> latest = new HashSet<>();
final List<Assignment> assignmentList = assignments.getAssignmentList();
for (Assignment assignment : assignmentList) {
latest.add(assignment.getMessageQueue());
}
Set<MessageQueueImpl> activeMqs = new HashSet<>();
for (Map.Entry<MessageQueueImpl, ProcessQueue> entry : processQueueTable.entrySet()) {
final MessageQueueImpl mq = entry.getKey();
final ProcessQueue pq = entry.getValue();
if (!topic.equals(mq.getTopic())) {
continue;
}
if (!latest.contains(mq)) {
log.info("Drop message queue according to the latest assignmentList, mq={}, clientId={}", mq,
clientId);
dropProcessQueue(mq);
continue;
}
if (pq.expired()) {
log.warn("Drop message queue because it is expired, mq={}, clientId={}", mq, clientId);
dropProcessQueue(mq);
continue;
}
activeMqs.add(mq);
}
// For newly assigned MessageQueues, create a ProcessQueue and start consuming their messages
for (MessageQueueImpl mq : latest) {
if (activeMqs.contains(mq)) {
continue;
}
final Optional<ProcessQueue> optionalProcessQueue = createProcessQueue(mq, filterExpression);
if (optionalProcessQueue.isPresent()) {
log.info("Start to fetch message from remote, mq={}, clientId={}", mq, clientId);
optionalProcessQueue.get().fetchMessageImmediately();
}
}
}
Optional<ProcessQueue> createProcessQueue(MessageQueueImpl mq, final FilterExpression filterExpression)
protected Optional<ProcessQueue> createProcessQueue(MessageQueueImpl mq, final FilterExpression filterExpression) {
final ProcessQueueImpl processQueue = new ProcessQueueImpl(this, mq, filterExpression);
final ProcessQueue previous = processQueueTable.putIfAbsent(mq, processQueue);
if (null != previous) {
return Optional.empty();
}
return Optional.of(processQueue);
}
ProcessQueue
public void fetchMessageImmediately()
public void fetchMessageImmediately() {
receiveMessageImmediately();
}
private void receiveMessageImmediately()
private void receiveMessageImmediately() {
final ClientId clientId = consumer.getClientId();
if (!consumer.isRunning()) {
log.info("Stop to receive message because consumer is not running, mq={}, clientId={}", mq, clientId);
return;
}
try {
final Endpoints endpoints = mq.getBroker().getEndpoints();
final int batchSize = this.getReceptionBatchSize();
final Duration longPollingTimeout = consumer.getPushConsumerSettings().getLongPollingTimeout();
final ReceiveMessageRequest request = consumer.wrapReceiveMessageRequest(batchSize, mq, filterExpression,
longPollingTimeout);
activityNanoTime = System.nanoTime();
// Intercept before message reception.
final MessageInterceptorContextImpl context = new MessageInterceptorContextImpl(MessageHookPoints.RECEIVE);
consumer.doBefore(context, Collections.emptyList());
final ListenableFuture<ReceiveMessageResult> future = consumer.receiveMessage(request, mq,
longPollingTimeout);
Futures.addCallback(future, new FutureCallback<ReceiveMessageResult>() {
@Override
public void onSuccess(ReceiveMessageResult result) {
// Intercept after message reception.
final List<GeneralMessage> generalMessages = result.getMessageViewImpls().stream()
.map((Function<MessageView, GeneralMessage>) GeneralMessageImpl::new)
.collect(Collectors.toList());
final MessageInterceptorContextImpl context0 =
new MessageInterceptorContextImpl(context, MessageHookPointsStatus.OK);
consumer.doAfter(context0, generalMessages);
try {
onReceiveMessageResult(result);
} catch (Throwable t) {
// Should never reach here.
log.error("[Bug] Exception raised while handling receive result, mq={}, endpoints={}, "
+ "clientId={}", mq, endpoints, clientId, t);
onReceiveMessageException(t);
}
}
@Override
public void onFailure(Throwable t) {
// Intercept after message reception.
final MessageInterceptorContextImpl context0 =
new MessageInterceptorContextImpl(context, MessageHookPointsStatus.ERROR);
consumer.doAfter(context0, Collections.emptyList());
log.error("Exception raised during message reception, mq={}, endpoints={}, clientId={}", mq,
endpoints, clientId, t);
onReceiveMessageException(t);
}
}, MoreExecutors.directExecutor());
receptionTimes.getAndIncrement();
consumer.getReceptionTimes().getAndIncrement();
} catch (Throwable t) {
log.error("Exception raised during message reception, mq={}, clientId={}", mq, clientId, t);
onReceiveMessageException(t);
}
}
void onReceiveMessageResult(ReceiveMessageResult result)
private void onReceiveMessageResult(ReceiveMessageResult result) {
final List<MessageViewImpl> messages = result.getMessageViewImpls();
if (!messages.isEmpty()) {
cacheMessages(messages);
receivedMessagesQuantity.getAndAdd(messages.size());
consumer.getReceivedMessagesQuantity().getAndAdd(messages.size());
// As shown by createConsumeService in startUp, consumeService here is the StandardConsumeService
consumer.getConsumeService().consume(this, messages);
}
// Kick off the next long-polling receive, keeping the fetch loop going.
receiveMessage();
}
void eraseMessage(MessageViewImpl messageView, ConsumeResult consumeResult)
Handles a message's consumption result: on success, send an ACK to the broker; on failure, set the message's invisible time according to the retry backoff interval so that it is redelivered later.
public void eraseMessage(MessageViewImpl messageView, ConsumeResult consumeResult) {
statsConsumptionResult(consumeResult);
ListenableFuture<Void> future = ConsumeResult.SUCCESS.equals(consumeResult) ? ackMessage(messageView) :
nackMessage(messageView);
//Remove the message from the local cache
future.addListener(() -> evictCache(messageView), MoreExecutors.directExecutor());
}
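The nackMessage branch is not shown above. Conceptually it asks the server to keep the message invisible for the next retry interval so that it is redelivered later; a simplified sketch of that idea (the changeInvisibleDuration helper and the exact signatures are assumptions, not the SDK's verbatim code):
// Simplified sketch: on consumption failure, postpone redelivery by the retry backoff.
private ListenableFuture<Void> nackMessage(final MessageViewImpl messageView) {
    // The number of delivery attempts so far decides the backoff duration.
    final int deliveryAttempt = messageView.getDeliveryAttempt();
    final Duration delay = consumer.getRetryPolicy().getNextAttemptDelay(deliveryAttempt);
    // Ask the server to keep the message invisible for 'delay'; it becomes consumable again afterwards.
    return changeInvisibleDuration(messageView, delay);
}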
StandardConsumeService: concurrent message consumption
void consume(ProcessQueue pq, List<MessageViewImpl> messageViews)
public void consume(ProcessQueue pq, List<MessageViewImpl> messageViews) {
for (MessageViewImpl messageView : messageViews) {
// Discard corrupted message.
if (messageView.isCorrupted()) {
log.error("Message is corrupted for standard consumption, prepare to discard it, mq={}, "
+ "messageId={}, clientId={}", pq.getMessageQueue(), messageView.getMessageId(), clientId);
pq.discardMessage(messageView);
continue;
}
final ListenableFuture<ConsumeResult> future = consume(messageView);
Futures.addCallback(future, new FutureCallback<ConsumeResult>() {
@Override
public void onSuccess(ConsumeResult consumeResult) {
pq.eraseMessage(messageView, consumeResult);
}
@Override
public void onFailure(Throwable t) {
// Should never reach here.
log.error("[Bug] Exception raised in consumption callback, clientId={}", clientId, t);
}
}, MoreExecutors.directExecutor());
}
}
ListenableFuture<ConsumeResult> consume(MessageViewImpl messageView)
public ListenableFuture<ConsumeResult> consume(MessageViewImpl messageView) {
return consume(messageView, Duration.ZERO);
}
ListenableFuture<ConsumeResult> consume(MessageViewImpl messageView, Duration delay)
Wraps the message into a ConsumeTask and submits it to the consumption thread pool.
- MessageViewImpl messageView: the message to consume
- Duration delay: delay before the message is consumed
public ListenableFuture<ConsumeResult> consume(MessageViewImpl messageView, Duration delay) {
final ListeningExecutorService executorService = MoreExecutors.listeningDecorator(consumptionExecutor);
final ConsumeTask task = new ConsumeTask(clientId, messageListener, messageView, messageInterceptor);
// delay <= 0: consume the message immediately
if (Duration.ZERO.compareTo(delay) >= 0) {
return executorService.submit(task);
}
// Consume the message after the given delay
final SettableFuture<ConsumeResult> future0 = SettableFuture.create();
scheduler.schedule(() -> {
final ListenableFuture<ConsumeResult> future = executorService.submit(task);
Futures.addCallback(future, new FutureCallback<ConsumeResult>() {
@Override
public void onSuccess(ConsumeResult consumeResult) {
future0.set(consumeResult);
}
@Override
public void onFailure(Throwable t) {
// Should never reach here.
log.error("[Bug] Exception raised while submitting scheduled consumption task, clientId={}",
clientId, t);
}
}, MoreExecutors.directExecutor());
}, delay.toNanos(), TimeUnit.NANOSECONDS);
return future0;
}
FifoConsumeService: ordered (FIFO) messages
Until the retry limit is reached, the message is repeatedly wrapped into a task and re-executed in the local thread pool; only after it succeeds, or finally fails because the attempt limit is exceeded, does the consumer respond to the broker.
public void consume(ProcessQueue pq, List<MessageViewImpl> messageViews) {
consumeIteratively(pq, messageViews.iterator());
}
void consumeIteratively(ProcessQueue pq, Iterator<MessageViewImpl> iterator)
public void consumeIteratively(ProcessQueue pq, Iterator<MessageViewImpl> iterator) {
if (!iterator.hasNext()) {
return;
}
final MessageViewImpl messageView = iterator.next();
// Corrupted message: discard it (for FIFO consumption, discardFifoMessage forwards it to the dead-letter queue)
if (messageView.isCorrupted()) {
// Discard corrupted message.
log.error("Message is corrupted for FIFO consumption, prepare to discard it, mq={}, messageId={}, "
+ "clientId={}", pq.getMessageQueue(), messageView.getMessageId(), clientId);
pq.discardFifoMessage(messageView);
consumeIteratively(pq, iterator);
return;
}
// Consume the message via the parent class's consumption logic, the same path as concurrent consumption
final ListenableFuture<ConsumeResult> future0 = consume(messageView);
ListenableFuture<Void> future = Futures.transformAsync(future0, result -> pq.eraseFifoMessage(messageView,
result), MoreExecutors.directExecutor());
future.addListener(() -> consumeIteratively(pq, iterator), MoreExecutors.directExecutor());
}
ListenableFuture<Void> eraseFifoMessage(MessageViewImpl messageView, ConsumeResult consumeResult)
Post-consumption handling logic:
- On failure, if the configured retry limit has not been reached, obtain the backoff interval and consume the message again;
- On failure, once the attempt count reaches the configured retry limit, call forwardToDeadLetterQueue to move the message to the dead-letter queue;
- On success, send an ACK for the message.
public ListenableFuture<Void> eraseFifoMessage(MessageViewImpl messageView, ConsumeResult consumeResult) {
statsConsumptionResult(consumeResult);
final RetryPolicy retryPolicy = consumer.getRetryPolicy();
final int maxAttempts = retryPolicy.getMaxAttempts();
int attempt = messageView.getDeliveryAttempt();
final MessageId messageId = messageView.getMessageId();
final ConsumeService service = consumer.getConsumeService();
final ClientId clientId = consumer.getClientId();
if (ConsumeResult.FAILURE.equals(consumeResult) && attempt < maxAttempts) {
final Duration nextAttemptDelay = retryPolicy.getNextAttemptDelay(attempt);
attempt = messageView.incrementAndGetDeliveryAttempt();
log.debug("Prepare to redeliver the fifo message because of the consumption failure, maxAttempt={}," +
" attempt={}, mq={}, messageId={}, nextAttemptDelay={}, clientId={}", maxAttempts, attempt, mq,
messageId, nextAttemptDelay, clientId);
final ListenableFuture<ConsumeResult> future = service.consume(messageView, nextAttemptDelay);
return Futures.transformAsync(future, result -> eraseFifoMessage(messageView, result),
MoreExecutors.directExecutor());
}
boolean ok = ConsumeResult.SUCCESS.equals(consumeResult);
if (!ok) {
log.info("Failed to consume fifo message finally, run out of attempt times, maxAttempts={}, "
+ "attempt={}, mq={}, messageId={}, clientId={}", maxAttempts, attempt, mq, messageId, clientId);
}
// Ack message or forward it to DLQ depends on consumption result.
ListenableFuture<Void> future = ok ? ackMessage(messageView) : forwardToDeadLetterQueue(messageView);
future.addListener(() -> evictCache(messageView), consumer.getConsumptionExecutor());
return future;
}
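The retry backoff above comes from the consumer's RetryPolicy, which the SDK obtains as part of the push consumer settings; getNextAttemptDelay(attempt) simply maps the current attempt to a wait time, and eraseFifoMessage passes that delay to ConsumeService.consume(messageView, nextAttemptDelay), which schedules the retry on the scheduler shown earlier. Purely for intuition, a self-contained sketch of one possible backoff shape (this is not the SDK's policy class, and every parameter here is made up):
import java.time.Duration;

// Illustrative only: delay grows as initial * multiplier^(attempt - 1), capped at max.
final class ExponentialBackoffSketch {
    private final Duration initial = Duration.ofSeconds(1);
    private final Duration max = Duration.ofSeconds(60);
    private final double multiplier = 2.0;

    Duration nextAttemptDelay(int attempt) {
        double seconds = initial.getSeconds() * Math.pow(multiplier, Math.max(0, attempt - 1));
        long millis = (long) Math.min(seconds * 1000, max.toMillis());
        return Duration.ofMillis(millis);
    }
}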
ConsumeTask
Invokes the user-defined message-processing logic. There are two possible results, and an exception thrown during processing counts as a failure:
- SUCCESS: consumption succeeded
- FAILURE: consumption failed
public ConsumeResult call() {
ConsumeResult consumeResult;
final List<GeneralMessage> generalMessages = Collections.singletonList(new GeneralMessageImpl(messageView));
MessageInterceptorContextImpl context = new MessageInterceptorContextImpl(MessageHookPoints.CONSUME);
messageInterceptor.doBefore(context, generalMessages);
try {
// messageListener is the user-implemented message handler
consumeResult = messageListener.consume(messageView);
} catch (Throwable t) {
log.error("Message listener raised an exception while consuming messages, clientId={}", clientId, t);
// If exception was thrown during the period of message consumption, mark it as failure.
consumeResult = ConsumeResult.FAILURE;
}
MessageHookPointsStatus status = ConsumeResult.SUCCESS.equals(consumeResult) ? MessageHookPointsStatus.OK :
MessageHookPointsStatus.ERROR;
context = new MessageInterceptorContextImpl(context, status);
messageInterceptor.doAfter(context, generalMessages);
// Make sure that the return value is the subset of messageViews.
return consumeResult;
}