1. 引入依赖(Maven 引包)
<dependency>
<groupId>org.apache.rocketmq</groupId>
<artifactId>rocketmq-spring-boot-starter</artifactId>
<version>2.0.3</version>
</dependency>
2. Redis 存储,通用的消费者。
/**
 * Binds the {@code xxx.rocketmq.*} configuration properties.
 * {@code @RefreshScope} lets the values be refreshed at runtime from the config center.
 */
@Data
@Component
@ConfigurationProperties(prefix = "xxx.rocketmq")
@RefreshScope
public class RocketMQConfigProperties {
// topic / consumer-group pairs; one push consumer is created per entry
private List<RocketMQTopicGroupBean> topicGroup;
// RocketMQ name-server address (host:port)
private String rocketmqAddr;
}
/**
 * One topic/consumer-group configuration entry consumed by {@code InitConsumer}.
 */
@Data
public class RocketMQTopicGroupBean {
// consumer group name
private String groupName;
// base Redis key under which this topic's latest payload is stored
private String redisKey;
// topic to subscribe to
private String topic;
}
@Component
@Slf4j
public class InitConsumer {
@Autowired
RocketMQConfigProperties rocketMQConfigProperties;
@Autowired
private StringRedisTemplate stringRedisTemplate;
private static final String RIDERSID="riderSid";
@PostConstruct
public void init() {
List<RocketMQTopicGroupBean> topicGroupBean = rocketMQConfigProperties.getTopicGroup();
if (topicGroupBean != null && !topicGroupBean.isEmpty()) {
topicGroupBean.forEach(this::registrationMessage);
}
}
/**
* 初始化topic
*/
private void registrationMessage(RocketMQTopicGroupBean x) {
String redisKey = x.getRedisKey();
String topic = x.getTopic();
String groupName = x.getGroupName();
String rocketmqAddr = rocketMQConfigProperties.getRocketmqAddr();
DefaultMQPushConsumer defaultMQPushConsumer = new DefaultMQPushConsumer(groupName);
defaultMQPushConsumer.setNamesrvAddr(rocketmqAddr);
// 跳过历史重最新开始消费
defaultMQPushConsumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET);
// 集群:同一条消息 只会被一个消费者节点消费到
defaultMQPushConsumer.setMessageModel(MessageModel.CLUSTERING);
// 设置每次拉取的消息量,默认为1
defaultMQPushConsumer.setConsumeMessageBatchMaxSize(1);
// 注册监听器
defaultMQPushConsumer.registerMessageListener(getMessageListenerConcurrently(redisKey, topic, groupName));
try {
// 订阅主题下所有消息(tag)
defaultMQPushConsumer.subscribe(topic, "*");
// 启动消费者
defaultMQPushConsumer.start();
log.info("RocketMQ 主题为:{},组为:{} 消费者加载成功", topic, groupName);
} catch (MQClientException e) {
log.error(String.valueOf(e));
log.error("RocketMQ 主题为:{}, 组为:{} 消费者加载失败{}", topic, groupName, e.getMessage());
}
}
/**
* 注册监听
* */
private MessageListenerConcurrently getMessageListenerConcurrently(String redisKey, String topic, String groupName) {
return (list, consumeConcurrentlyContext) -> {
try {
list.forEach(y ->
processMessage(y, redisKey, topic, groupName)
);
//ConsumeConcurrentlyStatus.CONSUME_SUCCESS表示消费成功
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
} catch (Exception ex) {
log.info("RocketMQ 主题为:{}, 组为:{} 消费者消费失败", topic, groupName);
log.error(ex.getMessage());
// ConsumeConcurrentlyStatus.RECONSUME_LATER消费失败,消息会重试
return ConsumeConcurrentlyStatus.RECONSUME_LATER;
}
};
}
/**
* 处理消息
*/
private void processMessage(MessageExt y, String redisKey, String topic, String groupName) {
String result = new String(y.getBody());
if (StringUtils.isNotEmpty(result)) {
//根据主题,动态生成对应的key
redisKey = getRedisKey(redisKey, topic, result);
//数据备份key
String redisKeyBak = redisKey + "_BAK";
log.info("RocketMQ 主题为:{}, 组为:{}, 获取消息为: {},redis key为: {}", topic, groupName, result, redisKey);
Boolean aBoolean = stringRedisTemplate.hasKey(redisKeyBak);
if (Boolean.TRUE.equals(aBoolean)) {
//后续消费每次备份上一次的数据
Object historyData = stringRedisTemplate.opsForValue().get(redisKey);
stringRedisTemplate.opsForValue().set(redisKeyBak, historyData == null ? "" : (String) historyData);
} else {
//第一次消费备份的key数据和key的数据同时增加
stringRedisTemplate.opsForValue().set(redisKeyBak, result);
}
//消费更新最新数据
stringRedisTemplate.opsForValue().set(redisKey, result);
log.info("RocketMQ 主题为:{}, 组为:{} 消费者消费成功", topic, groupName);
} else {
log.warn("RocketMQ 主题为:{}, 组为:{} 获取消息为null,暂不更新", topic, groupName);
}
}
/**
* 根据不同的Topic生成对应的Redis key值
*/
private String getRedisKey(String redisKey, String topic, String result) {
if ("XXX1".equals(topic)) {
HashMap<String,String> map = FastJsonUtils.toBean(result,HashMap.class);
if (map != null) {
String riderSid = map.get(InitConsumer.RIDERSID) != null ? map.get(InitConsumer.RIDERSID) : "";
redisKey = redisKey + "@" + riderSid;
}
}
if ("XXX2".equals(topic)) {
List<Map<String,String>> maps = FastJsonUtils.toBean(result,List.class);
if (!maps.isEmpty()) {
//每一次数据riderSid都一致
String riderSid = maps.get(0).get(InitConsumer.RIDERSID) != null ? maps.get(0).get(InitConsumer.RIDERSID) : "";
redisKey = redisKey + "@" + riderSid;
}
}
return redisKey;
}
}
3. 事务性 RocketMQ,生产者
//源码:消息发送返回值(SendStatus)各状态的含义
FLUSH_DISK_TIMEOUT
如果设置了 FlushDiskType=SYNC_FLUSH (默认是 ASYNC_FLUSH),并且 Broker 没有在 syncFlushTimeout (默认是 5 秒)设置的时间内完成刷盘,就会收到此状态码。
FLUSH_SLAVE_TIMEOUT
如果设置为 SYNC_MASTER,并且 slave Broker 没有在 syncFlushTimeout 设定时间内完成同步,就会收到此状态码。
SLAVE_NOT_AVAILABLE
如果设置为 SYNC_MASTER,并没有配置 slave Broker,就会收到此状态码。
SEND_OK
这个状态可以简单理解为,没有发生上面列出的三个问题状态就是SEND_OK。需要注意的是,SEND_OK 并不意味着可靠,如果想严格确保没有消息丢失,需要开启 SYNC_MASTER or SYNC_FLUSH。
如果收到了 FLUSH_DISK_TIMEOUT, FLUSH_SLAVE_TIMEOUT,意味着消息会丢失,有2个选择,一是无所谓,适用于消息不关紧要的场景,二是重发,但可能产生消息重复,这就需要consumer进行去重控制。如果收到了 SLAVE_NOT_AVAILABLE 就要赶紧通知管理员了。
/**
 * Copied from RocketMQ source for reference: the possible outcomes of a send.
 * SEND_OK alone does not guarantee durability unless SYNC_MASTER / SYNC_FLUSH is enabled.
 */
public enum SendStatus {
    // Persisted (and replicated, if configured) successfully.
    SEND_OK,
    FLUSH_DISK_TIMEOUT,// SYNC_FLUSH brokers: flush did not finish within syncFlushTimeout
    FLUSH_SLAVE_TIMEOUT,// SYNC_MASTER: slave did not sync within syncFlushTimeout
    SLAVE_NOT_AVAILABLE;// SYNC_MASTER configured but no slave broker available
    private SendStatus() {
    }
}
@RestController
public class TransactionController {
@Autowired
TransactionMq transactionMq;
@RequestMapping("/create")
public String cretaorder(String orderid,String topic) throws MQClientException {
TransactionSendResult send = transactionMq.send(orderid, topic);
return "1";
}
}
//事务监听:消息发送后不会马上被消费。executeLocalTransaction 返回 LocalTransactionState.COMMIT_MESSAGE
//才会被消费;返回 LocalTransactionState.ROLLBACK_MESSAGE 则消息不会被消费;返回 LocalTransactionState.UNKNOW
//则会触发事务回查,执行 checkLocalTransaction。本示例基于 RocketMQ 4.x 版本。
@Component
public class OrderTransactionListener implements TransactionListener {

    /**
     * Executes the local transaction for a half message. Returning UNKNOW defers the
     * commit/rollback decision to the broker's check-back ({@link #checkLocalTransaction}).
     */
    @Override
    public LocalTransactionState executeLocalTransaction(Message message, Object arg) {
        return LocalTransactionState.UNKNOW;
    }

    /**
     * Broker-initiated check-back for messages left in UNKNOW state; always commits here.
     */
    @Override
    public LocalTransactionState checkLocalTransaction(MessageExt messageExt) {
        System.out.println("回查成功");
        return LocalTransactionState.COMMIT_MESSAGE;
    }
}
//事务生产者初始化(注:原注释误写为“消费者”)
@Slf4j
@Component
public class TransactionMq {
private String producerGroup = "order_trans_group";
private TransactionMQProducer producer;
//用于执行本地事务和事务状态回查的监听器
@Autowired
OrderTransactionListener orderTransactionListener;
//执行任务的线程池
ThreadPoolExecutor executor = new ThreadPoolExecutor(5, 10, 60,
TimeUnit.SECONDS, new ArrayBlockingQueue<>(50));
@PostConstruct
public void init(){
producer = new TransactionMQProducer(producerGroup);
producer.setNamesrvAddr("127.0.0.1:9876");
producer.setSendMsgTimeout(Integer.MAX_VALUE);
producer.setExecutorService(executor);
producer.setTransactionListener(orderTransactionListener);
this.start();
}
private void start(){
try {
this.producer.start();
} catch (MQClientException e) {
e.printStackTrace();
}
}
//事务消息发送
public TransactionSendResult send(String data, String topic) throws MQClientException {
Message message = new Message(topic,data.getBytes());
//异步消息发送
// this.producer.send(message, new SendCallback() {
// @Override
// public void onSuccess(SendResult sendResult) {
//
// }
//
// @Override
// public void onException(Throwable throwable) {
//
// }
// };
return this.producer.sendMessageInTransaction(message, null);
}
}