消息队列
list
基于 list 来实现队列
lpush + brpop
消息生产者
public class Supplier {
    /** Redis key of the list that backs the queue. */
    public static final String QUEUE_NAME = "listq";

    /**
     * Enqueues a message: lpush puts it at the head of the list,
     * the consumer drains from the tail (brpop), giving FIFO order.
     */
    public static void publish(Object obj) {
        RedisOpsUtils.instance()
                .opsForList()
                .leftPush(QUEUE_NAME, obj);
    }
}
消息消费者
public class Consumer {
    /**
     * Starts an asynchronous loop that blocks on the tail of the list
     * (rightPop with a 3-second timeout, i.e. brpop) and hands every
     * received message to the worker pool for printing.
     * A timeout returns null, in which case the loop simply polls again.
     */
    public static void consumer() {
        ThreadUtil.execAsync(() -> {
            for (;;) {
                Object message = RedisOpsUtils.instance()
                        .opsForList()
                        .rightPop(Supplier.QUEUE_NAME, 3, TimeUnit.SECONDS);
                if (message != null) {
                    ThreadPoolUtil.submit(() -> System.out.println("收到消息:" + message));
                }
            }
        });
    }
}
测试
@Test
@SneakyThrows
public void test_redis_list_queue() {
    // Start the background listener before producing.
    Consumer.consumer();
    // Publish 100 events: msg0 .. msg99.
    int i = 0;
    while (i < 100) {
        Supplier.publish("msg" + i);
        i++;
    }
    // Keep the test alive so the async consumer has time to drain the queue.
    Thread.sleep(10_000L);
}
优缺点
- 消息无法重复被消费,当接收到消息后直接出队
- 存在消息丢失,接收到消息后,服务器挂掉
zset
基于 zset 来实现队列
zadd + bzpopmax | bzpopmin
如果使用的是低版本 redisson 作为 redis 连接工厂 会出现问题,最好升级到最新版
生产者
public class ZSetSupplier {
public static void publish(String obj, double score) {
ZSetOperations<String, String> zSetOperations = RedisOpsUtils.instance().opsForZSet();
zSetOperations.add("zsetq", obj, score);
}
}
消费者
public class ZSetConsumer {
    /**
     * Starts a polling loop that pops the highest-score entry from "zsetq"
     * (popMax with a 1-second timeout, i.e. bzpopmax) and forwards each hit
     * to the worker pool. A timeout yields null and the loop polls again.
     * InvalidDataAccessApiUsageException is logged and the loop keeps going.
     */
    public static void consumer() {
        ThreadPoolUtil.submit(() -> {
            for (;;) {
                try {
                    Object entry = RedisOpsUtils.instance()
                            .opsForZSet()
                            .popMax("zsetq", 1, TimeUnit.SECONDS);
                    if (entry != null) {
                        ThreadPoolUtil.submit(() -> System.out.println("收到消息:" + entry));
                    }
                } catch (InvalidDataAccessApiUsageException e) {
                    System.out.println(e.getMessage());
                }
            }
        });
    }
}
测试
@Test
@SneakyThrows
public void test_redis_zset_queue() {
    // Start the listener first.
    ZSetConsumer.consumer();
    // Publish msg0 .. msg99 with score == index.
    int i = 0;
    while (i < 100) {
        ZSetSupplier.publish("msg" + i, i);
        i++;
    }
}
优缺点
- 和 list 实现队列一致。
- 并且因为 bzpopmax,我们能够在一定程度上干扰消息消费的顺序,但因为一直在往队列中添加消息,所以并不可靠。
发布/订阅
配置
// redis pub/sub configuration
// BUG FIX: the original class declared @Bean methods without @Configuration.
// Without it the class is either never processed as a bean-definition source,
// or runs in "lite" mode where the inter-bean calls redisMessageListener() and
// channel() inside messageListenerContainer() are plain Java calls that create
// fresh instances instead of returning the singleton beans.
@Configuration
public class RedisConf {

    /** Registers our custom message listener as a bean. */
    @Bean
    public RedisMessageListener redisMessageListener() {
        return new RedisMessageListener();
    }

    /** The pub/sub channel ("channel-1") the listener subscribes to. */
    @Bean
    public ChannelTopic channel() {
        return new ChannelTopic("channel-1");
    }

    /**
     * Wires the listener to its channel inside a listener container.
     * This is where listener-class -> channel mappings are configured.
     *
     * @param redisConnectionFactory connection factory supplied by Spring
     * @return the container that dispatches incoming pub/sub messages
     */
    @Bean
    public RedisMessageListenerContainer messageListenerContainer(RedisConnectionFactory redisConnectionFactory) {
        RedisMessageListenerContainer container = new RedisMessageListenerContainer();
        container.setConnectionFactory(redisConnectionFactory);
        // With @Configuration these calls are proxied and return the beans above.
        container.addMessageListener(redisMessageListener(), channel());
        return container;
    }
}
发布消息
@Component
public class RedisMessagePublisher {
    @Resource
    private RedisTemplate selfRedisTemplate;

    /**
     * Publishes {@code msg} to the "channel-1" pub/sub channel.
     * Pub/sub delivery is fire-and-forget: subscribers that are offline
     * at publish time never see the message.
     */
    public void publishEvent(Object msg) {
        final String channel = "channel-1";
        selfRedisTemplate.convertAndSend(channel, msg);
    }
}
订阅消息
public class RedisMessageListener implements MessageListener {
    /**
     * Receives messages pushed from the subscribed Redis channel.
     * BUG FIX: the payload is now decoded with an explicit UTF-8 charset;
     * the original {@code new String(message.getBody())} used the platform
     * default charset, making the output platform-dependent (and garbling
     * the Chinese test messages on non-UTF-8 JVMs).
     *
     * @param message the raw channel message (body + channel)
     * @param pattern the pattern that matched the channel, if pattern-subscribed
     */
    @Override
    public void onMessage(Message message, byte[] pattern) {
        String msg = new String(message.getBody(), java.nio.charset.StandardCharsets.UTF_8);
        System.out.println(msg);
    }
}
测试
// CONSISTENCY FIX: the field was typed `Publisher` and called `publish(...)`,
// but the publisher defined for this section is RedisMessagePublisher whose
// method is publishEvent(Object) — the snippet could not compile as written.
@Resource
private RedisMessagePublisher publisher;

@Test
public void test_redis_pub() {
    // Publish msg0 .. msg99 to "channel-1".
    for (int i = 0; i < 100; i++) {
        publisher.publishEvent("msg" + i);
    }
}
优缺点
- 支持分组订阅;单个订阅者可订阅多个发布者;对于一个分组来说,组中的所有成员都能够接收到同一个消息。
- 数据丢失。redis宕机(pub/sub操作不会写入到rdb 和 aof 中);消息发布速度太快导致达到缓冲区上限(会将消费者踢下线);不会持久化数据(pub/sub 并没有基于任何数据结构来实现,只是将消息从管道的一端推到另一端)
stream
stream 是 redis 5.0 之后提供的 api。
常量类
// NOTE(review): a constant interface is generally an anti-pattern (Effective
// Java Item 22), but this one is implemented elsewhere to inherit the names,
// so converting it to a final class would break those call sites.
public interface StreamConstant {
    // Redis key of the stream all examples publish to / consume from.
    String MESSAGE_QUEUE_NAME = "test_message_queue";
    // Consumer-group names (each group receives every message independently).
    String GROUP_A_NAME = "groupa";
    String GROUP_B_NAME = "groupb";
    // Consumer names; within one group, members compete for messages.
    String CONSUMER_A_NAME = "consumer_a";
    String CONSUMER_B_NAME = "consumer_b";
    String CONSUMER_C_NAME = "consumer_c";
}
配置
@Configuration
public class RedisStreamConsumerConfig implements StreamConstant {

    /**
     * RedisTemplate dedicated to stream operations: string keys / hash keys,
     * FastJson-serialized values and hash values.
     */
    @Bean
    public RedisTemplate redisStreamTemplate(RedisConnectionFactory redisConnectionFactory) {
        RedisTemplate<String, Object> redisStreamTemplate = new RedisTemplate<>();
        redisStreamTemplate.setConnectionFactory(redisConnectionFactory);
        StringRedisSerializer stringRedisSerializer = new StringRedisSerializer();
        FastJsonRedisSerializer<Object> fastJsonRedisSerializer = new FastJsonRedisSerializer<>(Object.class);
        redisStreamTemplate.setKeySerializer(stringRedisSerializer);
        redisStreamTemplate.setValueSerializer(fastJsonRedisSerializer);
        redisStreamTemplate.setHashKeySerializer(stringRedisSerializer);
        redisStreamTemplate.setHashValueSerializer(fastJsonRedisSerializer);
        redisStreamTemplate.afterPropertiesSet();
        return redisStreamTemplate;
    }

    /**
     * Binds a listener directly to the stream without a consumer group:
     * messages are read from the stream as a plain queue.
     *
     * BUG FIX: the register(...) callback's lambda parameter was named
     * {@code msg} while its body referenced {@code message}, which did not
     * compile; the parameter is now named {@code message}.
     *
     * @param redisConnectionFactory connection factory supplied by Spring
     * @return the started listener container
     */
    @Bean
    public StreamMessageListenerContainer<String, ObjectRecord<String, String>> container(
            RedisConnectionFactory redisConnectionFactory) {
        StreamMessageListenerContainer.StreamMessageListenerContainerOptions<String, ObjectRecord<String, String>> options =
                StreamMessageListenerContainer.StreamMessageListenerContainerOptions
                        .builder()
                        .pollTimeout(Duration.ofSeconds(1)) // poll timeout per fetch
                        .batchSize(3) // messages fetched per poll
                        .targetType(String.class) // payload type
                        .executor(ThreadPoolUtil.getThreadPoolExecutor("msg-queue-pool"))
                        .build();
        StreamMessageListenerContainer<String, ObjectRecord<String, String>> container = StreamMessageListenerContainer
                .create(redisConnectionFactory, options);
        StreamOffset<String> offset = StreamOffset.create(MESSAGE_QUEUE_NAME, ReadOffset.lastConsumed());
        StreamMessageListenerContainer.StreamReadRequest request =
                StreamMessageListenerContainer.StreamReadRequest.builder(offset).build();
        // Register the subscriber callback.
        container.register(request, message -> System.out.println("收到消息:" + message.getValue()));
        container.start();
        return container;
    }

    /**
     * Consumer-group mode: groups must exist before consumers can read, so
     * missing groups are created in code (see ConsumerFactory).
     * Left un-@Bean'ed on purpose — enable it instead of container(...) to
     * try group consumption.
     *
     * @param redisConnectionFactory connection factory supplied by Spring
     * @return the started listener container
     */
    // @Bean
    public StreamMessageListenerContainer consumerContainer(RedisConnectionFactory redisConnectionFactory) {
        StreamMessageListenerContainer.StreamMessageListenerContainerOptions<String, ObjectRecord<String, String>> options =
                StreamMessageListenerContainer.StreamMessageListenerContainerOptions
                        .builder()
                        .pollTimeout(Duration.ofSeconds(5)) // poll timeout per fetch
                        .batchSize(10) // messages fetched per poll
                        .targetType(String.class) // payload type
                        .executor(ThreadPoolUtil.getThreadPoolExecutor("msg-queue-pool"))
                        .build();
        StreamMessageListenerContainer<String, ObjectRecord<String, String>> container = StreamMessageListenerContainer
                .create(redisConnectionFactory, options);
        // lastConsumed(): only messages never delivered to another consumer of the same group.
        StreamOffset<String> offset = StreamOffset.create(MESSAGE_QUEUE_NAME, ReadOffset.lastConsumed());
        StreamMessageListenerContainer.StreamReadRequestBuilder streamReadRequestBuilder =
                StreamMessageListenerContainer.StreamReadRequest.builder(offset);
        // Register one read request per (group, consumer) pair.
        for (Consumer consumer : ConsumerFactory.getConsumers()) {
            StreamMessageListenerContainer.StreamReadRequest request = streamReadRequestBuilder
                    .consumer(consumer)
                    .autoAcknowledge(true) // auto-ack: messages are acked on delivery
                    .cancelOnError(p -> false) // keep polling after errors
                    .errorHandler(error -> {
                    })
                    .build();
            container.register(request, message -> System.out.println(consumer.getName() + "收到消息:" + message.getValue()));
        }
        container.start();
        return container;
    }

    /**
     * Builds the (group, consumer) pairs and lazily creates any consumer
     * group that does not yet exist on the stream.
     */
    private static class ConsumerFactory {
        // Group names already present on the stream at startup.
        private static final List<String> HAS_INIT_GROUPS = new ArrayList<>();
        private static final RedisTemplate redisStreamTemplate;
        private static final String[] GROUPS = new String[]{GROUP_A_NAME, GROUP_B_NAME};
        private static final String[] consumers = new String[]{CONSUMER_A_NAME, CONSUMER_B_NAME, CONSUMER_C_NAME};

        static {
            // NOTE(review): "SpringContextUtis" looks misspelled — confirm the
            // actual helper class name before renaming anything.
            redisStreamTemplate = SpringContextUtis.getBean("redisStreamTemplate", RedisTemplate.class);
            StreamInfo.XInfoGroups groups = redisStreamTemplate.opsForStream().groups(MESSAGE_QUEUE_NAME);
            // FIX: a plain forEach replaces the original three-argument collect(),
            // which abused mutable reduction to accumulate into an external list.
            groups.forEach(group -> HAS_INIT_GROUPS.add(group.groupName()));
        }

        /**
         * Returns one Consumer per (group, consumer-name) combination,
         * creating each missing group starting from offset "0".
         */
        public static LinkedList<Consumer> getConsumers() {
            LinkedList<Consumer> list = new LinkedList<>();
            for (String groupName : GROUPS) {
                if (!HAS_INIT_GROUPS.contains(groupName)) {
                    // Groups must exist before a consumer can read from them.
                    redisStreamTemplate.opsForStream().createGroup(MESSAGE_QUEUE_NAME, ReadOffset.from("0"), groupName);
                }
                for (String consumer : consumers) {
                    list.add(Consumer.from(groupName, groupName + consumer));
                }
            }
            return list;
        }
    }
}
创建消息发布者
@Component
public class RedisStreamPublisher {
@Resource
private RedisTemplate redisStreamTemplate;
public void publish(Object obj){
StringRecord record = StreamRecords.newRecord()
.ofStrings(Maps.newHashMap("field", String.valueOf(obj)))
.withStreamKey(StreamConstant.MESSAGE_QUEUE_NAME);
redisStreamTemplate.opsForStream().add(record);
}
}
测试
@Test
@SneakyThrows
public void test_redis_stream_non_group() {
    // Publish msg1 .. msg100 to the stream.
    int sent = 0;
    while (sent < 100) {
        sent++;
        redisStreamPublisher.publish("msg" + sent);
    }
    // Keep the test alive so the asynchronous stream listener can consume.
    Thread.sleep(60_000L);
}