Configuration:
spring:
  kafka:
    #=============== consumer =======================
    consumer:
      auto-offset-reset: earliest
      enable-auto-commit: true
      auto-commit-interval: 100
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      concurrency: 5
      missing-topics-fatal: false
    producer:
      value-serializer: org.springframework.kafka.support.serializer.JsonSerializer # serialize outgoing objects as JSON
      retries: 0 # if set to a value greater than 0, the client retries records whose send failed
      batch-size: 16384 # when multiple records go to the same partition, the producer batches them into fewer requests, improving both client and broker throughput; this sets the default batch size in bytes (16384 is the default)
      buffer-memory: 33554432 # total bytes the producer may use to buffer records waiting to be sent to the broker (33554432 is the default)
      key-serializer: org.apache.kafka.common.serialization.StringSerializer # serializer class for keys
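The same producer settings can also be declared programmatically instead of via application.yml. A minimal sketch, assuming Spring Kafka's DefaultKafkaProducerFactory; the broker address localhost:9092 is a placeholder, not from the original:
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.support.serializer.JsonSerializer;

@Configuration
public class KafkaProducerConfig {

    @Bean
    public ProducerFactory<String, Object> producerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker address
        props.put(ProducerConfig.RETRIES_CONFIG, 0);                  // retries: 0
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);           // batch-size: 16384 (default)
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432L);    // buffer-memory: 33554432 (default)
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        return new DefaultKafkaProducerFactory<>(props);
    }

    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}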
Producer:
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFuture;

@Slf4j
@Service
public class AlertToKafkaService {

    // Typed to match the String key/value setup above.
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void sendMessage(IndiAlertLog alertLog, String topic) {
        // Serialize the alert to a JSON string before publishing.
        String message = JsonUtils.toJsonString(alertLog);
        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, message);
        future.addCallback(
                result -> log.debug("Sent message to topic: {}, payload: {}", topic, message),
                ex -> log.error("Failed to send message", ex));
    }
}
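Note that this callback style targets Spring Kafka 2.x, where send() returns a ListenableFuture. From Spring Kafka 3.0 onward, send() returns a CompletableFuture, so the same logging would look like this (a minimal sketch using the same topic and message variables as above):
// Spring Kafka 3.x variant: handle the result via CompletableFuture.
kafkaTemplate.send(topic, message).whenComplete((result, ex) -> {
    if (ex == null) {
        log.debug("Sent message to topic: {}, payload: {}", topic, message);
    } else {
        log.error("Failed to send message", ex);
    }
});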
Consumer:
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
@Slf4j
public class AlertToKafkaConsumer {

    // "alert-log-topic" and "alert-log-group" are placeholders; substitute your own topic and group id.
    @KafkaListener(topics = "alert-log-topic", groupId = "alert-log-group")
    public void consumeMessage(ConsumerRecord<String, String> consumerRecord) {
        try {
            log.info("Consumed message from topic: {} -> {}", consumerRecord.topic(), consumerRecord.value());
            // String.format uses %s placeholders, not the SLF4J {} style.
            System.out.println(String.format("Consumed message from topic: %s -> %s",
                    consumerRecord.topic(), consumerRecord.value()));
        } catch (Exception e) {
            // Log instead of silently swallowing processing failures.
            log.error("Failed to process message", e);
        }
    }
}
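Because the producer publishes the alert as a JSON string, the consumer can map the payload back into the domain object. A minimal sketch with Jackson's ObjectMapper (the original uses a project-local JsonUtils wrapper, so the exact parsing call is an assumption):
import com.fasterxml.jackson.databind.ObjectMapper;

// Inside the listener's try block: map the JSON payload back to the domain type.
ObjectMapper mapper = new ObjectMapper();
IndiAlertLog alertLog = mapper.readValue(consumerRecord.value(), IndiAlertLog.class);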
Viewing messages:
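For a quick check from the command line, the stock Kafka console consumer can print what was published; in this sketch the broker address and topic name are placeholders:
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
    --topic alert-log-topic --from-beginning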