整合maven依赖
<!--kafka-->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
<!--redis-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
</dependency>
<!--使用xml或properties配置所依赖的包 @PropertySource-->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-configuration-processor</artifactId>
<optional>true</optional>
</dependency>
消费者代码
使用了redis+线程+延时队列的方式来进行消费。
1、配置文件预览
spring.application.name = sesp-xndc-bj-kafka
server.port= 19091
spring.main.allow-bean-definition-overriding=true
spring.kafka.bootstrap-servers= 192.168.3.111:9092
# producer 生产者
# 发生错误后,消息重发的次数。
spring.kafka.producer.retries = 3
# 当有多个消息需要被发送到同一个分区时,生产者会把它们放在同一个批次里。该参数指定了一个批次可以使用的内存大小,按照字节数计算。16K
spring.kafka.producer.batch-size = 16384
# 生产端缓冲区大小。32M
spring.kafka.producer.buffer-memory= 33554432
# 指定消息键的序列化方式
spring.kafka.producer.key-serializer= org.apache.kafka.common.serialization.StringSerializer
# 指定消息值的序列化方式
spring.kafka.producer.value-serializer= org.apache.kafka.common.serialization.StringSerializer
# 应答级别:多少个分区副本备份完成时向生产者发送ack确认(可选0、1、all/-1)
# acks=0 : 生产者在成功写入消息之前不会等待任何来自服务器的响应。
# acks=1 : 只要集群的首领节点收到消息,生产者就会收到一个来自服务器成功响应。
# acks=all :只有当所有参与复制的节点全部收到消息时,生产者才会收到一个来自服务器的成功响应
spring.kafka.producer.acks = 1
# consumer消费者
# 指定默认消费者group id 该字段见 Kafka 安装包中的 consumer.properties,可自行修改, 修改完毕后需要重启 Kafka
spring.kafka.consumer.group-id= testGroup
# 提交offset延时(接收到消息后多久提交offset)
# 自动提交的时间间隔 在spring boot 2.X 版本中这里采用的是值的类型为Duration 需要符合特定的格式,如1S,1M,2H,5D
spring.kafka.consumer.auto-commit-interval= 1000
# 该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理:
# latest(默认值)在偏移量无效的情况下,消费者将从最新的记录开始读取数据(在消费者启动之后生成的记录)
# earliest :在偏移量无效的情况下,消费者将从起始位置读取分区的记录
# none:topic各分区都存在已提交的offset时,从offset后开始消费;只要有一个分区不存在已提交的offset,则抛出异常
spring.kafka.consumer.auto-offset-reset= latest
# 是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量
spring.kafka.consumer.enable-auto-commit= false
# 批量消费每次最多消费多少条消息
spring.kafka.consumer.max-poll-records= 50
# 指定消息key和消息体的编解码方式
spring.kafka.consumer.key-deserializer= org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer= org.apache.kafka.common.serialization.StringDeserializer
# 如果没有足够的数据立即满足“fetch-min-size”给出的要求,服务器在响应获取请求之前阻塞的最长时间
# fetch-max-wait:
# 服务器应为获取请求返回的最小数据量。
# fetch-min-size:
# 单次调用 poll() 时返回的最大记录数。
#max-poll-records:
# 到消费者协调器的心跳之间的预期时间。
#heartbeat-interval:
# 读取以事务方式写入的消息的隔离级别。默认读未提交
#isolation-level: read_committed
# 在侦听器容器中运行的线程数。
spring.kafka.listener.concurrency= 5
# listener负责ack,每调用一次,就立即commit AckMode枚举都在这里
spring.kafka.listener.ack-mode= manual_immediate
# 如果代理上不存在至少一个配置的主题,容器是否应该无法启动。默认false
spring.kafka.listener.missing-topics-fatal= false
# 批量消费
#spring.kafka.listener.type=batch
2、构造消费者
为了直观的显示效果,这里的配置均写死,生产环境中可写入配置。下面方法中除了创建【KafkaConsumer】的构造函数以外,还添加了订阅方法【subscribe】、消费消息方法【poll】、手动提交方法【commitSync】。
其中消费消息方法的参数为【Duration】而不是【long】,查看源码得知参数为long的方法已弃用。
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.List;
import java.util.Properties;
/**
 * Thin wrapper around a manually-configured {@link KafkaConsumer} with
 * manual offset commits. Settings are hard-coded for demonstration;
 * externalize them to configuration for production use.
 *
 * <p>NOTE(review): KafkaConsumer is not thread-safe — confine each
 * instance to a single consuming thread.
 */
public class Consumer {

    private final KafkaConsumer<String, String> consumer;

    // Maximum time (seconds) a single poll() blocks waiting for records.
    private final int waitTime = 1;

    /**
     * Builds the consumer with fixed connection settings and manual
     * offset-commit mode.
     */
    public Consumer() {
        Properties props = new Properties();
        // Broker bootstrap address
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.3.111:9092");
        // Consumer group id
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "testGroup");
        // Offsets are committed explicitly via commitSync()
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Auto-commit interval (only relevant when auto commit is enabled)
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Session timeout before the broker evicts this consumer from the group
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // Key deserializer
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                StringDeserializer.class.getName());
        // Value deserializer
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                StringDeserializer.class.getName());
        // Upper bound on records returned by a single poll()
        // (use the named constant rather than the raw "max.poll.records" string)
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
        // Diamond operator instead of a raw-typed constructor call
        consumer = new KafkaConsumer<>(props);
    }

    /**
     * Subscribes to the given topics, replacing any previous subscription.
     *
     * @param topic topic names to subscribe to
     */
    public void subscribe(List<String> topic) {
        consumer.subscribe(topic);
    }

    /**
     * Fetches the next batch of records, blocking up to {@code waitTime}
     * seconds. Uses poll(Duration) — the poll(long) overload is deprecated.
     *
     * @return the records fetched in this round (possibly empty)
     */
    public ConsumerRecords<String, String> poll() {
        return consumer.poll(Duration.ofSeconds(waitTime));
    }

    /** Synchronously commits the offsets returned by the last poll(). */
    public void commitSync() {
        consumer.commitSync();
    }
}
3、消费订阅
首先订阅topic,当前topic写死,生产环境可配置;然后线程运行时开始消费kafka,如果有返回值,则做进一步处理,其中用到了redis,可根据实际情况是否引用。
import com.nari.consumer.Consumer;
import com.nari.mapper.DsmMeteValueMapper;
import com.nari.redis.StringRedisService;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@Service
@Slf4j
public class MeteValueSubscribe extends Thread {

    // Built manually in subscribeMSG(); annotation-based injection failed
    // because Consumer needs runtime construction rather than a Spring bean.
    private Consumer kafkaConsumer;

    @Autowired
    private StringRedisService redisService;

    @Resource
    private DsmMeteValueMapper dsmMeteValueMapper;

    // Worker-pool size. The ":10" placeholder supplies the default when the
    // property is absent — a field initializer would be overwritten by @Value,
    // and "private String threadNum = 10" did not even compile.
    @Value("${queue.thread.num:10}")
    private Integer threadNum;

    // Shared delay queue, handed in by the application bootstrap.
    private DelayQueue<Item> queue;

    // Delay-queue wait time, in seconds.
    @Value("${queue.delay.time:1}")
    private Integer delayTime;

    @Value("${queue.group.num:5000}")
    private Integer queueNum;

    /**
     * Subscribes to the Kafka topic and starts this consuming thread.
     * Deliberately not @PostConstruct: run() loops forever and would block
     * the remainder of application startup if invoked during bean init.
     *
     * @param queue shared delay queue that downstream processors drain
     */
    public void subscribeMSG(DelayQueue<Item> queue) {
        kafkaConsumer = new Consumer();
        List<String> topics = new ArrayList<>();
        topics.add("testTopic");
        // Subscribe; topic is hard-coded here, externalize for production.
        kafkaConsumer.subscribe(topics);
        this.queue = queue;
        log.info("订阅结束");
        this.start();
    }

    /**
     * Poll loop: fetches records, dispatches each one to the worker pool,
     * then synchronously commits the polled offsets.
     */
    @Override
    public void run() {
        // Pool is created once per consumer thread, not per iteration.
        ExecutorService workerPool = Executors.newFixedThreadPool(threadNum);
        while (true) {
            log.info("开始消费kafka");
            // Fetch the next batch of records
            ConsumerRecords<String, String> records = kafkaConsumer.poll();
            log.info("records.count===" + records.count());
            // Hand each record to an asynchronous worker
            for (ConsumerRecord<String, String> record : records) {
                log.info("消息处理,record===" + record.toString());
                workerPool.execute(new OCDsmMeteValueRedis(
                        record, redisService, dsmMeteValueMapper, queue, delayTime, queueNum));
            }
            // NOTE(review): offsets are committed even though the worker
            // threads may not have finished (or may fail) — records can be
            // lost on a crash. Confirm this at-most-once behavior is intended.
            kafkaConsumer.commitSync();
        }
    }
}
4、消费消息处理类
这里主要将消费到的消息内容进行处理,然后创建一个延时队列。
import com.nari.entity.vo.*;
import com.nari.mapper.DsmMeteValueMapper;
import com.nari.redis.StringRedisService;
import com.nari.util.Common;
import com.nari.util.JsonUtil;
import com.nari.util.LogUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import java.util.List;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@Slf4j
public class OCDsmMeteValueRedis extends Thread{
private StringRedisService redisService;
private DsmMeteValueMapper dsmMeteValueMapper;
private ConsumerRecord<String, String> record;
private DelayQueue<Item> queue;
private Integer delayTime;
private Integer queueNum;
public OCDsmMeteValueRedis(ConsumerRecord<String, String> record,
StringRedisService redisService,
DsmMeteValueMapper dsmMeteValueMapper,
DelayQueue<Item> queue,
Integer delayTime,
Integer queueNum) {
this.record = record;
this.redisService = redisService;
this.dsmMeteValueMapper = dsmMeteValueMapper;
this.queue = queue;
this.delayTime = delayTime;
this.queueNum = queueNum;
}
@Override
public void run() {
String topic = record.topic();
log.info("[OCDsmMeteValueRedis], " +
"Received message: (key:" + record.key() + " topic:" + topic +
",\\r\\n value: " + record.value()
+ ") at offset " + record.offset());
try {
String value = record.value();
if ("testTopic".equals(topic)) {
// 具体业务处理
List<String> varList = new ArrayList<>();
Integer queueWait = delayTime * (i + 5);
Item item = new Item(topic, queueWait, TimeUnit.SECONDS, isRedis, varList);
queue.add(item);
}
} catch (Exception e) {
log.error("kafka写入数据失败:" + e);
}
}
}
5、延时队列生成类
import java.util.List;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
/**
 * Delay-queue element: becomes available once the configured delay has
 * elapsed. NOTE(review): equals/hashCode are not overridden, so compareTo
 * is intentionally only consistent with ordering, not with equality.
 */
public class Item implements Delayed {
    /* Absolute trigger time, epoch milliseconds. */
    private long time;
    String topic;
    // "0": no need to re-check the latest value in redis; "1": re-check.
    String isRedis;
    // 设备负荷数据集合 (device load data)
    List<String> varList;

    /**
     * @param topic   originating Kafka topic
     * @param time    relative delay (non-positive means "available now")
     * @param unit    unit of {@code time}
     * @param isRedis redis re-check flag, see field comment
     * @param varList payload list carried to the queue consumer
     */
    public Item(String topic, long time, TimeUnit unit, String isRedis, List<String> varList) {
        this.topic = topic;
        this.time = System.currentTimeMillis() + (time > 0 ? unit.toMillis(time) : 0);
        this.isRedis = isRedis;
        this.varList = varList;
    }

    /**
     * Remaining delay expressed in the REQUESTED unit. The original returned
     * raw milliseconds regardless of {@code unit}; DelayQueue queries this
     * with NANOSECONDS, so items were released almost immediately.
     */
    @Override
    public long getDelay(TimeUnit unit) {
        return unit.convert(time - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
    }

    /**
     * Orders items by trigger time. The original returned -1 for equal
     * times, violating the Comparable contract (compareTo(x, x) must be 0).
     */
    @Override
    public int compareTo(Delayed o) {
        Item item = (Item) o;
        return Long.compare(this.time, item.time);
    }

    @Override
    public String toString() {
        return "Item{" +
                "time=" + time +
                ", topic='" + topic + '\'' +
                '}';
    }
}
6、解析延时队列类
import com.nari.mapper.DsmMeteValueMapper;
import com.nari.redis.StringRedisService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@Service
public class QueueMeteValue extends Thread{
@Autowired
private StringRedisService redisService;
@Resource
private DsmMeteValueMapper dsmMeteValueMapper;
private DelayQueue<Item> queue;
@Value("${queue.thread.num}")
private String threadNum = 10;
@Value("${queue.insert.num}")
private Integer insertNum = 500;
public void queueMSG(DelayQueue<Item> queue) {
this.queue = queue;
this.start();
}
@Override
public void run() {
while (true) {
ExecutorService newCachedThreadPool = Executors.newFixedThreadPool(Integer.valueOf(threadNum));
try {
Item take = queue.take();
log.info("延时队列的个数:" + queue.size());
System.out.format("topic:{%s}, time:{%s}\n", take.topic, LocalDateTime.now().format(DateTimeFormatter.ISO_DATE_TIME));
QueueThread thread = new QueueThread(take, insertNum, redisService, dsmMeteValueMapper);
newCachedThreadPool.execute(thread);
} catch (Exception e) {
log.error("解析队列失败:" + e);
}
}
}
}
7、延时队列处理类
可在延时队列处理类中处理具体业务逻辑。
/**
 * Worker task that carries out the business processing for one expired
 * delay-queue item.
 * NOTE(review): uses `log` but lacks the @Slf4j (lombok) annotation present
 * on sibling classes — add it so the snippet compiles.
 */
public class QueueThread extends Thread {

    private Item take;
    // Batch size for downstream inserts.
    private Integer insertNum;
    private StringRedisService redisService;
    private DsmMeteValueMapper dsmMeteValueMapper;

    /**
     * @param take               the expired delay-queue item to process
     * @param insertNum          batch size hint for persistence
     * @param redisService       redis access facade
     * @param dsmMeteValueMapper persistence mapper
     */
    public QueueThread(Item take, Integer insertNum, StringRedisService redisService, DsmMeteValueMapper dsmMeteValueMapper) {
        this.take = take;
        this.insertNum = insertNum;
        this.redisService = redisService;
        this.dsmMeteValueMapper = dsmMeteValueMapper;
    }

    /**
     * Dispatches on the item's topic and runs the business logic.
     * (Removed unused locals `start_time` and `size` from the original.)
     */
    @Override
    public void run() {
        try {
            String topic = take.topic;
            log.info("进入延时队列,topic:" + topic);
            if ("testTopic".equals(topic)) {
                List<String> varList = take.varList;
                // 具体业务处理 — process varList / persist via the mapper here.
            }
        } catch (Exception e) {
            log.error("解析队列失败:" + e);
        }
    }
}
8、启动后运行
@Component
public class ApplicationStartup implements ApplicationRunner {

    @Autowired
    private MeteValueSubscribe meteValueSubscribe;

    @Autowired
    private QueueMeteValue queueMeteValue;

    /**
     * Invoked once the Spring context is ready: creates one shared
     * DelayQueue and hands it to both the Kafka subscriber and the
     * delay-queue drainer.
     */
    @Override
    public void run(ApplicationArguments args) throws Exception {
        final DelayQueue<Item> sharedQueue = new DelayQueue<>();
        // 订阅 — start the Kafka consuming thread
        meteValueSubscribe.subscribeMSG(sharedQueue);
        // 延时队列消费 — start the queue-draining thread
        queueMeteValue.queueMSG(sharedQueue);
    }
}