Add the dependency
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.1.1.RELEASE</version>
</dependency>
Configuration
kafka:
  consumer:
    enable-auto-commit: true
    group-id: 12345678901234
    auto-commit-interval: 1000
    auto-offset-reset: earliest
    bootstrap-servers: localhost:9092
    key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    concurrency: 3
Configuration class
package com.jiuling.jni.kafka;

import cn.jiuling.plugin.config.DbPropUtil;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.HashMap;
import java.util.Map;

@Configuration
public class KafkaConsumerConfig {

    @Value("${kafka.consumer.bootstrap-servers}")
    private String servers;

    @Value("${kafka.consumer.enable-auto-commit}")
    private boolean enableAutoCommit;

    @Value("${kafka.consumer.auto-commit-interval}")
    private String autoCommitInterval;

    @Value("${kafka.consumer.group-id}")
    private String groupId;

    @Value("${kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    @Value("${kafka.consumer.concurrency}")
    private int concurrency;

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // number of concurrent consumer threads
        factory.setConcurrency(concurrency);
        // deliver records to the listener in batches
        factory.setBatchListener(true);
        factory.getContainerProperties().setPollTimeout(1500);
        // do not start the listener containers automatically; they are started on demand (see the last section)
        factory.setAutoStartup(false);
        return factory;
    }

    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringDeserializer.class);
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        // fetch at most n records per poll
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, DbPropUtil.getInt("anbo.number", 50));
        return propsMap;
    }

    @Bean
    public FaceKafkaConSumer listener() {
        return new FaceKafkaConSumer();
    }
}
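The FaceKafkaConSumer registered by the listener() bean above is not shown in this post. As a rough sketch only: because the factory enables batch listening, such a class would need a @KafkaListener method that accepts a List of records. The topic name Q_FACE_TOPIC below is a hypothetical placeholder, not from the original.

package com.jiuling.jni.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;

import java.util.List;

public class FaceKafkaConSumer {

    // batch listener: receives up to max.poll.records records per invocation
    @KafkaListener(containerFactory = "kafkaListenerContainerFactory", topics = "Q_FACE_TOPIC")
    public void listen(List<ConsumerRecord<String, String>> recordList) {
        for (ConsumerRecord<String, String> record : recordList) {
            // process each record here
            System.out.println(record.value());
        }
    }
}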
Simple producer
package com.jiuling.jni.kafka;

import com.alibaba.fastjson.JSON;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class ProducerDemo {

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092");
        properties.put("acks", "all");
        properties.put("retries", 0);
        properties.put("batch.size", 16384);
        properties.put("linger.ms", 1);
        properties.put("buffer.memory", 33554432);
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // try-with-resources closes the producer even if construction or sending fails
        try (Producer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 100; i++) {
                Map<String, String> map = new HashMap<>();
                map.put("OBJ_PIC", i + "_obj_pic.jpg");
                map.put("PIC", i + "_pic.jpg");
                map.put("DEVICE_ID", i + "_xxx");
                map.put("JGSK", "20210430153630");
                producer.send(new ProducerRecord<>("topic", JSON.toJSONString(map)));
                // note: send() is asynchronous; this only means the record was queued locally
                System.out.println("success!");
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
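Because send() is asynchronous, the "success!" line above does not confirm delivery to the broker. To confirm acknowledgement, the send call in the loop can be given a callback; a minimal sketch, replacing the producer.send(...) line above:

producer.send(new ProducerRecord<>("topic", JSON.toJSONString(map)),
        (metadata, exception) -> {
            if (exception != null) {
                // delivery failed after any configured retries
                exception.printStackTrace();
            } else {
                // the broker acknowledged the record
                System.out.println("sent to partition " + metadata.partition()
                        + " at offset " + metadata.offset());
            }
        });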
Consumer
package com.jiuling.jni.kafka;

import com.alibaba.fastjson.JSONObject;
import com.jiuling.jni.entity.KafkaToEsdk;
import com.jiuling.jni.service.IvsSdkUploadImgService;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Service;

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * Fetches vehicle capture data from Kafka.
 */
@Service
@Slf4j
public class CarKafkaConsumer {

    @Autowired
    IvsSdkUploadImgService uploadImgService;

    /**
     * Thread pool for submitting eSDK upload tasks.
     */
    private final ExecutorService faceTaskService = new ThreadPoolExecutor(10, 10,
            0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());

    @KafkaListener(containerFactory = "kafkaListenerContainerFactory",
            topics = "Q_FACE_SHARE_HW")
    public void listen(List<ConsumerRecord<String, String>> recordList) {
        for (ConsumerRecord<String, String> record : recordList) {
            faceTaskService.submit(() -> {
                KafkaToEsdk kafkaToEsdk = new KafkaToEsdk();
                JSONObject jsonObject = JSONObject.parseObject(record.value());
                kafkaToEsdk.setBigImagUrl(jsonObject.getString("PIC"));
                kafkaToEsdk.setImgeUrl(jsonObject.getString("OBJ_PIC"));
                kafkaToEsdk.setCameraCode(jsonObject.getString("DEVICE_ID"));
                // pad the capture time (yyyyMMddHHmmss) out to millisecond precision
                kafkaToEsdk.setCaptureTime(Long.parseLong(jsonObject.getString("JGSK") + "000"));
                kafkaToEsdk.setIvsSnapshotType(1);
                // upload the face data
                int i = uploadImgService.uploadDeviceImgV2(kafkaToEsdk);
                if (i != 0) {
                    log.error("Upload failed! url: {}", kafkaToEsdk.getImgeUrl());
                }
            });
        }
    }
}
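One caveat with this pattern: the ThreadPoolExecutor is never shut down, so tasks still queued at shutdown can be lost even though their offsets were auto-committed. A minimal sketch of a cleanup hook that could be added to the class above (requires import javax.annotation.PreDestroy; the 30-second timeout is an arbitrary choice, not from the original):

@PreDestroy
public void shutdownTaskPool() {
    // stop accepting new tasks, then give in-flight uploads time to finish
    faceTaskService.shutdown();
    try {
        if (!faceTaskService.awaitTermination(30, TimeUnit.SECONDS)) {
            faceTaskService.shutdownNow();
        }
    } catch (InterruptedException e) {
        faceTaskService.shutdownNow();
        Thread.currentThread().interrupt();
    }
}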
Starting the consumer dynamically
Why start the consumer dynamically? In some business scenarios we need to initialize certain resources first and only then let the Kafka consumer start consuming. This is why the container factory above sets setAutoStartup(false).
package com.jiuling.jni.service.impl;

import com.jiuling.jni.service.KafkaService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.stereotype.Service;

import java.util.Date;

@Service
@Slf4j
public class KafkaServiceImpl implements KafkaService {

    @Autowired
    private KafkaListenerEndpointRegistry registry;

    @Override
    public void start() {
        // start the listener containers if they are not already running
        if (!registry.isRunning()) {
            registry.start();
            log.info("Kafka consumption started at {}", new Date());
        }
    }

    @Override
    public void stop() {
        // stop the listener containers
        registry.stop();
        log.info("Kafka consumption stopped at {}", new Date());
    }
}
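A minimal sketch of wiring this up: an ApplicationRunner runs after the Spring context is fully initialized, so it is a natural place to prepare resources and then call start(). The package name, class name, and initResources() step below are hypothetical placeholders, not from the original.

package com.jiuling.jni.runner;

import com.jiuling.jni.service.KafkaService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.stereotype.Component;

@Component
@Slf4j
public class KafkaStartupRunner implements ApplicationRunner {

    @Autowired
    private KafkaService kafkaService;

    @Override
    public void run(ApplicationArguments args) {
        // placeholder for whatever setup must finish before consumption
        // begins (e.g. logging into the eSDK)
        initResources();
        kafkaService.start();
    }

    private void initResources() {
        log.info("resources initialized");
    }
}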