在 pom.xml 中引入以下依赖:
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>3.1.2</version>
</dependency>
测试代码如下:
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SslConfigs;
/**
 * Minimal Kafka consumer demo: connects to a broker over SSL and prints every
 * record value received from a topic.
 *
 * <p>All connection details (group id, broker address, keystore/truststore
 * paths and passwords) are hard-coded test values — replace them with real
 * configuration before any production use.
 */
public class ConsumerManage {
    /** Lazily created, shared consumer instance (see {@link #getKafkaConsumer()}). */
    private static KafkaConsumer<String, String> kafkaConsumer;

    /**
     * Returns the shared consumer, creating and configuring it on first call.
     *
     * <p>Synchronized so that concurrent first calls cannot race and create two
     * consumer instances (the unsynchronized original had that window).
     *
     * @return the singleton {@code KafkaConsumer}
     */
    public static synchronized KafkaConsumer<String, String> getKafkaConsumer() {
        // Test configuration data below; replace with actual values as needed.
        String groupId = "group-zhou02";
        String bootstrapServers = "192.168.1.176:9093";
        String certPassword = "zhozhozhozhozhozho";
        String sslKeystore = "D:\\SSL\\zhou02\\zhou02.keystore.jks";
        String sslTruststore = "D:\\SSL\\zhou02\\zhou02.truststore.jks";
        if (kafkaConsumer == null) {
            Properties properties = new Properties();
            properties.put(CommonClientConfigs.GROUP_ID_CONFIG, groupId);
            // Was ProducerConfig.BOOTSTRAP_SERVERS_CONFIG — same string value,
            // but the consumer-side constant is the correct one here.
            properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            properties.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, sslKeystore);
            properties.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, certPassword);
            properties.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, sslTruststore);
            properties.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, certPassword);
            properties.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, certPassword);
            properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
            // Empty algorithm disables server hostname verification — acceptable
            // only with self-signed test certificates.
            properties.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
            // Commit offsets automatically...
            properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
            // ...at this interval, in milliseconds.
            properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
            // Start from the earliest offset when the group has none committed.
            properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            /*
             * If the group coordinator receives no heartbeat from this consumer
             * within this window, the consumer is considered dead and a rebalance
             * reassigns its partitions to other group members.
             */
            properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
            // ConsumerConfig constants for consistency with the settings above
            // (the original used raw string keys for these three entries).
            properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringDeserializer");
            properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringDeserializer");
            kafkaConsumer = new KafkaConsumer<>(properties);
        }
        return kafkaConsumer;
    }

    /**
     * Subscribes to {@code topic} and polls forever, printing each record value
     * to stdout. Never returns; intended for manual testing only.
     *
     * @param topic the Kafka topic to consume from
     */
    public void consume(String topic) {
        kafkaConsumer = getKafkaConsumer();
        // Subscribe to the single requested topic.
        kafkaConsumer.subscribe(Collections.singletonList(topic));
        // Poll loop: block up to one second per poll, then print what arrived.
        while (true) {
            ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println("开始消费");
                System.out.println(consumerRecord.value());
                // Business-processing logic goes here.
                // For manual offset commits: set ENABLE_AUTO_COMMIT_CONFIG to
                // "false" above, then uncomment the next line.
                //kafkaConsumer.commitSync();
            }
        }
    }

    public static void main(String[] args) {
        new ConsumerManage().consume("topic-zhou02");
    }
}