一、依赖
<repositories>
<repository>
<id>cloudera</id>
<url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
</repository>
</repositories>
<properties>
<kafka-version>2.1.0-cdh6.2.0</kafka-version>
</properties>
<dependencies>
<!--kafka依赖-->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>connect-api</artifactId>
<version>${kafka-version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.12</artifactId>
<version>${kafka-version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka-version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-streams</artifactId>
<version>${kafka-version}</version>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter</artifactId>
<version>5.8.2</version>
<scope>compile</scope>
</dependency>
</dependencies>
二、代码如下
package Demo;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
import java.util.Properties;
public class kafkaDemo1 {
    /**
     * Demo consumer: reads partition 0 of the {@code datapartition} topic from the
     * earliest offset and prints every record's offset, key and value to stdout.
     * Runs until the process is killed.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        String topicName = "datapartition";

        // 1. Consumer configuration.
        Properties properties = new Properties();
        // Kafka broker address.
        properties.put("bootstrap.servers", "hdp1:9092");
        // Consumer group id.
        properties.put("group.id", "gmall");
        // Commit offsets automatically...
        properties.put("enable.auto.commit", "true");
        // ...once per second.
        properties.put("auto.commit.interval.ms", "1000");
        // Deserializers that turn the raw key/value bytes back into Strings.
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // The single partition this demo reads; build it once instead of twice.
        TopicPartition partition = new TopicPartition(topicName, 0);

        // 2. Create the consumer. try-with-resources guarantees close() runs if the
        //    loop ever exits abnormally, releasing sockets and group membership.
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties)) {
            // 3. Manually assign the partition (no group rebalancing) and rewind
            //    to the beginning so every run replays the full partition.
            kafkaConsumer.assign(Collections.singletonList(partition));
            kafkaConsumer.seekToBeginning(Collections.singletonList(partition));

            // 4. Poll loop.
            while (true) {
                // poll(Duration) replaces the poll(long) overload deprecated in
                // Kafka 2.0 (this project uses 2.1.0): it bounds total blocking
                // time, including metadata fetches. Wait up to 100 ms per poll.
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("-----------------");
                    // Print offset, key and value of each record.
                    System.out.printf("offset = %d, key = %s, value = %s\n", record.offset(), record.key(), record.value());
                    System.out.println();
                }
            }
        }
    }
}
三、部分打印结果