1. Maven project: dependencies
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>1.1.0</version>
</dependency>
<dependency>
    <groupId>com.google.guava</groupId>
    <artifactId>guava</artifactId>
    <version>19.0</version>
</dependency>
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-api</artifactId>
    <version>1.7.5</version>
</dependency>
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-log4j12</artifactId>
    <version>1.7.5</version>
</dependency>
2. Configuration file: consumer.properties (place it in the resources folder)
# Kafka (cluster) address and port
bootstrap.servers=<cluster address>
# consumer group id
group.id=test
enable.auto.commit=true
# key deserializer
key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
# value deserializer
value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
# session timeout
session.timeout.ms=10000
fetch.min.bytes=500
receive.buffer.bytes=10240
max.partition.fetch.bytes=20480
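With enable.auto.commit=true, offsets are committed in the background; if a consumer dies between a poll and the next auto-commit, records can be processed twice or skipped. A common alternative is to set enable.auto.commit=false and commit after processing. A minimal sketch of that variant (the class name ManualCommitConsumer and the localhost:9092 address are illustrative, not part of the setup above):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

public class ManualCommitConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // replace with your cluster address
        props.put("group.id", "test");
        props.put("enable.auto.commit", "false"); // commit offsets ourselves
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("test"));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());
                }
                // Commit only after the whole batch is processed: at-least-once semantics
                consumer.commitSync();
            }
        } finally {
            consumer.close();
        }
    }
}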
3. Logging configuration: log4j.properties
# The appender names after rootLogger must match the definitions below (case-sensitive)
log4j.rootLogger=INFO,stdout,Console
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
log4j.appender.Console=org.apache.log4j.DailyRollingFileAppender
log4j.appender.Console.File=/home/htmlfile/logs/ms.log
log4j.appender.Console.Append=true
log4j.appender.Console.DatePattern='.'yyyy-MM-dd
log4j.appender.Console.layout=org.apache.log4j.PatternLayout
log4j.appender.Console.layout.ConversionPattern=%d %-5p - [%l] - %m%n
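With slf4j-log4j12 from step 1 on the classpath, every SLF4J call is routed through log4j to both appenders: stdout and the daily-rolling file /home/htmlfile/logs/ms.log. A quick smoke test (LogSmokeTest is a made-up class name):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogSmokeTest {
    private static final Logger logger = LoggerFactory.getLogger(LogSmokeTest.class);

    public static void main(String[] args) {
        // Should appear on stdout and in /home/htmlfile/logs/ms.log
        logger.info("log4j configuration loaded");
    }
}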
4. Consumer side: SingleConsumer.java
package your.package.name;

import com.google.common.io.Resources;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.Properties;

public class SingleConsumer {
    // Logger for this class
    private static final Logger logger = LoggerFactory.getLogger(SingleConsumer.class);
    // A consumer with String keys and values
    private static KafkaConsumer<String, String> consumer;

    // Static block: runs once when the class is loaded
    static {
        try {
            // Load consumer.properties from the classpath (mark the folder as a resources root)
            InputStream props = Resources.getResource("consumer.properties").openStream();
            Properties properties = new Properties();
            properties.load(props);
            consumer = new KafkaConsumer<>(properties);
        } catch (IOException e) {
            // Log the initialization failure
            logger.error("consumer initialize failed: {}", e.getMessage());
        }
    }

    public static void main(String[] args) {
        // Subscribe to one or more topics; Arrays.asList() turns the array into a list
        consumer.subscribe(Arrays.asList("test", "test2"));
        // Read from the topics in an endless loop
        while (true) {
            // poll(timeout) fetches the next batch of records
            ConsumerRecords<String, String> records = consumer.poll(100);
            // Iterate over the records
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
            }
        }
    }
}
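As written, the while (true) loop never exits, so consumer.close() is never called and the broker must wait out session.timeout.ms before rebalancing the group. One standard pattern is to call consumer.wakeup() from a JVM shutdown hook: wakeup() is the one thread-safe KafkaConsumer method, and it makes a blocked poll() throw WakeupException so the loop can end cleanly. A sketch of an alternative main (everything else stays as in SingleConsumer above):

public static void main(String[] args) {
    final Thread mainThread = Thread.currentThread();
    // On Ctrl-C / SIGTERM: interrupt poll(), then wait for main to finish closing
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        consumer.wakeup();
        try {
            mainThread.join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }));

    consumer.subscribe(Arrays.asList("test", "test2"));
    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
            }
        }
    } catch (org.apache.kafka.common.errors.WakeupException e) {
        // Expected on shutdown; nothing to do
    } finally {
        // Leaves the group promptly instead of waiting for session.timeout.ms
        consumer.close();
    }
}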
The records returned by poll() are ConsumerRecord instances, which have three constructors:

ConsumerRecord(String topic, int partition, long offset, K key, V value)
ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, long checksum, int serializedKeySize, int serializedValueSize, K key, V value)
ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, Long checksum, int serializedKeySize, int serializedValueSize, K key, V value, Headers headers)

Constructor parameters:
- topic - the topic this record was received from
- partition - the partition of the topic this record was received from
- offset - the offset of this record in the corresponding Kafka partition
- key - the key of the record, if one exists (can be null)
- value - the record contents
- timestamp - the timestamp of the record
- timestampType - the timestamp type
- checksum - the checksum (CRC32) of the complete record
- serializedKeySize - the length of the serialized key
- serializedValueSize - the length of the serialized value
- headers - the headers of the record
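Each constructor parameter above is mirrored by an accessor of the same name on ConsumerRecord, so the metadata can be read inside the poll loop, for example:

for (ConsumerRecord<String, String> record : records) {
    // Accessors mirror the constructor parameters listed above
    System.out.printf("topic = %s, partition = %d, offset = %d%n",
            record.topic(), record.partition(), record.offset());
    System.out.printf("timestamp = %d (%s), keySize = %d, valueSize = %d%n",
            record.timestamp(), record.timestampType(),
            record.serializedKeySize(), record.serializedValueSize());
    // Headers were added in Kafka 0.11; each header has a String key and a byte[] value
    record.headers().forEach(header -> System.out.println(header.key()));
}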