1. Consumer and Consumer Group 消费者与消费者分组
   group.id  — the consumer-group id; consumers sharing the same group.id split the topic's partitions among themselves
   client.id — a logical name for this consumer instance, shown in broker logs and metrics to trace requests
2. Kafka Consumer programming 编程 coding
package com.hanwan.kafka.demo2;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
/**
* @ClassName SimpleConsumer
* @Description Consumer config demo
* @Copyright: Copyright (c) 2018
* @Company: www.lowan.com
* @Author hanwan
* @Date 2018/8/17 16:24
* @Version 1.0
**/
public class SimpleConsumer {

    private final static Logger LOGGER = LoggerFactory.getLogger(SimpleConsumer.class);

    /**
     * Demo of auto-commit semantics: polls topic "test_c" with
     * {@code enable.auto.commit=true} and hard-kills the JVM after a few
     * records, before the 10s auto-commit interval elapses, so the
     * uncommitted offsets are re-delivered on the next run.
     */
    public static void main(String[] args) {
        // try-with-resources closes the consumer (committing per config and
        // leaving the group cleanly) on any normal exit; the halt() below
        // deliberately bypasses it to simulate a crash.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(loadProps2())) {
            consumer.subscribe(Collections.singleton("test_c"));
            final AtomicInteger counter = new AtomicInteger();
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                records.forEach(record -> {
                    LOGGER.info("--------------------------------------------------------");
                    LOGGER.info("offset {}", record.offset());
                    LOGGER.info("value {}", record.value());
                    LOGGER.info("key {}", record.key());
                    LOGGER.info("partition {}", record.partition());
                    LOGGER.info("--------------------------------------------------------");
                    int cnt = counter.getAndIncrement();
                    if (cnt >= 3) {
                        // Kill the JVM immediately after the 4th record: no
                        // shutdown hooks, no close(), no auto-commit — so on
                        // restart these records are consumed again.
                        Runtime.getRuntime().halt(-1);
                    }
                });
            }
        }
    }

    /**
     * Minimal consumer config: cluster address, String deserializers, and the
     * group/client ids. All other settings keep their broker-client defaults.
     */
    private static Properties loadProps() {
        final Properties prop = new Properties();
        prop.put("bootstrap.servers", "120.55.125.58:9092,120.26.198.248:9092,121.40.200.37:9092");
        prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        prop.put("group.id", "test_group3");
        prop.put("client.id", "demo-consumer-client");
        return prop;
    }

    /**
     * Config used by {@link #main}: same as {@link #loadProps()} plus explicit
     * offset-reset and auto-commit tuning for the crash demo.
     */
    private static Properties loadProps2() {
        final Properties prop = new Properties();
        prop.put("bootstrap.servers", "120.55.125.58:9092,120.26.198.248:9092,121.40.200.37:9092");
        prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        prop.put("group.id", "test_group4");
        prop.put("client.id", "demo-consumer-client");
        // Start from the earliest offset when the group has no committed position.
        prop.put("auto.offset.reset", "earliest");
        // Auto-commit is on by default; stated explicitly for the demo.
        prop.put("enable.auto.commit", "true");
        // Widen the auto-commit interval (default 5000ms) to 10s so halting
        // within that window reliably loses the in-flight commits.
        prop.put("auto.commit.interval.ms", "10000");
        return prop;
    }
}
package com.hanwan.kafka.demo2;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
/**
* @ClassName ConsumerSyncCommit
* @Description Consumer 同步commit
* @Copyright: Copyright (c) 2018
* @Company: www.lowan.com
* @Author hanwan
* @Date 2018/8/20 10:07
* @Version 1.0
**/
public class ConsumerSyncCommit {

    private final static Logger LOGGER = LoggerFactory.getLogger(ConsumerSyncCommit.class);

    /**
     * Demo of manual synchronous commits: consumes topic "test12" with
     * {@code enable.auto.commit=false} and calls {@code commitSync()} after
     * each non-empty poll. commitSync blocks until the broker responds and
     * retries retriable errors, giving at-least-once delivery.
     */
    public static void main(String[] args) {
        // try-with-resources guarantees the consumer is closed (leaving the
        // group cleanly) if the poll/commit loop ever throws.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(loadProps())) {
            consumer.subscribe(Collections.singleton("test12"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                records.forEach(record -> {
                    LOGGER.info("key {}", record.key());
                    LOGGER.info("partition {}", record.partition());
                    LOGGER.info("offset {}", record.offset());
                    LOGGER.info("value {}", record.value());
                });
                // Skip the blocking broker round-trip when the poll returned
                // nothing — there is no new position to commit.
                if (!records.isEmpty()) {
                    consumer.commitSync();
                }
            }
        }
    }

    /**
     * Consumer config with auto-commit disabled so offsets are committed only
     * by the explicit commitSync() call in {@link #main}.
     */
    private static Properties loadProps() {
        final Properties prop = new Properties();
        prop.put("bootstrap.servers", "120.55.125.58:9092,120.26.198.248:9092,121.40.200.37:9092");
        prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        prop.put("group.id", "test_group1");
        prop.put("client.id", "demo-consumer-client");
        // Start from the earliest offset when the group has no committed position.
        prop.put("auto.offset.reset", "earliest");
        // Manual offset management: commits happen only via commitSync().
        prop.put("enable.auto.commit", "false");
        return prop;
    }
}
package com.hanwan.kafka.demo2;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Properties;
/**
* @ClassName ConsumerAsyncCommit
* @Description Consumer 异步commit
* @Copyright: Copyright (c) 2018
* @Company: www.lowan.com
* @Author hanwan
* @Date 2018/8/20 13:28
* @Version 1.0
**/
public class ConsumerAsyncCommit {
private final static Logger LOGGER = LoggerFactory.getLogger(ConsumerAsyncCommit.class);
public static void main(String[] args) {
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(loadProp());
consumer.subscribe(Collections.singleton("test12"));
while (true) {
ConsumerRecords<String, String> records = consumer.poll(100);
records.forEach(record ->{
LOGGER.info("key {}", record.key());
LOGGER.info("partition {}", record.partition());
LOGGER.info("offset {}", record.offset());
LOGGER.info("value {}", record.value());