package kafka;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
/**
 * Producer demo: writes 999 string records ("1".."999") to the "user" topic,
 * round-robining them across partitions 0-2, one record per second.
 */
public class ProducerApi {
public static void main(String[] args) {
Properties props = new Properties();
props.setProperty("bootstrap.servers", "hadoop01:9092,hadoop02:9092,hadoop03:9092");
// Key and value serialization classes (both plain strings).
// Use the class literal for both so a typo cannot slip into a hand-typed name.
props.setProperty("key.serializer", StringSerializer.class.getName());
props.setProperty("value.serializer", StringSerializer.class.getName());
/*
 * Acknowledgement mode for sends. Valid values: [all, -1, 0, 1]; default: 1.
 *   0      : the leader sends no acknowledgement
 *   1      : the leader acknowledges to the producer after its local write
 *   all/-1 : follower --> leader --> producer (full in-sync replica ack)
 */
props.setProperty("acks", "1");
// Create the producer client instance. try-with-resources guarantees the
// producer is flushed and closed even if a send throws (the original leaked
// the producer on any exception before close()).
try (KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(props)) {
for (int i = 1; i < 1000; i++) {
int partitionNumber = i % 3;
/*
 * ProducerRecord(topic, partition, key, value):
 *   partition : when specified, the record always goes to that partition
 *   key       : when no partition is specified, the key's hash picks the partition
 *   value     : the payload itself
 */
// Parameterized type instead of the original raw ProducerRecord (unchecked warning).
ProducerRecord<String, String> pro = new ProducerRecord<>("user", partitionNumber, "", "" + i);
// Print the record being sent.
System.out.println("record=" + pro);
// Send the record.
kafkaProducer.send(pro);
try {
// Throttle to one record per second.
Thread.sleep(1000);
} catch (InterruptedException e) {
// Restore the interrupt flag and stop producing rather than
// swallowing the interruption (original only printed the trace).
Thread.currentThread().interrupt();
break;
}
}
}
System.out.println("over");
}
}
package kafka;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
/**
 * Consumer demo: subscribes to the "user" topic and prints every record it polls, forever.
 */
public class ConsumerApi {
public static void main(String[] args) {
HashMap<String, Object> config = new HashMap<String, Object>();
config.put("bootstrap.servers", "hadoop01:9092,hadoop02:9092,hadoop03:9092");
// Key and value deserialization classes (both plain strings).
config.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
config.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
config.put("group.id", "day001");
/*
 * Where to start consuming when there is no committed offset.
 * Valid values: [latest, earliest, none].
 *   earliest : with committed offsets, resume from them; with none, start from the beginning
 *   latest   : with committed offsets, resume from them; with none, start from the newest data
 *              (the original comment wrongly duplicated the "earliest" description here)
 *   none     : resume only if every partition has a committed offset; if any
 *              partition lacks one, throw an exception
 */
config.put("auto.offset.reset", "earliest");
// Whether to auto-commit offsets (in 0.10 they are committed to the broker).
// NOTE(review): auto-commit is disabled but no commitSync()/commitAsync() is ever
// issued, so offsets never advance across restarts — confirm this is intended.
config.put("enable.auto.commit", false);
// Create the consumer client instance.
KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(config);
// Subscribe to the topic (tells the client which topic to fetch data from).
kafkaConsumer.subscribe(Arrays.asList("user"));
while (true) {
// Pull a batch of records from the partitions assigned to this consumer.
ConsumerRecords<String, String> records = kafkaConsumer.poll(2000);
// ConsumerRecords is Iterable — enhanced for replaces the manual Iterator loop.
for (ConsumerRecord<String, String> record : records) {
System.out.println("record=" + record);
}
}
}
}