消息发送流程
Kafka 的 Producer 发送消息采用的是异步发送的方式。在消息发送的过程中,涉及到了
两个线程——main 线程和 Sender 线程,以及一个线程共享变量——RecordAccumulator。
main 线程将消息发送给 RecordAccumulator,Sender 线程不断从 RecordAccumulator 中拉取
消息发送到 Kafka broker。
自定义Producer
package my.test.Producer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
/**
* @author WGY
* 简单的producer
*/
/**
 * Minimal Kafka producer example: configures the client, sends ten
 * string records to topic "first1", then closes the producer.
 *
 * @author WGY
 */
public class MyProducer {
public static void main(String[] args) {
//1. Producer configuration.
Properties prop = new Properties();
//2. Kafka cluster broker list.
prop.put("bootstrap.servers", "hadoop1:9092");
//3. Ack level: "all" waits for the full ISR to acknowledge.
//   Settings 4-7 below are optional; the client has defaults for them.
prop.put("acks", "all");
//4. Retry count for transient send failures.
prop.put("retries", 2);
//5. Batch size (16 KB): a batch is sent once it fills up.
prop.put("batch.size", 16384);
//6. Linger time: a partially-filled batch is sent after 1 ms.
prop.put("linger.ms", 1);
//7. RecordAccumulator buffer size (32 MB).
prop.put("buffer.memory", 33554432);
//8. Key and value serializer classes.
prop.put("key.serializer",
"org.apache.kafka.common.serialization.StringSerializer");
prop.put("value.serializer",
"org.apache.kafka.common.serialization.StringSerializer");
//9. Create the producer. try-with-resources guarantees close() runs
//   even if send() throws; close() flushes buffered records, and
//   without it consumers may never receive the data.
try (KafkaProducer<String, String> producer = new KafkaProducer<>(prop)) {
//10. Send ten records to topic "first1" (fire-and-forget).
for (int i = 0; i < 10; i++) {
System.out.println(i+1);
producer.send(new ProducerRecord<String,String>("first1","aaa"+i));
}
}
}
}
使用回调函数的Producer
package my.test.Producer;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import java.util.Properties;
/**
* @author WGY
* 回调函数的producer
*/
/**
 * Producer example with a per-record callback: on success it prints the
 * partition and offset assigned to the record, on failure it prints the
 * stack trace.
 *
 * @author WGY
 */
public class CallBackProducer {
public static void main(String[] args) {
//1. Producer configuration: broker list plus key/value serializers.
Properties prop = new Properties();
prop.put("bootstrap.servers", "hadoop1:9092");
prop.put("key.serializer",
"org.apache.kafka.common.serialization.StringSerializer");
prop.put("value.serializer",
"org.apache.kafka.common.serialization.StringSerializer");
//2. try-with-resources guarantees close() runs even if send() throws;
//   close() flushes buffered records so consumers actually receive them.
try (KafkaProducer<String, String> producer = new KafkaProducer<>(prop)) {
for (int i = 0; i < 10; i++) {
System.out.println(i+1);
//Send to topic "first1"; the callback fires when the broker acks
//(e == null) or the send fails (e != null).
producer.send(new ProducerRecord<String, String>("first1", "测试" + i), new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if (e == null){
System.out.println(recordMetadata.partition()+"--"+recordMetadata.offset());
}else{
e.printStackTrace();
}
}
});
}
}
}
}
自定义分区器 Partitioner
package my.test.Partitioner;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import java.util.Map;
/**
* @author WGY
* 自定义分区
*/
/**
 * Custom partitioner stub: routes every record to partition 0.
 * Replace the body of {@link #partition} with real routing logic
 * (e.g. hashing the key modulo the topic's partition count).
 *
 * @author WGY
 */
public class MyPartitioner implements Partitioner {
/** Key and value arrive here already serialized (hence the byte[] params). */
@Override
public int partition(String topic, Object key, byte[] bytes, Object value, byte[] bytes1, Cluster cluster) {
// Business-specific routing would go here; for now every record
// lands in partition 0.
return 0;
}
/** Nothing to release. */
@Override
public void close() {
}
/** No configuration is consumed. */
@Override
public void configure(Map<String, ?> map) {
}
}
使用自定义分区的Producer
package my.test.Partitioner;
import org.apache.kafka.clients.producer.*;
import java.util.Properties;
/**
* @author WGY
* 使用自定义分区的producer
*/
/**
 * Producer example wired to the custom partitioner: all records go
 * through my.test.Partitioner.MyPartitioner, and the callback prints
 * the partition/offset each record was assigned.
 *
 * @author WGY
 */
public class PartitionProducer {
public static void main(String[] args) {
//1. Producer configuration.
Properties prop = new Properties();
prop.put("bootstrap.servers", "hadoop1:9092");
prop.put("acks", "all");
prop.put("key.serializer",
"org.apache.kafka.common.serialization.StringSerializer");
prop.put("value.serializer",
"org.apache.kafka.common.serialization.StringSerializer");
//2. Register the custom partitioner by fully-qualified class name.
prop.put("partitioner.class","my.test.Partitioner.MyPartitioner");
//3. try-with-resources guarantees close() runs even if send() throws;
//   close() flushes buffered records so consumers actually receive them.
try (KafkaProducer<String, String> producer = new KafkaProducer<>(prop)) {
//4. Send ten records; the callback reports the assigned partition
//   (always 0 with MyPartitioner) and offset, or the failure.
for (int i = 0; i < 10; i++) {
System.out.println(i+1);
producer.send(new ProducerRecord<String, String>("first1", "aaa" + i), new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if (e == null){
System.out.println(recordMetadata.partition()+"--"+recordMetadata.offset());
}else{
e.printStackTrace();
}
}
});
}
}
}
}