导入依赖
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.11.0.0</version>
</dependency>
创建生产者(过时的API)
package com.buba.kafka;
import java.util.Properties;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
public class OldProducer {
    /**
     * Demonstrates the legacy (deprecated) Scala-based producer API.
     * Sends a single message to topic "first" and then releases the producer.
     */
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Broker list used for bootstrapping (legacy property name)
        properties.put("metadata.broker.list", "hadoop102:9092");
        // Wait for the partition leader's acknowledgement only
        properties.put("request.required.acks", "1");
        // Encoder used to serialize message payloads
        properties.put("serializer.class", "kafka.serializer.StringEncoder");
        Producer<Integer, String> producer =
                new Producer<Integer, String>(new ProducerConfig(properties));
        KeyedMessage<Integer, String> message =
                new KeyedMessage<Integer, String>("first", "hello world");
        try {
            producer.send(message);
        } finally {
            // FIX: the original never closed the producer, leaking its
            // network connections and background threads.
            producer.close();
        }
    }
}
创建生产者(新API)
package com.buba.kafka.producer;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
public class NewProducer {
    /**
     * Sends 50 keyed string messages to topic "first" using the
     * new (kafka-clients) Java producer API, then closes the producer.
     */
    public static void main(String[] args) {
        // 1. Producer configuration
        Properties config = new Properties();
        // Kafka broker host:port used for bootstrapping
        config.put("bootstrap.servers", "hadoop-senior01.buba.com:9092");
        // Require acknowledgement from all in-sync replicas
        config.put("acks", "all");
        // Number of retries after a failed send
        config.put("retries", 0);
        // Maximum batch size in bytes
        config.put("batch.size", 16384);
        // How long to linger (ms) before sending a batch
        config.put("linger.ms", 1);
        // Total producer buffer memory in bytes
        config.put("buffer.memory", 33554432);
        // Key and value serializers
        config.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        config.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // 2. Create the producer
        Producer<String, String> producer = new KafkaProducer<>(config);

        // 3. Publish the messages (key = sequence number as a string)
        final int messageCount = 50;
        for (int seq = 0; seq < messageCount; seq++) {
            ProducerRecord<String, String> record =
                    new ProducerRecord<String, String>("first", Integer.toString(seq), "hello world-" + seq);
            producer.send(record);
        }

        // 4. Release resources (also flushes any buffered sends)
        producer.close();
    }
}
可以看到first这个topic是一个分区,消费消息的时候,按顺序排序的
创建一个second三个分区的topic,然后修改代码里的topic进行测试,可以看到消费出来的虽然不是连续的,但是在每个分区当中是连续的.
创建生产者带回调函数(新API),回调在 broker 确认后触发,可以用来验证消息是否发送成功(而不是是否被消费)
package com.buba.kafka.producer;
import java.util.Properties;
import org.apache.kafka.clients.producer.*;
public class CallBackProducer {
    /**
     * Sends 50 messages to topic "second" and attaches a completion callback
     * to each send: on success it prints the (partition, offset) the record
     * landed at; on failure it reports the exception.
     */
    public static void main(String[] args) {
        // 1. Producer configuration
        Properties props = new Properties();
        // Kafka broker host:port used for bootstrapping
        props.put("bootstrap.servers", "hadoop-senior01.buba.com:9092");
        // Require acknowledgement from all in-sync replicas
        props.put("acks", "all");
        // Number of retries after a failed send
        props.put("retries", 0);
        // Maximum batch size in bytes
        props.put("batch.size", 16384);
        // How long to linger (ms) before sending a batch
        props.put("linger.ms", 1);
        // Total producer buffer memory in bytes
        props.put("buffer.memory", 33554432);
        // Key serializer
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Value serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // 2. Create the producer
        Producer<String, String> producer = new KafkaProducer<>(props);

        // 3. Send messages with a per-record completion callback
        for (int i = 0; i < 50; i++) {
            // arg 1: target topic; args 2/3: key, value
            producer.send(new ProducerRecord<String, String>("second", Integer.toString(i), "hello world-" + i), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    // FIX: the original ignored the failure case entirely —
                    // on error, metadata is null and exception carries the cause,
                    // so failed sends vanished silently.
                    if (exception != null) {
                        System.err.println("send failed: " + exception);
                    } else if (metadata != null) {
                        // Partition and offset the record was written to
                        System.err.println(metadata.partition() + "---" + metadata.offset());
                    }
                }
            });
        }

        // 4. Release resources (also flushes any buffered sends)
        producer.close();
    }
}