Kafka 基础示例:producer 发送消息,consumer 消费消息。
// Producer demo: sends 99 string messages to topic "aa" and logs the
// metadata (topic/offset/partition) of each successfully written record.
public class producer1 {
    public static void main(String[] args) {
        Properties prop = new Properties();
        // 1. Kafka broker address
        prop.put("bootstrap.servers", "192.168.232.132:9092");
        // 2. Wait for acknowledgement from all in-sync replicas
        prop.put("acks", "all");
        // 3. Number of retries on send failure (0 = do not retry)
        prop.put("retries", "0");
        // 4. Batch size in bytes
        prop.put("batch.size", "10241");
        // 5. Batching delay in milliseconds
        prop.put("linger.ms", "5");
        // 6. Total producer buffer memory in bytes
        prop.put("buffer.memory", "1234321");
        // 7. Keys and values must be serialized before sending
        prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        prop.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Instantiate the producer (typo "prodecer" fixed)
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(prop);
        try {
            for (int i = 0; i < 99; i++) {
                producer.send(new ProducerRecord<String, String>("aa", "hah" + i), new Callback() {
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        // FIX: the original ignored send failures entirely; a
                        // non-null exception means the record was not written.
                        if (exception != null) {
                            exception.printStackTrace();
                        } else if (metadata != null) {
                            System.out.println(metadata.topic() + "..." + metadata.offset() + "..." + metadata.partition());
                        }
                    }
                });
            }
        } finally {
            // FIX: close in finally so buffered records are flushed and network
            // resources released even if a send call throws.
            producer.close();
        }
    }
}
public class Consumer1 {
public static void main(String[] args) {
//1.配置消费者属性
Properties prop = new Properties();
//配置属性
//服务器地址指定
prop.put("bootstrap.servers", "192.168.232.132:9092");
//配置消费者组
prop.put("group.id", "g1");
//配置是否自动确认offset
prop.put("enable.auto.commit", "true");
//序列化
prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
//2.实例消费者
final KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(prop);
//4.释放资源 线程安全
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
public void run() {
if(consumer != null) {
consumer.close();
}
}
}));
//订阅消息主题
consumer.subscribe(Arrays.asList("aa"));
//3.拉消息 推push 拉poll
while(true) {
ConsumerRecords<String,String> records = consumer.poll(1000);
//遍历消息
for(ConsumerRecord<String,String> record:records) {
System.out.println(record.topic() + "------" + record.value());
}
}
}
}
自定义分区器(设置消息写入的分区)与指定分区消费
// Custom partitioner that routes every record to partition 1.
// (Class name keeps the original spelling "Patition1" because Producer2
// references it by its fully-qualified name in "partitioner.class".)
public class Patition1 implements Partitioner {
    // Receives the producer configuration; nothing is configured here.
    public void configure(Map<String, ?> configs) {
    }
    // Partitioning logic.
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // FIX: the original unconditionally returned 1, which is an invalid
        // partition for a topic that only has partition 0. Fall back to 0
        // when partition 1 does not exist.
        Integer count = cluster.partitionCountForTopic(topic);
        return (count != null && count > 1) ? 1 : 0;
    }
    // Release resources; nothing to clean up.
    public void close() {
    }
}
// Producer demo using the custom partitioner Patition1: sends 99 messages
// to topic "yuandan" and logs the metadata of each written record.
public class Producer2 {
    public static void main(String[] args) {
        // 1. Producer configuration
        Properties prop = new Properties();
        // Kafka broker address
        prop.put("bootstrap.servers", "192.168.232.132:9092");
        // Wait for acknowledgement from all in-sync replicas
        prop.put("acks", "all");
        // Number of retries on send failure (0 = do not retry)
        prop.put("retries", "0");
        // Batch size in bytes
        prop.put("batch.size", "10241");
        // Batching delay in milliseconds
        prop.put("linger.ms", "5");
        // Total producer buffer memory in bytes
        prop.put("buffer.memory", "12341235");
        // Keys and values must be serialized before sending
        prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        prop.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Route records through the custom partitioner
        prop.put("partitioner.class", "com.itstar.kafka.kafka_producer.Patition1");
        // 2. Instantiate the producer
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(prop);
        try {
            // 3. Send the messages
            for (int i = 0; i < 99; i++) {
                producer.send(new ProducerRecord<String, String>("yuandan", "hunterhenshuai" + i), new Callback() {
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        // FIX: the original ignored send failures entirely; a
                        // non-null exception means the record was not written.
                        if (exception != null) {
                            exception.printStackTrace();
                        } else if (metadata != null) {
                            // Offset and partition of the record just written
                            System.out.println(metadata.topic() + "----" + metadata.offset() + "----" + metadata.partition());
                        }
                    }
                });
            }
        } finally {
            // 4. FIX: close in finally so buffered records are flushed even if
            // a send call throws.
            producer.close();
        }
    }
}
public class consumer {
public static void main(String[] args) {
Properties prop = new Properties();
//配置节点
prop.put("bootstrap.servers","192.168.232.132:9092");
//配置消费者组
prop.put("group.id","tt1");
//配置自动获取确定offset
prop.put("enable.auto.commit","true");
//序列化
prop.put("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
prop.put("value.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
//实例化consumer
final KafkaConsumer<String,String> consume = new KafkaConsumer<String, String>(prop);
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
public void run() {
if (consume!=null){
consume.close();
}
}
}));
TopicPartition pp = new TopicPartition("aa",1);
//这个是指定分区消费
// consume.assign(Arrays.asList(p));
// 指定offset开始读取
// consume.seekToBeginning(Arrays.asList(p));
//打印分区
List<PartitionInfo> parlist = consume.partitionsFor("aa");
for(PartitionInfo p : parlist){
System.out.println(p.toString());
}
//消费所有分区的,添加到List,然后assign这个List
List<TopicPartition> list = new ArrayList<TopicPartition>();
for (PartitionInfo p : parlist){
TopicPartition top = new TopicPartition("shengdan",p.partition());
list.add(top);
}
consume.assign(Arrays.asList(pp));
while (true){
ConsumerRecords<String,String> records = consume.poll(1000);
for(ConsumerRecord<String,String> record : records){
System.out.println(record.topic() +"---"+record.value());
}
}
}
}
拦截器
// Producer interceptor that prefixes every record's value with the current
// time in milliseconds before the record is sent.
// NOTE(review): the class's closing brace is missing from this snippet.
public class TimeInterceptor implements ProducerInterceptor<String, String>{
// Receives the producer configuration; nothing is configured here.
public void configure(Map<String, ?> configs) {
}
// Called once per record before serialization/partitioning: rebuilds the
// record with "<currentTimeMillis>-" prepended to the value.
// NOTE(review): the rebuilt record carries only topic/partition/key/value —
// any timestamp or headers on the original record are dropped; confirm
// that is acceptable.
public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
return new ProducerRecord<String, String>(
record.topic(),
record.partition(),
record.key(),
System.currentTimeMillis() + "-" + record.value());
}
// Invoked when the broker acknowledges the record OR when the send fails
// (the original comment claimed failure-only, which is inaccurate).
public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
}
// Release resources; nothing to clean up.
public void close() {
}
在 producer 的配置代码中注册拦截器:
//拦截器
ArrayList<String> inList = new ArrayList<String>();
inList.add("com.itstare.kafka.interceptor.TimeInterceptor");
prop.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, inList);
配置完成后拦截器即生效,可在消息发送前对其进行过滤或加工。