Java Implementation of Kafka Cluster Producer and Consumer Classes

1. Producer Class

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class KafkaProducer {

    private final org.apache.kafka.clients.producer.KafkaProducer<String, String> producer;

    public final static String TOPIC = "test_topic";

    private KafkaProducer() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.38.110:9092,192.168.38.111:9092,192.168.38.112:9092");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Optionally wait for all in-sync replicas to acknowledge each write:
        // props.put(ProducerConfig.ACKS_CONFIG, "all");  // "all" is equivalent to "-1"
        producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props);
    }

    void produce() {
        int messageNo = 1;
        final int COUNT = 1000;
        while (messageNo < COUNT) {
            String key = String.valueOf(messageNo);
            String data = "hello kafka message " + key;
            boolean sync = false; // send synchronously?
            if (sync) {
                try {
                    // Synchronous send: block until the broker acknowledges the record.
                    producer.send(new ProducerRecord<>(TOPIC, data)).get();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            } else {
                // Asynchronous send: the record is buffered and sent in the background.
                producer.send(new ProducerRecord<>(TOPIC, data));
            }
            // flush() forces all buffered records out to the brokers. Calling it once per
            // message defeats batching; calling it once after the loop (or closing the
            // producer) is sufficient.
            producer.flush();
            messageNo++;
        }
        producer.close();
    }

    public static void main(String[] args) {
        new KafkaProducer().produce();
    }
}
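The asynchronous branch above is fire-and-forget: if a send fails, nothing is reported. A common variant, sketched here under the same producer configuration, passes a Callback so that failures are at least logged:

// Sketch: asynchronous send with a callback for error handling.
producer.send(new ProducerRecord<>(TOPIC, data), (metadata, exception) -> {
    if (exception != null) {
        // The send failed (after any retries); log or handle the error here.
        exception.printStackTrace();
    } else {
        System.out.printf("sent to partition %d at offset %d%n",
                metadata.partition(), metadata.offset());
    }
});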

Result: run the following command on one of the Kafka brokers:

bin/kafka-console-consumer.sh --zookeeper localhost:2181/kafka --topic test_topic --from-beginning

then run the main method; the produced messages appear on the console consumer's screen:

[Screenshot: console consumer output showing the produced messages]
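Note: the --zookeeper option only exists in older Kafka releases. If your Kafka version has dropped it, point the console consumer at the brokers directly, for example:

bin/kafka-console-consumer.sh --bootstrap-server 192.168.38.110:9092 --topic test_topic --from-beginning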

2. Consumer Class

package com.example.kafka.controller;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Arrays;
import java.util.Properties;

public class KafkaConsumer1 {

    void consume() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.38.110:9092,192.168.38.111:9092,192.168.38.112:9092");
        // group.id identifies the consumer group this consumer belongs to
        props.put("group.id", "test_topic");
        // commit offsets automatically every second
        props.put("auto.commit.interval.ms", "1000");
        // start from the latest offset when the group has no committed offset
        props.put("auto.offset.reset", "latest");
        // deserializer classes for keys and values
        props.put("key.deserializer", StringDeserializer.class);
        props.put("value.deserializer", StringDeserializer.class);
        // Note: the zookeeper.* and serializer.class settings used by the old
        // ZooKeeper-based consumer are ignored by the new KafkaConsumer and
        // have been dropped here.

        Consumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("test_topic"));

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
            }
        }
    }

    public static void main(String[] args) {
        new KafkaConsumer1().consume();
    }
}
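The consumer above relies on automatic offset commits. If you want offsets committed only after the records have actually been processed (at-least-once handling), a minimal sketch, assuming the same brokers and topic as above, is to disable auto-commit and call commitSync() yourself:

// Sketch: manual offset commit instead of auto-commit.
props.put("enable.auto.commit", "false");

Consumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList("test_topic"));
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(1000);
    for (ConsumerRecord<String, String> record : records) {
        // ... process the record here ...
    }
    // Commit only after the whole batch has been processed.
    consumer.commitSync();
}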

After running the main method, the consumed messages appear in the IDEA console:

[Screenshot: IDEA console output showing the consumed messages]
