Kafka单机版搭建
Kafka版本为2.2.1(Scala版本2.12),Zookeeper版本为3.4.14,运行环境为CentOS7.2,JDK为openJDK1.8
##########################################################################################
#zookeeper download:
wget http://mirror.bit.edu.cn/apache/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
#setting conf/zoo.cfg
# tickTime: basic time unit (ms) used by ZooKeeper for heartbeats and timeouts
tickTime=2000
# initLimit: max ticks a follower may take to connect and sync with the leader
initLimit=10
# syncLimit: max ticks a follower may lag behind the leader before it is dropped
syncLimit=5
# NOTE(review): /tmp is wiped on reboot — acceptable for a demo, not for production
dataDir=/tmp/zookeeper
clientPort=2181
# single-node "ensemble": 2888 = peer communication port, 3888 = leader-election port
server.0=192.168.31.201:2888:3888
#before start zookeeper, ensure close firewall and iptables
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld
systemctl status iptables
systemctl stop iptables
systemctl disable iptables
# flush any remaining iptables rules
iptables -F
#start zookeeper
bash bin/zkServer.sh start
##########################################################################################
#kafka download:
wget http://mirror.bit.edu.cn/apache/kafka/2.2.1/kafka_2.12-2.2.1.tgz
#setting config/server.properties. use shell command: grep -Ev "^$|^[#;]" config/server.properties
# broker.id must be unique per broker in the cluster (single broker here)
broker.id=0
# advertise on the host's LAN IP so remote clients can connect
listeners=PLAINTEXT://192.168.31.229:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
# NOTE(review): /tmp is wiped on reboot — message data will be lost; use a persistent path in production
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
# retain messages for 7 days
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
# points at the ZooKeeper node configured above
zookeeper.connect=192.168.31.201:2181
zookeeper.connection.timeout.ms=6000
#before start kafka, ensure close firewall and iptables
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld
systemctl status iptables
systemctl stop iptables
systemctl disable iptables
iptables -F
#start kafka
bash bin/kafka-server-start.sh config/server.properties
##########################################################################################
#create topic (note: --zookeeper with double dash; single-dash "-zookeeper" is non-standard):
bash bin/kafka-topics.sh --create --zookeeper 192.168.31.201:2181 --replication-factor 1 --partitions 1 --topic test
#list topic:
bash bin/kafka-topics.sh --zookeeper 192.168.31.201:2181 --describe --topic test
#produce message:
bash bin/kafka-console-producer.sh --broker-list 192.168.31.229:9092 --topic test
#consume message (same topic the producer above writes to; --from-beginning replays the full log):
bash bin/kafka-console-consumer.sh --bootstrap-server 192.168.31.229:9092 --topic test --from-beginning
bash bin/kafka-console-consumer.sh --bootstrap-server 192.168.31.229:9092 --topic test
引入maven依赖
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.2.1</version>
</dependency>
Topic管理操作
- 创建Topic
/**
 * Creates two demo topics ("topic005", "topic006") with 1 partition and
 * replication factor 1, waiting up to 5 seconds for each result.
 */
@Test
public void createTopic() throws InterruptedException
{
    Properties props = FileUtils.getProps("config/adminclient.properties");
    // AdminClient is AutoCloseable — try-with-resources releases its network
    // threads; the original leaked the client.
    try (AdminClient client = AdminClient.create(props))
    {
        NewTopic newTopic1 = new NewTopic("topic005", 1, (short) 1);
        NewTopic newTopic2 = new NewTopic("topic006", 1, (short) 1);
        CreateTopicsResult result = client.createTopics(Arrays.asList(newTopic1, newTopic2));
        result.values().forEach((topicName, future) ->
        {
            try
            {
                // get() either returns (creation succeeded) or throws — the
                // original's isDone() check after a successful get() was redundant.
                future.get(5, TimeUnit.SECONDS);
                LOGGER.info("Create topic success. topic name: {}", topicName);
            }
            catch (InterruptedException e)
            {
                // Restore the interrupt flag instead of swallowing it.
                Thread.currentThread().interrupt();
                LOGGER.error("Create topic interrupted. topic name: {}", topicName, e);
            }
            catch (Exception e)
            {
                // Log the full throwable, not just getMessage().
                LOGGER.error("Create topic failed. topic name: {}", topicName, e);
            }
        });
    }
}
- 删除Topic
/**
 * Deletes the two demo topics created by createTopic, waiting up to
 * 5 seconds for each deletion result.
 */
@Test
public void deleteTopic()
{
    Properties props = FileUtils.getProps("config/adminclient.properties");
    // Close the AdminClient deterministically; the original leaked it.
    try (AdminClient client = AdminClient.create(props))
    {
        DeleteTopicsResult result = client.deleteTopics(Arrays.asList("topic005", "topic006"));
        result.values().forEach((topicName, future) ->
        {
            try
            {
                // get() throws on failure, so a post-get isDone() check is redundant.
                future.get(5, TimeUnit.SECONDS);
                LOGGER.info("Delete topic success. topic name: {}", topicName);
            }
            catch (InterruptedException e)
            {
                // Preserve the interrupt status for callers.
                Thread.currentThread().interrupt();
                LOGGER.error("Delete topic interrupted. topic name: {}", topicName, e);
            }
            catch (Exception e)
            {
                // Log the full throwable, not just getMessage().
                LOGGER.error("Delete topic failed. topic name: {}", topicName, e);
            }
        });
    }
}
- 查询Topic
/**
 * Lists all topics visible to the AdminClient and logs each one,
 * waiting up to 5 seconds for the listing.
 */
@Test
public void listTopic()
{
    Properties props = FileUtils.getProps("config/adminclient.properties");
    // Close the AdminClient deterministically; the original leaked it.
    try (AdminClient client = AdminClient.create(props))
    {
        ListTopicsResult result = client.listTopics();
        KafkaFuture<Collection<TopicListing>> future = result.listings();
        try
        {
            // get() either returns the listings or throws; no isDone() check needed.
            Collection<TopicListing> topicListings = future.get(5, TimeUnit.SECONDS);
            topicListings.forEach(topicListing -> LOGGER.info("Topic: {}", topicListing));
        }
        catch (InterruptedException e)
        {
            // Preserve the interrupt status for callers.
            Thread.currentThread().interrupt();
            LOGGER.error("List topic interrupted.", e);
        }
        catch (Exception e)
        {
            // Log the full throwable, not just getMessage().
            LOGGER.error("List topic failed.", e);
        }
    }
}
生产消息
// Demo: asynchronously produce one message to TOPIC_NAME_CREATE every
// PRODUCE_PERIOD ms, forever, logging partition/offset on each ack.
@Test
public void produceMessage() throws InterruptedException
{
Properties props = FileUtils.getProps("config/produce.properties");
Producer<String, String> producer = new KafkaProducer<>(props);
// NOTE(review): nothing ever calls produceFinish.countDown(), so await()
// below blocks forever and producer.close() is unreachable — presumably
// intentional to keep this demo/test running; confirm.
CountDownLatch produceFinish = new CountDownLatch(1);
ScheduledExecutorService produceService = Executors.newSingleThreadScheduledExecutor();
produceService.scheduleAtFixedRate(() ->
{
// count is a shared counter declared outside this method; its current value
// is embedded in the message payload.
ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC_NAME_CREATE,
"key", "message-" + Integer.toString(count.get()));
// Produce asynchronously; the callback runs on the producer's I/O thread.
producer.send(record, (recordMetadata, e) ->
{
// A non-null exception means the send failed.
if (!Objects.isNull(e))
{
LOGGER.error("produce message failed. error msg: {}", e.getMessage());
}
else
{
// Only count successfully acknowledged messages.
LOGGER.info("produce message success.{}, partition:{}, offset: {}",
count.incrementAndGet(), recordMetadata.partition(), recordMetadata.offset());
}
});
}, 0, PRODUCE_PERIOD, TimeUnit.MILLISECONDS);
// Block the main thread indefinitely (see NOTE above).
produceFinish.await();
producer.close();
}
消费消息
/**
 * Demo: subscribes to TOPIC_NAME_CREATE and polls it once per second on a
 * scheduler thread, logging every record received.
 */
@Test
public void consumeMessage() throws InterruptedException
{
    Properties props = FileUtils.getProps("config/consume.properties");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    // NOTE(review): nothing ever calls consumeFinish.countDown(), so await()
    // below blocks forever — presumably intentional to keep this demo running;
    // confirm. The consumer is therefore never closed.
    CountDownLatch consumeFinish = new CountDownLatch(1);
    // Subscribe to the topic before the poll loop starts.
    consumer.subscribe(Collections.singletonList(TOPIC_NAME_CREATE));
    // KafkaConsumer is not thread-safe; only this single scheduler thread
    // touches it after subscribe(), which is safe.
    ScheduledExecutorService consumeService = Executors.newSingleThreadScheduledExecutor();
    consumeService.scheduleAtFixedRate(() ->
    {
        // poll(long) is deprecated since Kafka 2.0 — use the Duration overload.
        ConsumerRecords<String, String> records =
                consumer.poll(java.time.Duration.ofMillis(CONSUME_PERIOD));
        records.forEach(record ->
        {
            LOGGER.info("receive key:{}, value:{}, offset:{}",
                    record.key(), record.value(), record.offset());
        });
    }, 0, 1000, TimeUnit.MILLISECONDS);
    // Block the main thread indefinitely (see NOTE above).
    consumeFinish.await();
}
GitHub地址 Kafka API操作Demo