Installation
Java
Install a JDK first; both ZooKeeper and Kafka need one.
ZooKeeper
cd /usr/local/zookeeper/apache-zookeeper-3.6.1-bin/conf
cp zoo_sample.cfg zoo.cfg
vim zoo.cfg
tickTime=2000
dataDir=/usr/local/zookeeper/apache-zookeeper-3.6.1-bin/dataDir
dataLogDir=/usr/local/zookeeper/apache-zookeeper-3.6.1-bin/dataLogDir
clientPort=2181
# initLimit/syncLimit are counted in ticks: 20 s to connect, 10 s to fall behind
initLimit=10
syncLimit=5
# AdminServer HTTP port (default 8080), moved to avoid conflicts
admin.serverPort=4888
server.1=localhost:2888:3888
./zkServer.sh start
./zkServer.sh status
./zkCli.sh -server 127.0.0.1:2181
help
./zkServer.sh stop
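To check the server from Java as well, a minimal sketch using the official client (assumes the org.apache.zookeeper:zookeeper:3.6.1 dependency and the server above on 127.0.0.1:2181; the class name is illustrative):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkConnectCheck {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // the watcher fires once the session is established
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 15000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();
        System.out.println("connected: " + zk.getState());
        zk.close();
    }
}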
Kafka
cd /usr/local/kafka/kafka_2.12-2.5.0/config
vim server.properties
broker.id=1
# port/host.name/advertised.host.name are deprecated in Kafka 2.x;
# advertised.listeners below takes precedence
port=9092
host.name=172.16.81.77
advertised.host.name=47.114.62.101
# the address clients use to reach the broker (public IP here)
advertised.listeners=PLAINTEXT://47.114.62.101:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/kafka_2.12-2.5.0/kafka-logs
num.partitions=2
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
# note the /kafka1 chroot: every --zookeeper CLI argument must include it
zookeeper.connect=localhost:2181/kafka1
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
cd /usr/local/kafka/kafka_2.12-2.5.0/bin
./kafka-server-start.sh -daemon ../config/server.properties
./kafka-topics.sh --create --zookeeper localhost:2181/kafka1 --replication-factor 1 --partitions 1 --topic dblab01
./kafka-topics.sh --describe --zookeeper localhost:2181/kafka1 --topic dblab01
./kafka-topics.sh --list --zookeeper localhost:2181/kafka1
./kafka-console-producer.sh --broker-list localhost:9092 --topic dblab01
./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic dblab01 --from-beginning
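The same checks can be done from Java with the AdminClient in kafka-clients (a minimal sketch, assuming the broker above is reachable on localhost:9092; the class name is illustrative):

import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class TopicListCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Java equivalent of kafka-topics.sh --list
        try (AdminClient admin = AdminClient.create(props)) {
            System.out.println(admin.listTopics().names().get());
        }
    }
}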
Kafka's znodes in ZooKeeper
With the /kafka1 chroot configured above, the broker registers its metadata under /kafka1: brokers/ids, brokers/topics, controller, config, and so on.
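They can be inspected with the same ZooKeeper Java client as in the install section (a sketch to run inside a main that throws Exception; getChildren lists one level at a time):

// list the top-level Kafka znodes, then the live broker ids
ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 15000, event -> { });
System.out.println(zk.getChildren("/kafka1", false));             // e.g. [brokers, controller, config, ...]
System.out.println(zk.getChildren("/kafka1/brokers/ids", false)); // e.g. [1], from broker.id=1 above
zk.close();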
Usage
compile 'org.apache.kafka:kafka_2.12:2.5.0'
For a pure client application, org.apache.kafka:kafka-clients:2.5.0 is enough; the kafka_2.12 artifact pulls in the whole broker.
Producer usage
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.context.annotation.Bean;

@Bean
public KafkaProducer<String, String> kafkaProducer() {
    Properties props = new Properties();
    props.put("bootstrap.servers", "xxxxxx:9092");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    // acks=-1 is the same as acks=all: wait for all in-sync replicas
    props.put("acks", "-1");
    return new KafkaProducer<>(props);
}
// synchronous send: get() blocks until the broker acknowledges
private void produceSync() {
    for (int i = 0; i < 3; i++) {
        try {
            ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, "key_" + i, "val_" + i);
            RecordMetadata metadata = kafkaProducer.send(record).get();
            System.out.println("send successful: " + record.key()
                    + ", partition=" + metadata.partition() + ", offset=" + metadata.offset());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
// asynchronous send: the callback runs on the producer's I/O thread
// once the broker responds (or the send fails)
private void produceAsync() {
    for (int i = 10; i < 13; i++) {
        try {
            ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, "key_" + i, "async_val_" + i);
            kafkaProducer.send(record, (metadata, exception) -> {
                if (exception != null) {
                    exception.printStackTrace();
                } else {
                    System.out.println("send successful: " + record.key());
                }
            });
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
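Nothing above closes the producer. Spring normally infers close() as the destroy method for AutoCloseable @Bean instances, but an explicit hook (a sketch; the method name is illustrative) makes the final flush visible:

import javax.annotation.PreDestroy;

// flush any buffered records and release sockets/threads on shutdown
@PreDestroy
public void closeProducer() {
    kafkaProducer.flush();
    kafkaProducer.close();
}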
Consumer usage
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.context.annotation.Bean;

@Bean
public KafkaConsumer<String, String> kafkaConsumer() {
    Properties props = new Properties();
    props.put("bootstrap.servers", "xxxxxx:9092");
    props.put("group.id", "consumerGroup2");
    // the consume loop below commits offsets manually, so auto-commit stays off
    props.put("enable.auto.commit", "false");
    props.put("auto.offset.reset", "earliest");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    return new KafkaConsumer<>(props);
}
public void consume() {
    kafkaConsumer.subscribe(Collections.singletonList(TOPIC));
    // KafkaConsumer is not thread-safe: after this point only the
    // polling thread below touches it
    new Thread(() -> {
        Duration timeout = Duration.ofSeconds(1);
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(timeout);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
            }
            // commit once per batch; commitAsync() is the non-blocking alternative
            kafkaConsumer.commitSync();
        }
    }).start();
}
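The while (true) loop above has no exit. A shutdown sketch, assuming a hypothetical shutdownConsumer() hook called from another thread: wakeup() is the one KafkaConsumer method documented as safe to call from other threads, and it makes poll() throw WakeupException so the loop can close the consumer cleanly.

import org.apache.kafka.common.errors.WakeupException;

// hypothetical replacement for the Runnable above: the loop exits via
// WakeupException and closes the consumer
private void pollLoop() {
    try {
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(1));
            records.forEach(r -> System.out.printf("offset = %d, key = %s, value = %s%n",
                    r.offset(), r.key(), r.value()));
            kafkaConsumer.commitSync();
        }
    } catch (WakeupException e) {
        // expected: poll() throws this after wakeup()
    } finally {
        kafkaConsumer.close();
    }
}

// hypothetical hook, safe to call from any other thread
public void shutdownConsumer() {
    kafkaConsumer.wakeup();
}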