Kafka Installation and Usage

Installation

Java

Both ZooKeeper and Kafka run on the JVM, so install a JDK (Java 8 or later) first.

ZooKeeper

cd /usr/local/zookeeper/apache-zookeeper-3.6.1-bin/conf
cp zoo_sample.cfg zoo.cfg

Edit zoo.cfg as follows:

# basic time unit in ms; heartbeats and timeouts are multiples of it
tickTime=2000
dataDir=/usr/local/zookeeper/apache-zookeeper-3.6.1-bin/dataDir
dataLogDir=/usr/local/zookeeper/apache-zookeeper-3.6.1-bin/dataLogDir
clientPort=2181

# follower startup / sync timeouts, measured in ticks
initLimit=10
syncLimit=5
# AdminServer port (defaults to 8080; moved here to avoid conflicts)
admin.serverPort=4888
server.1=localhost:2888:3888
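  • Create the data directories, and a myid file matching the server.1 entry (with server.N entries configured, ZooKeeper looks up its own id in dataDir/myid).
mkdir -p /usr/local/zookeeper/apache-zookeeper-3.6.1-bin/dataDir
mkdir -p /usr/local/zookeeper/apache-zookeeper-3.6.1-bin/dataLogDir
echo 1 > /usr/local/zookeeper/apache-zookeeper-3.6.1-bin/dataDir/myid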
  • Start ZooKeeper from the bin directory.
./zkServer.sh start
./zkServer.sh status
  • Client operations.
./zkCli.sh -server 127.0.0.1:2181
help
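help lists the available commands; a few basics (the /test node here is just an example):
ls /
create /test "hello"
get /test
delete /test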
  • Stop the service.
./zkServer.sh stop

Kafka

cd /usr/local/kafka/kafka_2.12-2.5.0/config
vim server.properties
# unique id of this broker within the cluster
broker.id=1
# port/host.name/advertised.host.name are deprecated in favor of listeners/advertised.listeners
listeners=PLAINTEXT://172.16.81.77:9092
# the address advertised to clients (public IP; the bind address above is the internal one)
advertised.listeners=PLAINTEXT://47.114.62.101:9092

num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/kafka_2.12-2.5.0/kafka-logs
num.partitions=2
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1

# retain log segments for 7 days, roll segments at 1 GiB, check every 5 minutes
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000

# the /kafka1 chroot keeps this cluster's znodes under a single path in ZooKeeper
zookeeper.connect=localhost:2181/kafka1
zookeeper.connection.timeout.ms=18000

group.initial.rebalance.delay.ms=0
  • Start Kafka.
cd /usr/local/kafka/kafka_2.12-2.5.0/bin
./kafka-server-start.sh -daemon ../config/server.properties
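-daemon runs the broker in the background; verify it came up before continuing:
jps                             # a Kafka process should be listed
tail -n 50 ../logs/server.log   # look for "started (kafka.server.KafkaServer)"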
  • Create a topic and try it out with the console producer and consumer (a --bootstrap-server variant follows the commands).
./kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic dblab01
./kafka-topics.sh --describe --zookeeper localhost:2181 --topic dblab01
./kafka-topics.sh --list --zookeeper localhost:2181
./kafka-console-producer.sh --broker-list localhost:9092 --topic dblab01
./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic dblab01 --from-beginning
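Since Kafka 2.2 the topic tool can also talk to the broker directly; the --zookeeper flag is deprecated and the same commands work with --bootstrap-server:
./kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1 --topic dblab01
./kafka-topics.sh --list --bootstrap-server localhost:9092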

Kafka's znodes in ZooKeeper
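With the /kafka1 chroot configured above, the broker registers its metadata under that path. A quick look from zkCli (the exact child nodes vary slightly by version):
./zkCli.sh -server 127.0.0.1:2181
ls /kafka1
ls /kafka1/brokers/ids
get /kafka1/brokers/ids/1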

Usage

  • Add the dependency (Gradle).
compile 'org.apache.kafka:kafka_2.12:2.5.0'   // kafka-clients:2.5.0 alone would also suffice for the producer/consumer code below

Producer usage

  • Producer configuration.
    @Bean
    public KafkaProducer<String, String> kafkaProducer() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "xxxxxx:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // acks=-1 (same as "all"): wait for all in-sync replicas before acknowledging
        props.put("acks", "-1");
        return new KafkaProducer<>(props);
    }

  • Synchronous and asynchronous sends.
    private void produceSync() {
        for (int i = 0; i < 3; i++) {
            try {
                ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, "key_" + i, "val_" + i);
                // send() returns a Future; get() blocks until the broker acks, making the call synchronous
                kafkaProducer.send(record).get();
                System.out.println("send successful: " + record.key());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    private void produceAsync() {
        for (int i = 10; i < 13; i++) {
            try {
                ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, "key_" + i, "async_val_" + i);
                // the callback fires on the producer's I/O thread once the broker responds
                kafkaProducer.send(record, (metadata, exception) -> {
                    if (exception != null) {
                        exception.printStackTrace();
                    } else {
                        System.out.println("send successful: " + record.key());
                    }
                });
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
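The producer batches records in memory, so it should be flushed and closed on shutdown; a minimal sketch, assuming the Spring bean above is injected as kafkaProducer:

    @PreDestroy
    public void closeProducer() {
        // flush any buffered records, then release the network threads
        kafkaProducer.flush();
        kafkaProducer.close();
    }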

Consumer usage

  • Consumer configuration.
    @Bean
    public KafkaConsumer<String, String> kafkaConsumer() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "xxxxxx:9092");
        props.put("group.id", "consumerGroup2");
        // auto-commit offsets every second; set to "false" for the manual commit modes below
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        // with no committed offset, start from the earliest available record
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<>(props);
    }
  • Three commit modes: automatic, synchronous, and asynchronous (a combined manual-commit pattern is sketched after the code).
    public void consume() {
        // subscribe to the topic (Collections.singletonList avoids a Guava dependency)
        kafkaConsumer.subscribe(Collections.singletonList(TOPIC));
        new Thread(() -> {
            // poll loop
            Duration duration = Duration.ofSeconds(1);
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(duration);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
                }
                // pick ONE of the three commit modes:
                // 1) automatic: enable.auto.commit=true in the config, no call needed here
                // 2) synchronous: enable.auto.commit=false, then
                kafkaConsumer.commitSync();
                // 3) asynchronous: enable.auto.commit=false, then kafkaConsumer.commitAsync();
            }
        }).start();
    }
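A common manual-commit pattern combines the last two modes: commit asynchronously inside the loop for throughput, then synchronously once on shutdown so the final offsets are not lost. A sketch assuming enable.auto.commit=false, with a hypothetical running flag and process() handler:

    try {
        while (running) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                process(record); // hypothetical business handler
            }
            // non-blocking; an occasional failed commit is covered by the next one
            kafkaConsumer.commitAsync();
        }
    } finally {
        try {
            // one last blocking commit before closing
            kafkaConsumer.commitSync();
        } finally {
            kafkaConsumer.close();
        }
    }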