CC00037.kafka——|Hadoop&kafka.V22|——|kafka.v22|消费者拦截器实验案例.v02|

一、创建一个maven项目:demo-09-kafka-consumerInterceptors
### --- 添加pom.xml依赖

    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>1.0.2</version>
        </dependency>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-simple</artifactId>
            <version>1.7.25</version>
            <scope>test</scope>
        </dependency>

    </dependencies>
二、消费者拦截器编程实现
### --- package com.yanqi.kafka.demo.consumer

package com.yanqi.kafka.demo.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Collections;
import java.util.Properties;

public class MyConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker(s) used to bootstrap the cluster connection.
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092");
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "mygrp");
//        props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "myclient");
        // If no committed offset exists for this group, start from the earliest message.
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // Interceptor chain: One -> Two -> Three. Both onConsume (invoked on poll)
        // and onCommit (invoked on offset commit) run in this declared order.
        props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG,
                        "com.yanqi.kafka.demo.interceptor.OneInterceptor" +
                        ",com.yanqi.kafka.demo.interceptor.TwoInterceptor" +
                        ",com.yanqi.kafka.demo.interceptor.ThreeInterceptor"
                );

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        try {
            // Subscribe to the demo topic.
            consumer.subscribe(Collections.singleton("tp_demo_01"));

            while (true) {
                // Block for up to 3 seconds waiting for a batch of records.
                final ConsumerRecords<String, String> records = consumer.poll(3_000);

                records.forEach(record -> System.out.println(record.topic()
                        + "\t" + record.partition()
                        + "\t" + record.offset()
                        + "\t" + record.key()
                        + "\t" + record.value()));

//                consumer.commitAsync();
//                consumer.commitSync();
            }
        } finally {
            // Release sockets and interceptor resources even if poll throws
            // (e.g. WakeupException). The original close() call after the
            // infinite loop was unreachable, leaking the consumer on failure.
            consumer.close();
        }
    }
}
### --- package com.yanqi.kafka.demo.interceptor;

package com.yanqi.kafka.demo.interceptor;

import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

/**
 * First link of the consumer interceptor chain. Logs a marker when records
 * flow through poll() and when offsets are committed; records themselves
 * pass through untouched.
 */
public class OneInterceptor implements ConsumerInterceptor<String, String> {

    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        // Called right before poll() returns the batch to the application.
        System.out.println("One -- 开始");
        // No transformation: hand the batch back as-is.
        return records;
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        // Called whenever the consumer commits offsets.
        System.out.println("One -- 结束");
    }

    @Override
    public void close() {
        // Nothing to release: this interceptor holds no files, sockets, or connections.
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // Dump every consumer configuration entry this interceptor was built with.
        for (Map.Entry<String, ?> entry : configs.entrySet()) {
            System.out.println(entry.getKey() + "\t" + entry.getValue());
        }
    }
}
### --- package com.yanqi.kafka.demo.interceptor;

package com.yanqi.kafka.demo.interceptor;

import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

/**
 * Second link of the consumer interceptor chain. Logs a marker when records
 * flow through poll() and when offsets are committed; records themselves
 * pass through untouched.
 */
public class TwoInterceptor implements ConsumerInterceptor<String, String> {

    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        // Called right before poll() returns the batch to the application.
        System.out.println("Two -- 开始");
        // No transformation: hand the batch back as-is.
        return records;
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        // Called whenever the consumer commits offsets.
        System.out.println("Two -- 结束");
    }

    @Override
    public void close() {
        // Nothing to release: this interceptor holds no files, sockets, or connections.
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // Dump every consumer configuration entry this interceptor was built with.
        for (Map.Entry<String, ?> entry : configs.entrySet()) {
            System.out.println(entry.getKey() + "\t" + entry.getValue());
        }
    }
}
### --- package com.yanqi.kafka.demo.interceptor;

package com.yanqi.kafka.demo.interceptor;

import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

/**
 * Third and last link of the consumer interceptor chain. Logs a marker when
 * records flow through poll() and when offsets are committed; records
 * themselves pass through untouched.
 */
public class ThreeInterceptor implements ConsumerInterceptor<String, String> {

    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        // Called right before poll() returns the batch to the application.
        System.out.println("Three -- 开始");
        // No transformation: hand the batch back as-is.
        return records;
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        // Called whenever the consumer commits offsets.
        System.out.println("Three -- 结束");
    }

    @Override
    public void close() {
        // Nothing to release: this interceptor holds no files, sockets, or connections.
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // Dump every consumer configuration entry this interceptor was built with.
        for (Map.Entry<String, ?> entry : configs.entrySet()) {
            System.out.println(entry.getKey() + "\t" + entry.getValue());
        }
    }
}
三、编译打印
### --- 准备数据资源

[root@hadoop ~]# kafka-topics.sh --zookeeper localhost:2181/myKafka --list
tp_demo_01
### --- 编译打印

D:\JAVA\jdk1.8.0_231\bin\java.exe "-javaagent:D:\IntelliJIDEA\IntelliJ IDEA 2019.3.3\lib\idea_rt.jar=55670:D:\IntelliJIDEA\IntelliJ IDEA 2019.3.3\bin" -Dfile.encoding=UTF-8 -classpath D:\JAVA\jdk1.8.0_231\jre\lib\charsets.jar;D:\JAVA\jdk1.8.0_231\jre\lib\deploy.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\access-bridge-64.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\cldrdata.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\dnsns.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\jaccess.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\jfxrt.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\localedata.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\nashorn.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\sunec.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\sunjce_provider.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\sunmscapi.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\sunpkcs11.jar;D:\JAVA\jdk1.8.0_231\jre\lib\ext\zipfs.jar;D:\JAVA\jdk1.8.0_231\jre\lib\javaws.jar;D:\JAVA\jdk1.8.0_231\jre\lib\jce.jar;D:\JAVA\jdk1.8.0_231\jre\lib\jfr.jar;D:\JAVA\jdk1.8.0_231\jre\lib\jfxswt.jar;D:\JAVA\jdk1.8.0_231\jre\lib\jsse.jar;D:\JAVA\jdk1.8.0_231\jre\lib\management-agent.jar;D:\JAVA\jdk1.8.0_231\jre\lib\plugin.jar;D:\JAVA\jdk1.8.0_231\jre\lib\resources.jar;D:\JAVA\jdk1.8.0_231\jre\lib\rt.jar;E:\NO.Z.10000——javaproject\NO.Z.00002.Hadoop\kafka_demo\demo-09-kafka-consumer-Interceptor\target\classes;C:\Users\Administrator\.m2\repository\org\apache\kafka\kafka-clients\1.0.2\kafka-clients-1.0.2.jar;C:\Users\Administrator\.m2\repository\org\lz4\lz4-java\1.4\lz4-java-1.4.jar;C:\Users\Administrator\.m2\repository\org\xerial\snappy\snappy-java\1.1.4\snappy-java-1.1.4.jar;C:\Users\Administrator\.m2\repository\org\slf4j\slf4j-api\1.7.25\slf4j-api-1.7.25.jar com.yanqi.kafka.demo.consumer.MyConsumer
key.deserializer    org.apache.kafka.common.serialization.StringDeserializer
value.deserializer  org.apache.kafka.common.serialization.StringDeserializer
group.id    mygrp
interceptor.classes com.yanqi.kafka.demo.interceptor.OneInterceptor,com.yanqi.kafka.demo.interceptor.TwoInterceptor,com.yanqi.kafka.demo.interceptor.ThreeInterceptor
bootstrap.servers   node1:9092
auto.offset.reset   earliest
client.id   consumer-1
key.deserializer    org.apache.kafka.common.serialization.StringDeserializer
value.deserializer  org.apache.kafka.common.serialization.StringDeserializer
group.id    mygrp
interceptor.classes com.yanqi.kafka.demo.interceptor.OneInterceptor,com.yanqi.kafka.demo.interceptor.TwoInterceptor,com.yanqi.kafka.demo.interceptor.ThreeInterceptor
bootstrap.servers   node1:9092
auto.offset.reset   earliest
client.id   consumer-1
key.deserializer    org.apache.kafka.common.serialization.StringDeserializer
value.deserializer  org.apache.kafka.common.serialization.StringDeserializer
group.id    mygrp
interceptor.classes com.yanqi.kafka.demo.interceptor.OneInterceptor,com.yanqi.kafka.demo.interceptor.TwoInterceptor,com.yanqi.kafka.demo.interceptor.ThreeInterceptor
bootstrap.servers   node1:9092
auto.offset.reset   earliest
client.id   consumer-1
One -- 结束
Two -- 结束
Three -- 结束
~~~消费者消费数据
One -- 结束
Two -- 结束
Three -- 结束
### --- 生产者生产数据

[root@hadoop ~]# kafka-console-producer.sh --broker-list localhost:9092 --topic tp_demo_01
~~~生产者生产数据
hello yanqi1
hello yanqi2
hello yanqi3
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

yanqi_vip

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值