Kafka Java API使用 / 结合springboot

Kafka 结合Java API

pom

<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka-clients</artifactId>
  <version>2.5.1</version>
  <scope>compile</scope>
</dependency>

拦截器

通过拦截器,将时间戳添加在消息的最前端,并统计发送成功和发送失败的消息数,在 producer 关闭时打印计数器。

TimeInterceptor

/**
 * Producer interceptor that prepends the current wall-clock time (epoch millis)
 * to every record's value, separated by "---".
 */
public class TimeInterceptor implements ProducerInterceptor<String, String> {

    /**
     * Runs on the calling thread; the producer invokes this before the record
     * is serialized and before its partition is computed. The record may be
     * transformed here, but topic and partition are best left untouched.
     *
     * @param record the record about to be sent
     * @return a new record whose value is "&lt;millis&gt;---&lt;original value&gt;"
     */
    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // Build a new record with the timestamp prefixed to the payload.
        // value() is already a String, so the original .toString() call was redundant.
        return new ProducerRecord<>(record.topic(), record.partition(),
                record.timestamp(), record.key(),
                System.currentTimeMillis() + "---" + record.value());
    }

    /**
     * Called after the record was successfully sent from the RecordAccumulator
     * to the Kafka broker, or when sending failed. Nothing to do here.
     *
     * @param metadata  metadata of the acknowledged record
     * @param exception non-null when the send failed
     */
    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) { }

    /**
     * Closes the interceptor; used for resource cleanup. No resources held.
     */
    @Override
    public void close() { }

    /**
     * Receives configuration; no initialization needed for this interceptor.
     *
     * @param configs producer configuration entries
     */
    @Override
    public void configure(Map<String, ?> configs) { }
}

CounterInterceptor

/**
 * Producer interceptor that tallies how many records were acknowledged
 * successfully versus how many failed, and prints both totals on close().
 */
public class CounterInterceptor implements ProducerInterceptor<String, String> {

    // Records acknowledged without an error.
    private int successCounter = 0;
    // Records whose send failed.
    private int errorCount = 0;

    /** Pass-through: this interceptor only counts, it never rewrites records. */
    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        return record;
    }

    /** Tally one failure when an exception occurred, otherwise one success. */
    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            errorCount++;
        } else {
            successCounter++;
        }
    }

    /** Report the totals when the producer shuts down. */
    @Override
    public void close() {
        System.out.println("Successful sent: " + successCounter);
        System.out.println("Failed sent: " + errorCount);
    }

    /** No configuration needed. */
    @Override
    public void configure(Map<String, ?> configs) { }
}

生产者

Producer对象需要传入配置文件,传入Properties类型的键值对

public class InterceptorProducer extends KafkaProducer {
    private static Properties properties = new Properties();

    static {
        //配置信息
        properties.put("bootstrap.servers", "hadoop102:9093");
        properties.put("acks", "all");
        properties.put("retries", 3);
        properties.put("batch.size", 16384);
        properties.put("linger.ms", 1);
        properties.put("buffer.memory", 33554432);
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        //构建拦截链
        List<String> interceptors = new ArrayList<>();
        interceptors.add("myself.springboot_kafka.interceptor.TimeInterceptor");
        interceptors.add("myself.springboot_kafka.interceptor.CounterInterceptor");

        properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, interceptors);

    }

    public InterceptorProducer(Properties properties) {
        super(properties);
    }

    public static void main(String[] args) {
        InterceptorProducer interceptorProducer = new InterceptorProducer(InterceptorProducer.properties);
        String topic = "test-pc";

        for (int i = 0; i < 10; i++) {
            ProducerRecord<String,String> record = new ProducerRecord<>(topic,"message"+i);
            interceptorProducer.send(record);

        }
        interceptorProducer.close();
    }
}

消费者

/**
 * Plain Kafka consumer that polls "test-pc" forever, printing each record
 * and committing offsets synchronously after every batch.
 */
public class Consumer {
    public static void main(String[] args) {
        Properties props = new Properties();

        props.put("bootstrap.servers", "hadoop102:9093");
        props.put("group.id", "test-consumer-group");
        // Manual offset management: offsets are committed explicitly below.
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // try-with-resources closes the consumer (sockets, group membership)
        // if the loop is ever exited abnormally.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Subscribe to the topic.
            consumer.subscribe(Arrays.asList("test-pc"));

            while (true) {
                // poll(Duration) replaces the poll(long) overload, which has
                // been deprecated since Kafka 2.0 (this post uses 2.5.1).
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));

                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, keys = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
                // Synchronous commit: blocks the current thread until the
                // offsets are successfully committed.
                consumer.commitSync();
            }
        }
    }
}

Kafka 结合SpringBoot

pom.xml 和 application.properties

<dependency>
	<groupId>org.springframework.kafka</groupId>
	<artifactId>spring-kafka</artifactId>
</dependency>
#============== kafka ===================
# 指定kafka代理地址,可以多个
spring.kafka.bootstrap-servers=hadoop102:9093

##=============== provider  =======================
#spring.kafka.producer.retries=0
## 指定消息key和消息体的编解码方式
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer

##=============== consumer  =======================
# 指定默认消费者group id
spring.kafka.consumer.group-id=test-consumer-group
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-commit-interval=100

## 指定消息key和消息体的编解码方式
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer

生产者

/**
 * Spring component that publishes demo messages to the "springboot-topic" topic
 * via the auto-configured KafkaTemplate.
 */
@Component
@Slf4j
public class SpringBootProducer {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /** Build a message with a random id and the current time, then publish it. */
    public void send() {
        Msg message = new Msg(UUID.randomUUID().toString(), new Date());
        log.info("+++++++++++++++  message = {}", message.toString());
        kafkaTemplate.send("springboot-topic", message.toString());
    }

}

消费者

@Component
@Slf4j
public class SpringBootConsumer {
    @KafkaListener(topics = "springboot-topic")
    public void listen(ConsumerRecord<?,?> record){
        //判断消息是否为空
        Optional<?> value = Optional.ofNullable(record.value());
        //当前存在消息则执行
        if (value.isPresent()){
            Object msg = value.get();
            log.info("---------------- record = " + record);
            log.info("---------------- message = " + msg);
        }
        }
    }
}

SpringBoot启动程序

/**
 * Boot entry point: starts the application context, then drives the demo
 * producer to send three messages with a pause between them.
 */
@SpringBootApplication
@Slf4j
public class SpringbootKafkaApplication {

	public static void main(String[] args) {
		ConfigurableApplicationContext context = SpringApplication.run(SpringbootKafkaApplication.class, args);
		SpringBootProducer producer = context.getBean(SpringBootProducer.class);

		// Send three messages, sleeping between them so the listener's output
		// interleaves visibly with the producer's log lines.
		for (int i = 0; i < 3; i++) {
			log.info("============第 "+i+" 条消息============");
			producer.send();
			try {
				Thread.sleep(3000);
			} catch (InterruptedException e) {
				// Restore the interrupt flag and stop sending, instead of
				// swallowing the interruption with printStackTrace().
				Thread.currentThread().interrupt();
				break;
			}
		}
	}
}

从日志可以看到:主动调用生产者后,消费者会自动执行。第一条消息的消费日志没有立即出现,是因为 Kafka 在生产者发送第一条消息时才加载配置信息,而第一条生产日志在 SpringBoot 刚启动时就已经打印了。
在这里插入图片描述

在这里插入图片描述

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值