spring 集成kafka

3 篇文章 1 订阅

本文主要简单梳理梳理java应用中生产/消费kafka消息的一些使用。

1. 基于java版的kafka client与spring进行集成

spring-messaging-5.2.12.RELEASE.jar
spring-kafka-2.1.6.RELEASE.jar
kafka-clients-1.1.0.jar
需要这三个 Jar,可以到 Maven 仓库下载(注意:spring-kafka 2.1.x 官方适配的是 Spring Framework 5.0.x,与 spring-messaging-5.2.12 组合使用前请先验证兼容性)

https://mvnrepository.com/artifact/org.slf4j 仓库地址

2.web项目集成kafka

  1. kafka.properties
################# kafka producer ##################
# brokers集群
kafka.producer.bootstrap.servers = 10.123.1.151:9092

# 应答级别:多少个分区副本备份完成时向生产者发送ack确认(可选0、1、all/-1)
kafka.producer.acks = all
#发送失败重试次数
kafka.producer.retries = 3
kafka.producer.linger.ms =  10
# 生产端缓冲区大小(单位字节;此处为 40960 即 40KB,Kafka 默认值为 33554432 即 32MB)
kafka.producer.buffer.memory = 40960
#批处理条数:当多个记录被发送到同一个分区时,生产者会尝试将记录合并到更少的请求中。这有助于客户端和服务器的性能
kafka.producer.batch.size = 4096
#发布主题
kafka.producer.defaultTopic = SW_EFILES
kafka.producer.key.serializer = org.apache.kafka.common.serialization.StringSerializer
kafka.producer.value.serializer = org.apache.kafka.common.serialization.StringSerializer




################# kafka consumer ##################
#kafka.consumer.bootstrap.servers = localhost:2821,localhost:9092
kafka.consumer.bootstrap.servers = 10.123.1.151:9092
# 如果为true,消费者的偏移量将在后台定期提交
kafka.consumer.enable.auto.commit = true
#如果设置为自动提交(enable.auto.commit=true),这里设置自动提交周期(毫秒)
kafka.consumer.auto.commit.interval.ms=1000
#order-beta 消费者群组ID,发布-订阅模式,即如果一个生产者,多个消费者都要消费,那么需要定义自己的群组,同一群组内的消费者只有一个能消费到消息
kafka.consumer.group.id = sccl-nwbs
#在使用Kafka的组管理时,用于检测消费者故障的超时
kafka.consumer.session.timeout.ms = 30000
kafka.consumer.key.deserializer = org.apache.kafka.common.serialization.StringDeserializer
kafka.consumer.value.deserializer = org.apache.kafka.common.serialization.StringDeserializer
#订阅主题
kafka.consumer.topic = SW_EFILES
  2. producer-kafka.xml
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
         http://www.springframework.org/schema/beans/spring-beans.xsd
         http://www.springframework.org/schema/context
         http://www.springframework.org/schema/context/spring-context.xsd">

    <!--<context:property-placeholder location="classpath:kafka/kafka.properties" />-->

    <!-- Producer configuration map; values come from kafka.properties. -->
    <bean id="producerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <entry key="bootstrap.servers" value="${kafka.producer.bootstrap.servers}" />
                <!-- NOTE(review): the original also set group.id here, but group.id is a
                     consumer-only property; the producer client ignores it and logs a
                     warning, so it has been removed. -->
                <!-- Number of retries after a failed send -->
                <entry key="retries" value="${kafka.producer.retries}" />
                <!-- Batch size in bytes: records sent to the same partition are
                     merged into fewer, larger requests -->
                <entry key="batch.size" value="${kafka.producer.batch.size}" />
                <!-- How long (ms) to wait for more records before sending a batch -->
                <entry key="linger.ms" value="${kafka.producer.linger.ms}" />
                <!-- Total memory (bytes) available for buffering unsent records -->
                <entry key="buffer.memory" value="${kafka.producer.buffer.memory}" />
                <!-- acks=all: the send is acknowledged only after the leader and all
                     in-sync replicas have persisted it - strongest durability -->
                <entry key="acks" value="${kafka.producer.acks}" />
                <!-- Kafka-provided serializer classes -->
                <entry key="key.serializer" value="${kafka.producer.key.serializer}" />
                <entry key="value.serializer" value="${kafka.producer.value.serializer}"/>
            </map>
        </constructor-arg>
    </bean>

    <!-- ProducerFactory consumed by the KafkaTemplate below -->
    <bean id="producerFactory" class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
        <constructor-arg>
            <ref bean="producerProperties" />
        </constructor-arg>
    </bean>

    <!-- Inject this template wherever messages need to be sent; autoFlush=true
         flushes after every send (safer, at the cost of throughput). -->
    <bean id="kafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
        <constructor-arg ref="producerFactory" />
        <constructor-arg name="autoFlush" value="true" />
        <property name="defaultTopic" value="${kafka.producer.defaultTopic}" />
        <!-- Callback that logs the outcome of every send -->
        <property name="producerListener" ref="producerListener"/>
    </bean>

    <bean id="producerListener" class="com.efiles.kafka.KafkaProducerListener" />
</beans>
  3. consumer-kafka.xml
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:context="http://www.springframework.org/schema/context"
       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd">

    <!-- 1. Consumer configuration map; values come from kafka.properties so that
         dev/test environments can differ without touching this file. -->
    <!--<context:property-placeholder location="classpath:META-INF/kafka/kafka.properties"/>-->
    <bean id="consumerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <entry key="bootstrap.servers" value="${kafka.consumer.bootstrap.servers}"/>
                <!-- Consumers sharing a group.id split the partitions among them -->
                <entry key="group.id" value="${kafka.consumer.group.id}"/>
                <entry key="enable.auto.commit" value="${kafka.consumer.enable.auto.commit}"/>
                <entry key="session.timeout.ms" value="${kafka.consumer.session.timeout.ms}"/>
                <entry key="auto.commit.interval.ms" value="${kafka.consumer.auto.commit.interval.ms}"/>
                <entry key="retry.backoff.ms" value="100"/>
                <entry key="key.deserializer"
                       value="${kafka.consumer.key.deserializer}"/>
                <entry key="value.deserializer"
                       value="${kafka.consumer.value.deserializer}"/>
            </map>
        </constructor-arg>
    </bean>

    <!-- 2. ConsumerFactory built from the configuration above -->
    <bean id="consumerFactory"
          class="org.springframework.kafka.core.DefaultKafkaConsumerFactory">
        <constructor-arg>
            <ref bean="consumerProperties" />
        </constructor-arg>
    </bean>

    <!-- 3. The bean that actually processes each consumed record -->
    <bean id="messageListenerConsumerService" class="com.XXX.kafka.KafkaConsumerServer" />

    <!-- 4. Container properties: subscribed topic(s) plus the message listener -->
    <bean id="containerProperties"
          class="org.springframework.kafka.listener.config.ContainerProperties">
        <constructor-arg name="topics" value="${kafka.consumer.topic}"/>
        <property name="messageListener" ref="messageListenerConsumerService" />
    </bean>

    <!-- 5. Listener container. init-method is the public lifecycle method start()
         (the original used the protected internal doStart()). -->
    <bean id="messageListenerContainer"
          class="org.springframework.kafka.listener.KafkaMessageListenerContainer"
          init-method="start">
        <constructor-arg ref="consumerFactory" />
        <constructor-arg ref="containerProperties" />
    </bean>

</beans>

4.创建测试类
KafkaProducerServer.java

package com.xxx.kafka;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;


/**
 * @ClassName KafkaProducerServer
 * @Author Administrator
 * @Date 2021/5/10 0010 下午 3:47
 **/

@Controller
@RequestMapping("/test")
public class KafkaProducerServer {

    /**
     * Template defined in producer-kafka.xml; its default topic is
     * ${kafka.producer.defaultTopic} (SW_EFILES in kafka.properties).
     */
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * GET /test/kafka - sends a test message to the SW_EFILES topic.
     * The original body was entirely commented out, leaving the test
     * controller as dead code; this restores the intended endpoint.
     */
    @RequestMapping(value = "/kafka", method = RequestMethod.GET)
    public void test() {
        // kafkaTemplate.sendDefault("...") would use the configured default topic;
        // here the topic is passed explicitly.
        kafkaTemplate.send("SW_EFILES", "hello,kafka");
    }
}

KafkaProducerListener.java
package com.xxx.kafka;

import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.log4j.Logger;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.ProducerListener;

/**
 * @ClassName KafkaProducerListener
 * @Author Administrator
 * @Date 2021/5/10 0010 下午 3:09
 **/

/**
 * Logs the outcome of every KafkaTemplate send; wired in as the
 * producerListener bean in producer-kafka.xml.
 */
public class KafkaProducerListener implements ProducerListener {

    // Bug fix: the original used Logger.getLogger(KafkaListener.class), which
    // tagged every log entry with the unrelated @KafkaListener annotation class.
    private static final Logger LOG = Logger.getLogger(KafkaProducerListener.class);

    /**
     * Called after a record has been acknowledged by the broker.
     */
    @Override
    public void onSuccess(String topic, Integer partition, Object key, Object value, RecordMetadata recordMetadata) {
        LOG.info("==========kafka发送数据成功(日志开始)==========");
        LOG.info("----------topic:"+topic);
        LOG.info("----------partition:"+partition);
        LOG.info("----------key:"+key);
        LOG.info("----------value:"+value);
        LOG.info("----------RecordMetadata:"+recordMetadata);
        LOG.info("~~~~~~~~~~kafka发送数据成功(日志结束)~~~~~~~~~~");
    }

    /**
     * Called when a send fails. Logs at ERROR level with the stack trace
     * attached (the original logged at INFO and called printStackTrace(),
     * which bypasses the logging framework).
     */
    @Override
    public void onError(String topic, Integer partition, Object key, Object value, Exception exception) {
        LOG.error("==========kafka发送数据错误(日志开始)==========");
        LOG.error("----------topic:"+topic);
        LOG.error("----------partition:"+partition);
        LOG.error("----------key:"+key);
        LOG.error("----------value:"+value);
        LOG.error("~~~~~~~~~~kafka发送数据错误(日志结束)~~~~~~~~~~", exception);
    }

    /**
     * Returns whether onSuccess should be invoked for successful sends.
     * NOTE(review): contrary to the original comment, this does not control
     * listener startup; the framework calls it to decide whether to report
     * successes, so the message below is logged per check, not once at boot.
     */
    @Override
    public boolean isInterestedInSuccess() {
        LOG.info("--------------✨ kafkaProducer监听器启动 ✨-------------");
        return true;
    }

}

KafkaConsumerServer.java
package com.xxx.kafka;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.listener.MessageListener;

import java.util.ArrayList;
import java.util.List;

public class KafkaConsumerServer implements MessageListener<String, String>  {

    /**
     * Handles each record consumed from the subscribed topic
     * (${kafka.consumer.topic}, i.e. SW_EFILES).
     *
     * @param data the consumed record; only topic and value are used here
     */
    @Override
    public void onMessage(ConsumerRecord<String, String> data) {
        // Report the record's actual topic: the original message hard-coded
        // "mrdNewsSearchTopic", which is not a topic this project subscribes to.
        System.out.println("---收到" + data.topic() + "的kafka消息---");
        System.out.println(data.value());
    }
  • 2
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值