spring-integration-kafka Integration

1 spring-kafka-producer.xml configuration

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:int="http://www.springframework.org/schema/integration" xmlns:int-kafka="http://www.springframework.org/schema/integration/kafka"
    xmlns:task="http://www.springframework.org/schema/task"
    xsi:schemaLocation="http://www.springframework.org/schema/integration/kafka http://www.springframework.org/schema/integration/kafka/spring-integration-kafka.xsd
        http://www.springframework.org/schema/integration http://www.springframework.org/schema/integration/spring-integration.xsd
        http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
        http://www.springframework.org/schema/task http://www.springframework.org/schema/task/spring-task.xsd">

    <!-- commons config -->
    <bean id="stringSerializer" class="org.apache.kafka.common.serialization.StringSerializer" />
    
    <bean id="kafkaEncoder" class="org.springframework.integration.kafka.serializer.avro.AvroReflectDatumBackedKafkaEncoder">
        <constructor-arg value="java.lang.String" />
    </bean>
    
    <!-- Defaults to synchronous sending. acks: 0 = return immediately, 1 = return once the leader replica acks, -1 = return once all in-sync replicas ack -->
    <bean id="producerProperties" class="org.springframework.beans.factory.config.PropertiesFactoryBean">
        <property name="properties">
            <props>
                <prop key="topic.metadata.refresh.interval.ms">${kafka.producer.refresh.interval.ms}</prop>
                <prop key="message.send.max.retries">${kafka.producer.retries}</prop>
                <prop key="request.required.acks">${kafka.producer.acks}</prop>
                <!-- <prop key="partitioner.class">com.jinghan.backend.util.common.KafkaPartitioner</prop> <prop key="num.partitions">4</prop> -->
                <prop key="producer.type">sync</prop>
            </props>
        </property>
    </bean>
    <bean id="asyncProducerProperties" class="org.springframework.beans.factory.config.PropertiesFactoryBean">
        <property name="properties">
            <props>
                <prop key="topic.metadata.refresh.interval.ms">${kafka.producer.refresh.interval.ms}</prop>
                <prop key="message.send.max.retries">${kafka.producer.retries}</prop>
                <prop key="request.required.acks">${kafka.producer.acks}</prop>
                <!-- <prop key="partitioner.class">com.jinghan.backend.util.common.KafkaPartitioner</prop> <prop key="num.partitions">4</prop> -->
                <prop key="producer.type">async</prop>
                <prop key="batch.num.messages">10</prop>
            </props>
        </property>
    </bean>

    <!-- The outbound-channel-adapter uses an internal thread pool to process messages -->
    <int-kafka:outbound-channel-adapter 
        id="OutboundChannelAdapterTopic" kafka-producer-context-ref="asyncProducerContextTopic" 
        auto-startup="true" channel="kafkaTopic" order="2">
        <int:poller fixed-delay="1000" time-unit="MILLISECONDS" receive-timeout="1" task-executor="taskExecutor" />
    </int-kafka:outbound-channel-adapter>
    
    <task:executor id="taskExecutor" pool-size="5" keep-alive="120" queue-capacity="500" />
    
    <!-- channel definition -->
    <int:channel id="kafkaTopic">
        <int:queue />
    </int:channel>

    <!-- Producer list: one configuration per topic to be handled -->
    <int-kafka:producer-context id="asyncProducerContextTopic" producer-properties="asyncProducerProperties">
        <int-kafka:producer-configurations>
            <int-kafka:producer-configuration broker-list="${kafka.cluster.broker}" key-serializer="stringSerializer"
                value-class-type="java.lang.String" value-serializer="stringSerializer" topic="${kafka.topic_table}" />
            <int-kafka:producer-configuration broker-list="${kafka.cluster.broker}" key-serializer="stringSerializer"
                value-class-type="java.lang.String" value-serializer="stringSerializer" topic="${kafka.topic_dish}" />
            <int-kafka:producer-configuration broker-list="${kafka.cluster.broker}" key-serializer="stringSerializer"
                value-class-type="java.lang.String" value-serializer="stringSerializer" topic="${kafka.topic_waiter}" />
            <int-kafka:producer-configuration broker-list="${kafka.cluster.broker}" key-serializer="stringSerializer"
                value-class-type="java.lang.String" value-serializer="stringSerializer" topic="${kafka.topic_order}" />
        </int-kafka:producer-configurations>
    </int-kafka:producer-context>

</beans>
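
The commented-out partitioner.class property above is an old (Scala) producer setting that points at a custom partitioner. Under that 0.8-era API a partitioner implements kafka.producer.Partitioner; the class below is only a minimal sketch of what com.jinghan.backend.util.common.KafkaPartitioner might look like, with an illustrative key-hashing strategy:

package com.jinghan.backend.util.common;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

// Illustrative sketch of a key-hashing partitioner for the old (0.8) producer API.
public class KafkaPartitioner implements Partitioner {

    // The old producer instantiates partitioners reflectively through this constructor.
    public KafkaPartitioner(VerifiableProperties props) {
    }

    @Override
    public int partition(Object key, int numPartitions) {
        if (key == null) {
            return 0;
        }
        // Mask the sign bit so the result is always a valid partition index.
        return (key.hashCode() & 0x7fffffff) % numPartitions;
    }
}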

 

2 spring-kafka-consumer.xml configuration

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:int="http://www.springframework.org/schema/integration" xmlns:int-kafka="http://www.springframework.org/schema/integration/kafka"
    xmlns:task="http://www.springframework.org/schema/task"
    xsi:schemaLocation="http://www.springframework.org/schema/integration/kafka
                        http://www.springframework.org/schema/integration/kafka/spring-integration-kafka.xsd
                        http://www.springframework.org/schema/integration
                        http://www.springframework.org/schema/integration/spring-integration.xsd
                        http://www.springframework.org/schema/beans
                        http://www.springframework.org/schema/beans/spring-beans.xsd
                        http://www.springframework.org/schema/task
                        http://www.springframework.org/schema/task/spring-task.xsd">

    <!-- topic test conf -->
    <int:channel id="inputFromKafka">
        <int:dispatcher task-executor="kafkaMessageExecutor" />
    </int:channel>
    
    <!-- ZooKeeper configuration; multiple addresses can be configured -->
    <int-kafka:zookeeper-connect id="zookeeperConnect" zk-connect="${kafka.cluster.zookeeper}"
        zk-connection-timeout="6000" zk-session-timeout="6000" zk-sync-time="2000" />
        
    <!-- Channel adapter configuration; auto-startup="true" is required, otherwise no data is received -->
    <int-kafka:inbound-channel-adapter id="kafkaInboundChannelAdapter"
        kafka-consumer-context-ref="consumerContext" auto-startup="true" channel="inputFromKafka">
        <int:poller fixed-delay="1" time-unit="MILLISECONDS" />
    </int-kafka:inbound-channel-adapter>
    
    <task:executor id="kafkaMessageExecutor" pool-size="8" keep-alive="120" queue-capacity="500" />
    
    <bean id="kafkaDecoder" class="org.springframework.integration.kafka.serializer.common.StringDecoder" />

    <bean id="consumerProperties" class="org.springframework.beans.factory.config.PropertiesFactoryBean">
        <property name="properties">
            <props>
                <prop key="auto.offset.reset">smallest</prop>
                <prop key="socket.receive.buffer.bytes">10485760</prop> <!-- 10M -->
                <prop key="fetch.message.max.bytes">5242880</prop>
                <prop key="auto.commit.interval.ms">1000</prop>
            </props>
        </property>
    </bean>
    
    <!-- The bean that receives messages -->
    <bean id="kafkaConsumerService" class="com.jinghan.backend.order.kafka.consumer.impl.KafkaConsumerServiceImpl" />
    
    <!-- The method that receives messages -->
    <int:outbound-channel-adapter channel="inputFromKafka" ref="kafkaConsumerService" method="processMessage" />

    <int-kafka:consumer-context id="consumerContext" consumer-timeout="1000" zookeeper-connect="zookeeperConnect"
        consumer-properties="consumerProperties">
        <int-kafka:consumer-configurations>
            <int-kafka:consumer-configuration group-id="${kafka.consumer.group-order-default}" value-decoder="kafkaDecoder"
                key-decoder="kafkaDecoder" max-messages="5000">
                <int-kafka:topic id="${kafka.topic_table}" streams="4" />
                <int-kafka:topic id="${kafka.topic_dish}" streams="4" />
                <int-kafka:topic id="${kafka.topic_waiter}" streams="4" />
            </int-kafka:consumer-configuration>
        </int-kafka:consumer-configurations>
    </int-kafka:consumer-context>
</beans>
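
The int:outbound-channel-adapter above is the receiving endpoint: it invokes kafkaConsumerService.processMessage for every message arriving on inputFromKafka. The KafkaConsumerService interface itself is not shown in the post; judging from the implementation in section 4, it presumably looks like this sketch (the package is taken from the import there):

package com.jinghan.backend.mcnt.kafka.consumer;

import java.util.Map;

// Presumed interface behind the receiving endpoint: the inbound adapter delivers
// a map of topic -> (partition -> message), which processMessage consumes.
public interface KafkaConsumerService {

    void processMessage(Map<String, Map<Integer, String>> msgs);
}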
 

 

3 Property configuration:


#kafka common
filter.kafka.cluster.broker=192.168.2.9:9092,192.168.2.10:9092
filter.kafka.cluster.zookeeper=192.168.2.9:2181,192.168.2.10:2181

#kafka-topic
filter.kafka.topic_default=topic_default
filter.kafka.topic_account=topic_account
filter.kafka.topic_order=topic_order
filter.kafka.topic_merchant=topic_merchant
filter.kafka.topic_dish=topic_dish
filter.kafka.topic_table=topic_table
filter.kafka.topic_waiter=topic_waiter
filter.kafka.topic_vendor=topic_vendor
filter.kafka.topic_payment=topic_payment

#kafka-producer
filter.kafka.producer.refresh.interval.ms=3600000
filter.kafka.producer.retries=5
filter.kafka.producer.acks=1

#kafka-consumer
filter.kafka.consumer.group-default=default
filter.kafka.consumer.group-order-default=order_dev_local
filter.kafka.consumer.group-order-id=order_dev_local_1
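
Note that every key carries a filter. prefix while the XML placeholders reference ${kafka.*}. This suggests the file shown is a build-time filter source (for example, for Maven resource filtering) rather than the runtime properties file itself; that is only an inference from the naming. Either way, a placeholder configurer must load the resolved properties at runtime, along the lines of this sketch (the file name is illustrative, and the context namespace must be declared on the beans element):

<context:property-placeholder location="classpath:kafka.properties" />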
 

 

4 Consumer implementation:


/*
 * Copyright 1999-2024 Colotnet.com All right reserved. This software is the confidential and proprietary information of
 * Colotnet.com ("Confidential Information"). You shall not disclose such Confidential Information and shall use it only
 * in accordance with the terms of the license agreement you entered into with Colotnet.com.
 */
package com.jinghan.backend.mcnt.kafka.consumer.impl;

import java.util.Date;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.socket.TextMessage;

import com.alibaba.fastjson.JSON;
import com.jinghan.backend.base.vo.BaseMessage;
import com.jinghan.backend.mcnt.domain.merchant.MerchantSocketMessage;
import com.jinghan.backend.mcnt.kafka.consumer.KafkaConsumerService;
import com.jinghan.backend.mcnt.service.merchant.MerchantSocketMessageService;
import com.jinghan.backend.mcnt.websocket.MerchantWebsocketEndPoint;
import com.jinghan.backend.mcnt.websocket.util.WebSocketManager;


/**
 * Kafka message consumer.
 *
 * @author zengxiaoning
 * @version 1.0
 * @date 2017/04/13
 */
public class KafkaConsumerServiceImpl implements KafkaConsumerService {

    static final Logger logger = LoggerFactory.getLogger(KafkaConsumerServiceImpl.class);
    
    @Autowired
    MerchantSocketMessageService merchantSocketMessageService;
    
    @Override
    public void processMessage(Map<String, Map<Integer, String>> msgs) {
        if (null == msgs || msgs.isEmpty()) {
            return;
        }

        // Iterate over all subscribed topics; one listener can handle multiple topics.
        for (Map.Entry<String, Map<Integer, String>> entry : msgs.entrySet()) {

            Map<Integer, String> messages = entry.getValue();
            if (null == messages || messages.isEmpty()) {
                continue;
            }
            // Iterate over the partitions; a message lives in exactly one partition.
            // (The original iterated messages.values() once per partition key, which
            // processed every message multiple times.)
            for (Map.Entry<Integer, String> partitionEntry : messages.entrySet()) {
                String message = partitionEntry.getValue();
                logger.info("receive data from kafka: {}", message);
                try {
                    // Deserialize the message body; malformed JSON is caught below.
                    BaseMessage<?> tableMsg = JSON.parseObject(message, BaseMessage.class);
                    if (null != tableMsg && null != tableMsg.getMerchantId()) {

                        MerchantWebsocketEndPoint socketHandler = WebSocketManager.getWebSocketEndPoint(tableMsg.getMerchantId());
                        TextMessage textMessage = new TextMessage(message);
                        try {
                            if (null != socketHandler) {
                                socketHandler.handleMessage(socketHandler.getSession(), textMessage);
                            }
                        } catch (Exception e) {
                            // Delivery over the websocket failed; persist the message for later handling.
                            MerchantSocketMessage socketMessage = new MerchantSocketMessage();
                            socketMessage.setMerchantId(tableMsg.getMerchantId());
                            socketMessage.setMsgType(tableMsg.getMsgType().toString());
                            socketMessage.setMsgBody(message);
                            socketMessage.setCreateTime(new Date());
                            merchantSocketMessageService.insert(socketMessage);
                            logger.error("handle websocket message error", e);
                        }
                    }
                } catch (Exception e) {
                    logger.error("consume topic error", e);
                }
            }
        }
    }

}
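
BaseMessage is not included in the post; the getters used above imply roughly the following shape. The field types here are assumptions chosen to fit getMerchantId() and getMsgType():

package com.jinghan.backend.base.vo;

// Presumed shape of the message envelope, inferred from its usage in processMessage.
public class BaseMessage<T> {

    private Long merchantId;  // routes the message to a merchant's websocket session
    private Integer msgType;  // stored as a string on MerchantSocketMessage
    private T body;           // topic-specific payload

    public Long getMerchantId() {
        return merchantId;
    }

    public void setMerchantId(Long merchantId) {
        this.merchantId = merchantId;
    }

    public Integer getMsgType() {
        return msgType;
    }

    public void setMsgType(Integer msgType) {
        this.msgType = msgType;
    }

    public T getBody() {
        return body;
    }

    public void setBody(T body) {
        this.body = body;
    }
}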
 

 

 

5 Producer implementation:

/*
 * Copyright 1999-2024 Colotnet.com All right reserved. This software is the confidential and proprietary information of
 * Colotnet.com ("Confidential Information"). You shall not disclose such Confidential Information and shall use it only
 * in accordance with the terms of the license agreement you entered into with Colotnet.com.
 */
package com.jinghan.backend.base.service.impl;

import java.util.UUID;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.integration.kafka.support.KafkaHeaders;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.messaging.MessageChannel;
import org.springframework.stereotype.Service;

import com.jinghan.backend.base.service.IKafkaService;
import com.jinghan.backend.base.service.enums.TopicEnum;

/**
 * Kafka message-sending implementation.
 *
 * @author zengxiaoning
 * @version 1.0
 * @date 2017/04/13
 */
@Service
public class KafkaServiceImpl implements IKafkaService {
    
    private static final String SEND_ID = "send_id";
    
    @Autowired(required=false)
    @Qualifier("kafkaTopic")
    MessageChannel channel;
    
    @Override
    public boolean send(String message) throws Exception {
        return channel.send(MessageBuilder.withPayload(message)
                .setHeader(SEND_ID, getSendId())
                .setHeader(KafkaHeaders.TOPIC, TopicEnum.TOPIC_DEFAULT.getName())
                .build());
    }

    @Override
    public boolean send(String topic, String message) throws Exception {
        return channel.send(MessageBuilder.withPayload(message)
                .setHeader(SEND_ID, getSendId())
                .setHeader(KafkaHeaders.TOPIC, topic)
                .build());
    }

    private String getSendId() {
        return UUID.randomUUID().toString().replaceAll("-", "");
    }

}
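
TopicEnum is likewise not shown; it presumably maps logical names to the topics configured in section 3. A minimal sketch (the constants are illustrative and should mirror the kafka.topic_* properties):

package com.jinghan.backend.base.service.enums;

// Presumed enum of logical topic names; each value should match a configured topic.
public enum TopicEnum {
    TOPIC_DEFAULT("topic_default"),
    TOPIC_ORDER("topic_order"),
    TOPIC_DISH("topic_dish"),
    TOPIC_TABLE("topic_table");

    private final String name;

    TopicEnum(String name) {
        this.name = name;
    }

    public String getName() {
        return name;
    }
}

With these pieces in place, callers just inject IKafkaService and call, for example, kafkaService.send("topic_order", JSON.toJSONString(order)).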

Reposted from: https://my.oschina.net/zengxiaoning/blog/894597
