Kafka Best Practices

Message object:

import lombok.Data;

@Data
public class PersonInfo {
    private long createTime;
    private String idAccount;
    private String clientAddress;
    private String serverAddress;
}
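
For reference, a minimal standalone sketch of what the serialized payload looks like on the wire (the field values are hypothetical; fastjson 1.x sorts fields alphabetically by default):

import com.alibaba.fastjson.JSON;

public class PersonInfoDemo {
    public static void main(String[] args) {
        PersonInfo info = new PersonInfo();
        info.setCreateTime(1700000000000L);   // hypothetical timestamp
        info.setIdAccount("acct-001");        // hypothetical account id
        info.setClientAddress("10.0.0.1");
        info.setServerAddress("10.0.0.2");
        // prints something like:
        // {"clientAddress":"10.0.0.1","createTime":1700000000000,"idAccount":"acct-001","serverAddress":"10.0.0.2"}
        System.out.println(JSON.toJSONString(info));
    }
}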

Producer:

import com.alibaba.fastjson.JSON;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.net.InetAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
/**
 * Kafka installation / deployment:
 *   Create the topic Kafka-Topic on Kafka (version kafka_2.12-2.3.0). Reference command:
 *     bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 8 --topic Kafka-Topic
 *
 * Broker configuration:
 *   # ZooKeeper connect string and chroot
 *   zookeeper.connect=
 *   # local directory for message data
 *   log.dirs=
 *   # listener host and port
 *   listeners=
 *   # default partition count
 *   num.partitions=8
 *   # data retention (hours)
 *   log.retention.hours=2
 */
@Service
public class MqSender implements InitializingBean, DisposableBean {
    private static final Logger logger = LoggerFactory.getLogger(MqSender.class);
    @Value("${test.kafka.servers}")
    private String kafkaServers;

    @Value("${test.mqSender.mode:2}")
    private int mqSenderMode; // 0 = do not send, 1 = sync send, 2 = async send, 3 = fire-and-forget, 4 = build the message but do not send
    private Producer<String, String> producerEvent;
    private String localhostAddress;

    public static class NtripAccountPartitioner implements Partitioner {
        public NtripAccountPartitioner() {
        }

        @Override
        public void configure(Map<String, ?> configs) {
        }

        @Override
        public void close() {
        }

        @Override
        public int partition(String topic, Object key, byte[] keyBytes,
                             Object value, byte[] valueBytes, Cluster cluster) {
            // pick the partition from the account id in the key, so that all
            // messages of one account land on the same partition
            int numPartitions = cluster.partitionsForTopic(topic).size();
            String skey = (String) key;
            // key format is "<createTime>_<clientAddress>.<idAccount>"; clientAddress is an IP
            // and contains dots itself, so the account must be split off at the LAST dot
            int ndot = skey.lastIndexOf('.');
            String idAccount = skey.substring(ndot + 1);
            return (idAccount.hashCode() & 0x7fffffff) % numPartitions;
        }
    }

    @Override
    public void afterPropertiesSet() throws Exception {
        this.localhostAddress = InetAddress.getLocalHost().getHostAddress();
        Map<String, Object> props = new HashMap<>();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ProducerConfig.ACKS_CONFIG, "1");        // leader-only ack: a compromise between latency and durability
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1000); // batch size in bytes
        props.put(ProducerConfig.LINGER_MS_CONFIG, 2000);  // wait up to 2 s for a batch to fill before sending
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, NtripAccountPartitioner.class);

        // producer for DeviceEvent messages
        producerEvent = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
    }

    @Override
    public void destroy() throws Exception {
        producerEvent.flush();
        producerEvent.close();
    }


    // publish a device-event Kafka message
    public void sendDeviceEvent(PersonInfo info) {
        if (mqSenderMode <= 0) return;

        try {
            doSendPersonInfo(info);
        } catch (Exception e) {
            logger.error("Send DeviceEvent message error, e={}", e);
        }
    }

    private void doSendPersonInfo(PersonInfo info) {
        info.setCreateTime(System.currentTimeMillis());
        info.setServerAddress(this.localhostAddress);
        // key format "<createTime>_<clientAddress>.<idAccount>"; the partitioner splits the account off at the last dot
        String msgKey = Long.toString(info.getCreateTime()) + '_' + info.getClientAddress() + '.' + info.getIdAccount();
        String msgValue = JSON.toJSONString(info);
        sendKafkaMessage("Kafka-Topic", producerEvent, msgKey, msgValue);
    }

    private <V> void sendKafkaMessage(String topic, Producer<String, V> producer, String msgKey, V msgValue) {
        ProducerRecord<String, V> record = new ProducerRecord<>(topic, msgKey, msgValue);
        if (mqSenderMode == 1) {
            Future<RecordMetadata> future = producer.send(record);
            try {
                future.get(3000, TimeUnit.MILLISECONDS);
            } catch (Exception e) {
                logger.error("Kafka send error for msgKey={},e={} ", msgKey, e);
            }
        } else if (mqSenderMode == 2) {
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null)
                        logger.error("Kafka async error for  msgKey={},e={} ", msgKey, exception);
                }
            });
        } else if (mqSenderMode == 3) {
            // fire-and-forget: the result is never checked
            producer.send(record);
        }
    }
}
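
The partitioner's correctness hinges on the key format, so a quick standalone sanity check is worth keeping around. partitionFor below mirrors the account extraction in NtripAccountPartitioner; the partition count of 8 matches the topic created above, and the key values are hypothetical:

public class PartitionerCheck {
    // mirrors the extraction logic in NtripAccountPartitioner
    static int partitionFor(String msgKey, int numPartitions) {
        String idAccount = msgKey.substring(msgKey.lastIndexOf('.') + 1);
        return (idAccount.hashCode() & 0x7fffffff) % numPartitions;
    }

    public static void main(String[] args) {
        // the same account reported from two different clients must land on the same partition
        int p1 = partitionFor("1700000000000_10.0.0.1.acct-001", 8);
        int p2 = partitionFor("1700000000001_10.0.0.2.acct-001", 8);
        System.out.println(p1 == p2); // true
    }
}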

Consumer:

import com.alibaba.fastjson.JSON;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class PersonInfoMqListener implements InitializingBean, DisposableBean, ConsumerFactory.RecordListenner<String, String> {

	@Autowired
	private ConsumerFactory consumerFactory;

	@Override
	public void onMessages(ConsumerRecords<String, String> records) {
		for (ConsumerRecord<String, String> record : records) {
			PersonInfo info = JSON.parseObject(record.value(), PersonInfo.class);
			// business logic goes here (see the per-record error-handling sketch below)
		}
	}

	private ConsumerFactory.MyConsumer<String, String> consumer;

	@Override
	public void afterPropertiesSet() throws Exception {
		this.consumer = consumerFactory.newConsumer("Kafka-Topic", "Kafka-Consumer-Group",
				new StringDeserializer(), new StringDeserializer(), this);
	}

	@Override
	public void destroy() throws Exception {
		if (consumer != null) {
			consumer.close();
			consumer = null;
		}
	}
}
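
One detail worth guarding in onMessages: a single malformed record should not abort the whole batch. A minimal sketch of the loop with per-record error handling, assuming an SLF4J logger field like the one in MqSender (handlePersonInfo is a hypothetical placeholder for the business logic):

@Override
public void onMessages(ConsumerRecords<String, String> records) {
    for (ConsumerRecord<String, String> record : records) {
        try {
            PersonInfo info = JSON.parseObject(record.value(), PersonInfo.class);
            handlePersonInfo(info); // hypothetical business handler
        } catch (Exception e) {
            // log and skip the bad record instead of failing the whole batch
            logger.error("Bad record at {}-{}@{}", record.topic(), record.partition(), record.offset(), e);
        }
    }
}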
Consumer factory:

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.Deserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class ConsumerFactory {
    public interface RecordListenner<K, V> {
        void onMessages(ConsumerRecords<K, V> records);
    }

    private static final Logger logger = LoggerFactory.getLogger(ConsumerFactory.class);

    public class MyConsumer<K, V> implements Runnable {
        private final String topic;
        private final KafkaConsumer<K, V> consumer;
        private volatile RecordListenner<K, V> listenner;

        public MyConsumer(String topic,
                          KafkaConsumer<K, V> consumer,
                          RecordListenner<K, V> listenner) {
            this.topic = topic;
            this.consumer = consumer;
            this.listenner = listenner;

            consumer.subscribe(Collections.singletonList(topic));
            new Thread(this, "MyConsumer-" + topic).start();
        }

        @Override
        public void run() {
            try {
                while (listenner != null) {
                    try {
                        // poll() never returns null; an empty batch just means no new data
                        ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(1000));
                        if (records.isEmpty())
                            continue;
                        listenner.onMessages(records);
                    } catch (WakeupException e) {
                        break; // close() was called, leave the poll loop
                    } catch (Exception e) {
                        logger.error("MyConsumer run error", e);
                    }
                }
            } finally {
                // KafkaConsumer is not thread-safe, so it must be closed on the polling thread
                consumer.close();
            }
        }

        public void close() {
            listenner = null;
            // wakeup() is the only KafkaConsumer method that is safe to call from
            // another thread; it aborts a blocking poll() with a WakeupException
            consumer.wakeup();
        }
    }

    @Value("${test.kafka.servers}")
    private String kafkaServers;

    public <K, V> MyConsumer<K, V> newConsumer(
            String topic, String consumerGroup,
            Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer,
            RecordListenner<K, V> listenner) {
        Map<String, Object> props = new HashMap<>();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true); // auto-commit offsets (simple, but see the delivery-guarantee note below)
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 5000);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000); // up to 1000 records per poll; the listener must be able to keep up
        props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // start from the earliest offset when the group has none committed
        KafkaConsumer<K, V> consumer = new KafkaConsumer<>(props, keyDeserializer, valueDeserializer);
        return new MyConsumer<K, V>(topic, consumer, listenner);
    }
}
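
Note that auto-commit, as configured above, can commit offsets before a polled batch has actually been processed, so a crash inside the listener may lose messages. If at-least-once delivery matters more than simplicity, a common variant is to disable auto-commit and commit only after the listener returns. A minimal sketch of how newConsumer and the poll loop would change (not part of the code above):

// in newConsumer(): take manual control of offsets
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

// in MyConsumer.run(): commit only after the batch was handled
ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(1000));
if (!records.isEmpty()) {
    listenner.onMessages(records); // may throw; offsets then stay uncommitted
    consumer.commitSync();         // at-least-once: duplicates are possible, loss is not
}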
