How to Send Custom Message Objects with Kafka

Implementation approach:

  1. On the client (message producer), create the KafkaProducer that sends to the topic and parameterize it with your message type (a minimal sketch of such a message class follows this list).

private KafkaProducer<String, KafkaInformation> producer; // KafkaInformation is the custom message object being sent

  2. On the client (message producer), define a message encoder, ObjectEncoder, that returns the object as a byte array.

public class ObjectEncoder implements Serializer<KafkaInformation>

  3. In the client's KafkaProducer configuration, register your ObjectEncoder.

props.put("value.serializer", ObjectEncoder.class);

  4. Likewise, on the server (message consumer) side, create the matching receiving object and decoder, then read the data back out.
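
The KafkaInformation class itself is not shown in the post; any plain Java object works as long as it can be turned into bytes and back. A minimal illustrative sketch (the fields are hypothetical) that implements Serializable, since the encoder and decoder below rely on standard Java serialization:

```java
package com.gdunicom.demo.domain.vo;

import java.io.Serializable;

// Illustrative message object; the real KafkaInformation may carry different fields
public class KafkaInformation implements Serializable {

    private static final long serialVersionUID = 1L;

    private String deviceId;  // hypothetical field
    private String payload;   // hypothetical field

    public String getDeviceId() { return deviceId; }
    public void setDeviceId(String deviceId) { this.deviceId = deviceId; }

    public String getPayload() { return payload; }
    public void setPayload(String payload) { this.payload = payload; }

    @Override
    public String toString() {
        return "KafkaInformation{deviceId='" + deviceId + "', payload='" + payload + "'}";
    }
}
```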

1.1 Client (producer) side: socket message receiving thread

package com.gdunicom.demo.socket;

import com.gdunicom.demo.domain.vo.KafkaInformation;
import com.gdunicom.demo.kafka.ObjectEncoder;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.net.Socket;
import java.util.Properties;

/**
 * Client-side receive thread: reads KafkaInformation objects from the socket
 * and forwards them to Kafka as a producer.
 */
public class ClientRecvThread implements Runnable {

    private KafkaProducer<String, KafkaInformation> producer;

    private final static Logger log = LoggerFactory.getLogger(ClientRecvThread.class);

    private Socket socket;

    private volatile boolean isStop = false;

    public ClientRecvThread(Socket socket) {
        this.socket = socket;

        // Initialize the Kafka producer
        Properties props = new Properties();
        // Address of the Kafka broker this client pushes data to
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class);
        props.put("value.serializer", ObjectEncoder.class);
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        this.producer = new KafkaProducer<>(props);
    }


    @Override
    public void run() {
        // Stop condition: the stop flag is set to true, or the socket has been closed
        InputStream inputStream = null;
        DataInputStream dataInputStream = null;
        try {
            inputStream = socket.getInputStream();
            dataInputStream = new DataInputStream(inputStream);
            while (!isStop && !socket.isClosed()) {
                KafkaInformation kafkaInformation = SocketUtil.readData(inputStream);
                System.out.println("Received data: " + kafkaInformation);
                // Forward the received object to Kafka via the producer
                ProducerRecord<String, KafkaInformation> producerRecord =
                        new ProducerRecord<>("testTopic", kafkaInformation);
                producer.send(producerRecord);
                Thread.sleep(1000);
            }
        } catch (IOException | InterruptedException e) {
            log.error("Exception while receiving messages on the client", e);
        } finally {
            this.isStop = true;
            log.info("Old client receive thread has been shut down");
            producer.close(); // release Kafka producer resources when the thread stops
            StreamUtil.closeInputStream(dataInputStream);
            StreamUtil.closeInputStream(inputStream);
            SocketUtil.closeSocket(socket);
        }
        }

    }

    public boolean getStop() {
        return isStop;
    }

    public void setStop(boolean stop) {
        isStop = stop;
    }
}
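
SocketUtil and StreamUtil are project helpers that the post does not include, so their exact wire format is unknown. Below is a hypothetical sketch of SocketUtil, assuming each message arrives as a 4-byte length prefix followed by the Java-serialized object bytes (a possible ByteUtil is sketched after the encoder in section 1.2); StreamUtil.closeInputStream would be an analogous quiet-close helper for input streams.

```java
package com.gdunicom.demo.socket;

import com.gdunicom.demo.domain.vo.KafkaInformation;

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.Socket;

public class SocketUtil {

    // Read one length-prefixed, Java-serialized KafkaInformation from the stream
    // (the length-prefix framing is an assumption, not taken from the original post)
    public static KafkaInformation readData(InputStream in) throws IOException {
        DataInputStream din = new DataInputStream(in);
        int length = din.readInt();      // 4-byte length prefix
        byte[] buffer = new byte[length];
        din.readFully(buffer);
        try {
            return ByteUtil.byteToObject(buffer);
        } catch (ClassNotFoundException e) {
            throw new IOException("Cannot deserialize KafkaInformation", e);
        }
    }

    // Close the socket quietly, swallowing any IOException
    public static void closeSocket(Socket socket) {
        if (socket != null) {
            try {
                socket.close();
            } catch (IOException ignored) {
            }
        }
    }
}
```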

1.2 Client-side message encoder

package com.gdunicom.demo.kafka;


import com.gdunicom.demo.domain.vo.KafkaInformation;
import com.gdunicom.demo.socket.ByteUtil;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;

/**
 * Serializes KafkaInformation objects into byte arrays for the Kafka producer.
 */
public class ObjectEncoder implements Serializer<KafkaInformation> {

    @SneakyThrows
    @Override
    public byte[] serialize(String topic, KafkaInformation data) {
        return ByteUtil.objectToByte(data);
    }
}
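
ObjectEncoder delegates to a ByteUtil helper that the post does not show. A minimal sketch of what it could look like, assuming plain Java serialization (which requires KafkaInformation to implement java.io.Serializable):

```java
package com.gdunicom.demo.socket;

import java.io.*;

public class ByteUtil {

    // Serialize any Serializable object into a byte array
    public static byte[] objectToByte(Object obj) throws IOException {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
             ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(obj);
            oos.flush();
            return bos.toByteArray();
        }
    }

    // Rebuild the object from a byte array produced by objectToByte
    @SuppressWarnings("unchecked")
    public static <T> T byteToObject(byte[] data) throws IOException, ClassNotFoundException {
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(data))) {
            return (T) ois.readObject();
        }
    }
}
```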

2.1 Server side: receiving and consuming the messages

package com.gdunicom.demo.kafka;

import com.gdunicom.demo.domain.vo.KafkaInformation;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

/**
 * Consumes topic data on a dedicated thread.
 */
public class KafkaConsumerTest implements Runnable {

    private final KafkaConsumer<String, KafkaInformation> consumer;
    private ConsumerRecords<String, KafkaInformation> msgList;
    private final String topic;
    private static final String GROUPID = "c_test"; // consumer group id; any value will do

    public KafkaConsumerTest(String topicName) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("security.protocol", "PLAINTEXT");
        props.put("group.id", GROUPID);
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("auto.offset.reset", "earliest");//从何处开始消费,latest 表示消费最新消息,earliest 表示从头开始消费,none表示抛出异常,默认latest
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", ObjectDecoder.class);
        this.consumer = new KafkaConsumer<>(props);
        this.topic = topicName;
        List<TopicPartition> topicPartitions = new ArrayList<>();
        topicPartitions.add(new TopicPartition(topicName,0));
        this.consumer.assign(topicPartitions);
    }

    @Override
    public void run() {
        int messageNo = 1;
        System.out.println("---------开始消费---------");
        try {
            for (; ; ) {
                msgList = consumer.poll(Duration.ofSeconds(2L));
                if (null != msgList && msgList.count() > 0) {
                    for (ConsumerRecord<String, KafkaInformation> record : msgList) {
                        KafkaInformation value = record.value();
                        System.out.println("数据来啦!第"+messageNo+"条-----------------------------------------------------"+value);
                        messageNo++;
                        consumer.commitAsync();
                    }
                } else {
                    Thread.sleep(100);
                }
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }

    public static void main(String[] args) {
        KafkaConsumerTest test1 = new KafkaConsumerTest("testTopic");
        Thread thread1 = new Thread(test1);
        thread1.start();
    }
}


2.2 Server-side message decoder

package com.gdunicom.demo.kafka;

import com.gdunicom.demo.domain.vo.KafkaInformation;
import com.gdunicom.demo.socket.ByteUtil;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Deserializer;

/**
 * Deserializes the byte arrays read from Kafka back into KafkaInformation objects.
 */
public class ObjectDecoder implements Deserializer<KafkaInformation> {
    @SneakyThrows
    @Override
    public KafkaInformation deserialize(String topic, byte[] data) {
        return ByteUtil.byteToObject(data);
    }
}
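
Because the encoder and decoder classes are registered through the value.serializer / value.deserializer properties, the Kafka client instantiates them reflectively, so they need a public no-argument constructor. An alternative (not used in this post) is to pass ready-made instances straight to the client constructors; a brief sketch, where producerProps and consumerProps stand for the Properties built earlier without the (de)serializer entries:

```java
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

// Supply the (de)serializers as instances instead of class names in the config
KafkaProducer<String, KafkaInformation> producer =
        new KafkaProducer<>(producerProps, new StringSerializer(), new ObjectEncoder());

KafkaConsumer<String, KafkaInformation> consumer =
        new KafkaConsumer<>(consumerProps, new StringDeserializer(), new ObjectDecoder());
```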

3.1 Alternative: transporting custom objects with Avro

Transporting custom objects through Kafka always requires serializing and deserializing them. The following simple example shows how to transport a custom object using Kafka together with Avro.

First, create an Avro schema for serializing and deserializing the object; it can be defined with Avro IDL or in JSON. Suppose we have a custom object named User with the fields name and age:

```avro
{
  "type": "record",
  "name": "User",
  "fields": [
    {"name": "name", "type": "string"},
    {"name": "age", "type": "int"}
  ]
}
```

Then generate the corresponding Java class for this schema; Avro's tooling can convert the schema file into a Java class. Assume the generated class is named User.

Next, configure the Kafka producer and consumer to serialize and deserialize with Avro. On the producer side, specify the Avro serializer and send the Avro object to Kafka as the message value:

```java
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("key.serializer", StringSerializer.class);
props.put("value.serializer", KafkaAvroSerializer.class);
props.put("schema.registry.url", "http://localhost:8081");

Producer<String, User> producer = new KafkaProducer<>(props);

User user = new User();
user.setName("John");
user.setAge(30);

ProducerRecord<String, User> record = new ProducerRecord<>("topic-name", "key", user);
producer.send(record);
```

On the consumer side, specify the Avro deserializer and receive the Avro objects:

```java
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("key.deserializer", StringDeserializer.class);
props.put("value.deserializer", KafkaAvroDeserializer.class);
props.put("schema.registry.url", "http://localhost:8081");

Consumer<String, User> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Collections.singletonList("topic-name"));

ConsumerRecords<String, User> records = consumer.poll(Duration.ofMillis(100));
for (ConsumerRecord<String, User> record : records) {
    String key = record.key();
    User user = record.value();
    System.out.println("Key: " + key + ", User: " + user.getName() + ", " + user.getAge());
}
```

Make sure your project includes the following dependencies:

```xml
<dependencies>
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>2.8.0</version>
    </dependency>
    <dependency>
        <groupId>io.confluent</groupId>
        <artifactId>kafka-avro-serializer</artifactId>
        <version>6.2.0</version>
    </dependency>
    <dependency>
        <groupId>io.confluent</groupId>
        <artifactId>kafka-schema-registry</artifactId>
        <version>6.2.0</version>
    </dependency>
</dependencies>
```

Note that this example assumes Kafka and a Schema Registry are already set up and reachable, and that the Avro dependencies are compatible with your Kafka version. It is only a starting point; adjust and extend it to your own needs.