实现思路过程讲解
- 客户端(消息生产者)创建好Topic发送对象KafkaProducer。并指定你的消息对象
private KafkaProducer<String, KafkaInformation> producer;// KafkaInformation是发送的消息
- 客户端(消息生产者)定义消息编码器ObjectEncoder ,返回对象的字节数组
public class ObjectEncoder implements Serializer<KafkaInformation>
- 在客户端(消息生产者)的KafkaProducer对象配置中指定你的ObjectEncoder
props.put("value.serializer", ObjectEncoder.class);
- 同理服务端(消费者)则创建好对象的消息接收对象和解码器,最后将数据读出来
1.1 客户端发送,服务端消息接收线程
package com.gdunicom.demo.socket;
import com.gdunicom.demo.domain.vo.KafkaInformation;
import com.gdunicom.demo.domain.vo.SocketMsgDataVo;
import com.gdunicom.demo.kafka.ObjectEncoder;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.net.Socket;
import java.util.Properties;
/**
* **客户端发送,服务端消息接收线程**
*/
/**
 * Reads framed {@code KafkaInformation} messages from a client socket and forwards
 * each one to the {@code testTopic} Kafka topic.
 *
 * <p>The receive loop ends when {@link #setStop(boolean)} is called with {@code true}
 * or the socket is closed; the socket and streams are released in the finally block.
 */
public class ClientRecvThread implements Runnable {
    // Producer that publishes every message read from the socket.
    private KafkaProducer<String, KafkaInformation> producer;
    private final static Logger log = LoggerFactory.getLogger(ClientRecvThread.class);
    private Socket socket;
    // volatile so a stop request from another thread is visible to the read loop
    private volatile boolean isStop = false;

    public ClientRecvThread(Socket socket) {
        this.socket = socket;
        // Initialise the Kafka producer this thread publishes through.
        Properties props = new Properties();
        // Broker address the producer pushes data to.
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class);
        props.put("value.serializer", ObjectEncoder.class);
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        // FIX: diamond operator instead of a raw-typed constructor call.
        this.producer = new KafkaProducer<>(props);
    }

    @Override
    public void run() {
        // Loop terminates when the stop flag is set or the socket is closed.
        InputStream inputStream = null;
        DataInputStream dataInputStream = null;
        try {
            inputStream = socket.getInputStream();
            dataInputStream = new DataInputStream(inputStream);
            while (!isStop && !socket.isClosed()) {
                // NOTE(review): assumes SocketUtil.readData blocks until one whole
                // message frame is available — confirm against SocketUtil.
                KafkaInformation kafkaInformation = SocketUtil.readData(inputStream);
                System.out.println("接收到数据:" + kafkaInformation);
                // FIX: parameterised ProducerRecord instead of the raw type.
                ProducerRecord<String, KafkaInformation> producerRecord =
                        new ProducerRecord<>("testTopic", kafkaInformation);
                producer.send(producerRecord);
                Thread.sleep(1000);
            }
        } catch (IOException e) {
            // FIX: hand the exception to the logger instead of printStackTrace().
            log.error("客户端接收消息发生异常", e);
        } catch (InterruptedException e) {
            // FIX: restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            log.error("客户端接收消息发生异常", e);
        } finally {
            this.isStop = true;
            log.info("客户端旧接收线程已摧毁");
            StreamUtil.closeInputStream(dataInputStream);
            StreamUtil.closeInputStream(inputStream);
            SocketUtil.closeSocket(socket);
        }
    }

    public boolean getStop() {
        return isStop;
    }

    public void setStop(boolean stop) {
        isStop = stop;
    }
}
1.2 客户端消息编码器
package com.gdunicom.demo.kafka;
import com.gdunicom.demo.domain.vo.KafkaInformation;
import com.gdunicom.demo.socket.ByteUtil;
import kafka.utils.VerifiableProperties;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;
/**
 * Kafka value serializer that turns a {@code KafkaInformation} object into its
 * Java-serialized byte array via {@code ByteUtil.objectToByte}.
 */
public class ObjectEncoder implements Serializer<KafkaInformation> {
    @SneakyThrows
    @Override
    public byte[] serialize(String topic, KafkaInformation data) {
        // FIX: per the Kafka Serializer contract, a null record value must
        // serialize to null (tombstone) rather than being passed to the encoder.
        if (data == null) {
            return null;
        }
        return ByteUtil.objectToByte(data);
    }
}
2.1 服务端接收消费信息
package com.gdunicom.demo.kafka;
import com.gdunicom.demo.domain.vo.KafkaInformation;
import com.gdunicom.demo.socket.ByteUtil;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
/**
* 独立线程消费topic数据
*/
/**
 * Consumes {@code KafkaInformation} records from partition 0 of a topic on a
 * dedicated thread, printing each record as it arrives.
 */
public class KafkaConsumerTest implements Runnable {
    private final KafkaConsumer<String, KafkaInformation> consumer;
    private final String topic;
    // Consumer group id; any value works for this demo.
    private static final String GROUPID = "c_test";

    public KafkaConsumerTest(String topicName) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("security.protocol", "PLAINTEXT");
        props.put("group.id", GROUPID);
        // Offsets are committed automatically every second; no manual commit needed.
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        // earliest = consume from the beginning; latest = only new messages;
        // none = throw when no committed offset exists (default is latest).
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", StringDeserializer.class.getName());
        // FIX: pass the class name, consistent with the key deserializer above.
        props.put("value.deserializer", ObjectDecoder.class.getName());
        this.consumer = new KafkaConsumer<>(props);
        this.topic = topicName;
        // Manual partition assignment: read partition 0 of the topic directly.
        List<TopicPartition> topicPartitions = new ArrayList<>();
        topicPartitions.add(new TopicPartition(topicName, 0));
        this.consumer.assign(topicPartitions);
    }

    @Override
    public void run() {
        int messageNo = 1;
        System.out.println("---------开始消费---------");
        try {
            for (; ; ) {
                // FIX: poll result is a local; it was a field for no reason.
                ConsumerRecords<String, KafkaInformation> msgList =
                        consumer.poll(Duration.ofSeconds(2L));
                if (null != msgList && msgList.count() > 0) {
                    for (ConsumerRecord<String, KafkaInformation> record : msgList) {
                        KafkaInformation value = record.value();
                        System.out.println("数据来啦!第"+messageNo+"条-----------------------------------------------------"+value);
                        messageNo++;
                    }
                    // FIX: removed the per-record commitAsync(); with
                    // enable.auto.commit=true the client already commits offsets,
                    // and mixing both double-commits every record.
                } else {
                    Thread.sleep(100);
                }
            }
        } catch (InterruptedException e) {
            // FIX: restore the interrupt status before exiting the loop.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }

    public static void main(String[] args) {
        KafkaConsumerTest test1 = new KafkaConsumerTest("testTopic");
        Thread thread1 = new Thread(test1);
        thread1.start();
    }
}
2.2 服务端消息解码器
package com.gdunicom.demo.kafka;
import com.gdunicom.demo.domain.vo.KafkaInformation;
import com.gdunicom.demo.socket.ByteUtil;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Deserializer;
/**
 * Kafka value deserializer that reconstructs a {@code KafkaInformation} object
 * from its Java-serialized byte array via {@code ByteUtil.byteToObject}.
 */
public class ObjectDecoder implements Deserializer<KafkaInformation> {
    @SneakyThrows
    @Override
    public KafkaInformation deserialize(String topic, byte[] data) {
        // FIX: per the Kafka Deserializer contract, null payloads (tombstones)
        // must deserialize to null instead of being fed to the decoder.
        if (data == null) {
            return null;
        }
        return ByteUtil.byteToObject(data);
    }
}