1.说明
此代码实现是针对kafka_2.10的0.8.2.1版本Java代码实现,消费者是针对多个Topic消费的多线程实现
2.安装
参考:搭建Kafka简单教程
3.导入依赖
此处只导入kafka的依赖,线程池用的是spring的ThreadPoolTaskExecutor线程池。
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.8.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.8.2.1</version>
</dependency>
4.配置
4.1 线程池的简单配置
@Configuration
public class SpringAsyncConfig {

    /** Maximum number of threads the pool may grow to (default 80). */
    @Value(value = "${async.pool.max.size:80}")
    private int maxPoolSize;

    /** Capacity of the task queue used before extra threads are spawned (default 20). */
    @Value(value = "${async.pool.queue.size:20}")
    private int queueSize;

    /** Number of core threads kept alive (default 5). */
    @Value(value = "${async.pool.core.size:5}")
    private int corePoolSize;

    // NOTE: the original code declared a second, unused field (knowsCorePoolSize)
    // bound to the same ${async.pool.core.size:5} property; it has been removed.

    /**
     * Shared async executor for consumer threads.
     *
     * @return a ThreadPoolTaskExecutor that waits for queued tasks on shutdown
     */
    @Bean(name = "asyncTaskExecutor")
    public AsyncTaskExecutor taskExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setMaxPoolSize(maxPoolSize);
        executor.setQueueCapacity(queueSize);
        executor.setCorePoolSize(corePoolSize);
        // Let in-flight consumer tasks finish before the context closes.
        executor.setWaitForTasksToCompleteOnShutdown(true);
        return executor;
    }
}
4.2 Kafka的简单配置
@Configuration
public class KafkaConfig {

    /**
     * Producer properties for the 0.8.2.1 "new" producer API.
     * Note: mind the broker address, especially when Kafka runs on a remote
     * Linux host — a wrong address silently prevents messages from arriving.
     */
    public Properties producerConfig() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("timeout.ms", 3000);
        props.put("metadata.fetch.timeout.ms", 3000);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // acks=0: fire-and-forget; the broker sends no acknowledgement, so most
        // delivery failures will never reach the send callback.
        props.put("acks", "0");
        return props;
    }

    /**
     * Typed producer bean. Parameterized as <String, String> to match the
     * String serializers configured above (the original used a raw type).
     */
    @Bean
    public KafkaProducer<String, String> kafkaProducer() {
        return new KafkaProducer<>(producerConfig());
    }

    /**
     * Consumer properties for the 0.8 high-level (ZooKeeper-based) consumer.
     * Note: mind the ZooKeeper address — a wrong address silently prevents
     * messages from being received.
     */
    public Properties consumerConfig() {
        Properties props = new Properties();
        // "smallest": start from the earliest available offset when no
        // committed offset exists for the group.
        props.put("auto.offset.reset", "smallest");
        props.put("zookeeper.connect", "172.31.52.83:2181");
        props.put("group.id", "defaultGroup");
        props.put("zookeeper.session.timeout.ms", "10000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        props.put("partition.assignment.strategy", "range");
        return props;
    }

    /** Consumer configuration bean consumed by KafkaConsumerStart. */
    @Bean
    public ConsumerConfig kafkaConsumer() {
        return new ConsumerConfig(consumerConfig());
    }
}
5.生产者
@Component
public class KafkaProducerClient {

    /** Typed producer (was a raw type in the original); matches the String serializers. */
    @Autowired
    private KafkaProducer<String, String> kafkaProducer;

    /**
     * Sends a message to the given topic asynchronously.
     * With acks=0 configured, the callback's exception will rarely fire for
     * broker-side failures; it mainly reports client-side errors.
     *
     * @param topic   destination topic
     * @param message message payload
     */
    private void produce(String topic, String message) {
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, message);
        kafkaProducer.send(record, (metadata, e) -> {
            if (e != null) {
                // Message failed to send (kept as console output to preserve behavior).
                System.out.println("消息发送失败");
            }
        });
    }
}
6.消费者
可以有若干个Topic,这里演示的是有两个Topic
6.1 消费者启动入口
/**
* 消费者启动类
* 简单说明:实现InitializingBean是为了项目启动后就启动消费者
*/
/**
 * Consumer bootstrap class.
 * Implements InitializingBean so the consumers start as soon as the
 * application context has been initialized.
 */
@Component
public class KafkaConsumerStart implements InitializingBean {

    /** Thread pool that runs one handler thread per topic. */
    @Autowired
    private AsyncTaskExecutor taskExecutor;

    /** High-level (ZooKeeper-based) consumer configuration. */
    @Autowired
    private ConsumerConfig consumerConfig;

    /**
     * Business-layer handler for consumed messages. Because the handlers run
     * on worker threads, the Spring-managed bean must be injected here and
     * passed into each Runnable explicitly. (Implementation omitted.)
     */
    @Autowired
    private TopicsHandleBiz topicsHandleBiz;

    private ConsumerConnector consumer;

    @Override
    public void afterPropertiesSet() {
        try {
            consumer = Consumer.createJavaConsumerConnector(consumerConfig);
            Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
            // One stream (thread) per topic.
            String testTopic1 = "testTopic1";
            String testTopic2 = "testTopic2";
            topicCountMap.put(testTopic1, 1);
            topicCountMap.put(testTopic2, 1);
            Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
                    consumer.createMessageStreams(topicCountMap);

            // Start a worker thread for testTopic1.
            // Guard against a null entry: Map.get returns null when the topic
            // is missing from the result, which would NPE in the original code.
            List<KafkaStream<byte[], byte[]>> streams1 = consumerMap.get(testTopic1);
            if (streams1 != null && !streams1.isEmpty()) {
                taskExecutor.execute(new TestTopicHandle1(streams1.get(0), topicsHandleBiz));
            }

            // Start a worker thread for testTopic2.
            List<KafkaStream<byte[], byte[]>> streams2 = consumerMap.get(testTopic2);
            if (streams2 != null && !streams2.isEmpty()) {
                taskExecutor.execute(new TestTopicHandle2(streams2.get(0), topicsHandleBiz));
            }
        } catch (Exception e) {
            // Startup failure is logged but does not abort context initialization.
            e.printStackTrace();
        }
    }
}
6.2 线程处理testTopic1
/** Worker that drains the testTopic1 stream and hands each message to the business layer. */
public class TestTopicHandle1 implements Runnable {

    /** Injected business handler — must be used via this instance, since the
     * worker thread cannot resolve Spring beans itself. */
    private TopicsHandleBiz topicsHandleBiz;

    private KafkaStream kafkaStream;

    // 'throws ClassNotFoundException' kept to preserve the original constructor
    // signature, although nothing in the body throws it.
    public TestTopicHandle1(KafkaStream kafkaStream, TopicsHandleBiz topicsHandleBiz) throws ClassNotFoundException {
        this.kafkaStream = kafkaStream;
        this.topicsHandleBiz = topicsHandleBiz;
    }

    @Override
    public void run() {
        ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
        // Blocks on hasNext() until the next message arrives or the connector shuts down.
        while (it.hasNext()) {
            String message = new String(it.next().message());
            try {
                // Save the testTopic1 message.
                // BUG FIX: the original called TopicsHandleBiz.save(message)
                // statically on the class, bypassing the injected instance.
                topicsHandleBiz.save(message);
            } catch (Exception e) {
                // Keep consuming even if one message fails to save.
                e.printStackTrace();
            }
        }
    }
}
6.3 线程处理testTopic2
/** Worker that drains the testTopic2 stream and hands each message to the business layer. */
public class TestTopicHandle2 implements Runnable {

    /** Injected business handler — must be used via this instance, since the
     * worker thread cannot resolve Spring beans itself. */
    private TopicsHandleBiz topicsHandleBiz;

    private KafkaStream kafkaStream;

    // 'throws ClassNotFoundException' kept to preserve the original constructor
    // signature, although nothing in the body throws it.
    public TestTopicHandle2(KafkaStream kafkaStream, TopicsHandleBiz topicsHandleBiz) throws ClassNotFoundException {
        this.kafkaStream = kafkaStream;
        this.topicsHandleBiz = topicsHandleBiz;
    }

    @Override
    public void run() {
        ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
        // Blocks on hasNext() until the next message arrives or the connector shuts down.
        while (it.hasNext()) {
            String message = new String(it.next().message());
            try {
                // Save the testTopic2 message.
                // BUG FIX: the original called TopicsHandleBiz.save(message)
                // statically on the class, bypassing the injected instance.
                topicsHandleBiz.save(message);
            } catch (Exception e) {
                // Keep consuming even if one message fails to save.
                e.printStackTrace();
            }
        }
    }
}