上一篇讲述了 Kafka 的消息推送,本篇文章同样讲述 Kafka 的推送,内容大同小异!
生产者
pom.xml文件引入依赖
<!-- Spring for Apache Kafka support; version is presumably managed by the Spring Boot parent POM (no <version> given) — confirm against the parent -->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
application.yml文件配置kafka信息
# Kafka message-push configuration.
# NOTE: indentation restored — in the original paste all keys were flattened to
# column 0, which is not the nested structure application.yml requires.
# These are custom properties (read via @Value in this project), not the
# standard spring.kafka.* properties — confirm against the consuming code.
kafka:
  # Broker address (host:port of the Kafka bootstrap server)
  url: 192.168.124.22:9092
  # Default topic name used by the producer
  topic: testKafka
  # Number of times a message is re-sent after a send error
  retries: 0
  # How the broker acknowledges data from the producer
  # (1 = leader writes the record before responding)
  acks: 1
  # Consumer configuration
  consumer:
    enable-auto-commit: true
    group-id: test_group
    # Interval (ms) between automatic offset commits
    auto-commit-interval: 1000
    # Where to start reading when no committed offset exists
    auto-offset-reset: earliest
    # Number of concurrent listener threads
    concurrency: 3
ProducerKafkaService 类
package com.bigdata.bigdata.service;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
@Service
public class ProducerKafkaService {

    /** Template used to publish messages to Kafka; wired by Spring Boot auto-configuration. */
    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    /**
     * Sends {@code msg} to the given topic asynchronously, logging the outcome
     * via a callback.
     *
     * <p>When {@code topic} is {@code null} or blank the message is routed to
     * the template's default topic instead (fix: the original only checked for
     * {@code null}, so an empty string would have been used as a topic name,
     * which is invalid). The default topic must be configured on the template —
     * otherwise {@code sendDefault} throws; confirm against the Kafka config.
     *
     * <p>NOTE(review): method name keeps the original typo ("sendMesssge")
     * because callers (e.g. the controller) depend on it; rename in a
     * coordinated change.
     *
     * @param msg   message payload to publish
     * @param topic target topic, or {@code null}/blank for the default topic
     */
    public void sendMesssge(String msg, String topic) {
        ListenableFuture<SendResult<String, Object>> future;
        if (topic == null || topic.trim().isEmpty()) {
            // No usable topic supplied: fall back to the template's default topic.
            future = kafkaTemplate.sendDefault(msg);
        } else {
            future = kafkaTemplate.send(topic, msg);
        }
        // Asynchronous result handling: the send returns immediately and the
        // broker acknowledgement (or failure) arrives through this callback.
        future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
            @Override
            public void onFailure(Throwable throwable) {
                System.out.println("kafka推送失败" + throwable);
            }

            @Override
            public void onSuccess(SendResult<String, Object> result) {
                System.out.println("kafka推送成功:topic=" + result.getProducerRecord().topic()
                        + ",消息=" + result.getProducerRecord().value()
                        + ",分区=" + result.getRecordMetadata().partition()
                        + ",偏移量=" + result.getRecordMetadata().offset());
            }
        });
    }
}
ProducerKafkaController 类
package com.bigdata.bigdata.controller;
import com.alibaba.fastjson.JSON;
import com.bigdata.bigdata.entity.pojo.MsgLog;
import com.bigdata.bigdata.service.ProducerKafkaService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
impo