1. Kafka configuration (application.properties)
spring.kafka.bootstrap-servers=IP:9092,IP:9093
# producer
spring.kafka.producer.retries=1
spring.kafka.producer.batch-size=16384
spring.kafka.producer.buffer-memory=33554432
spring.kafka.producer.acks=all
spring.kafka.producer.properties.linger.ms=5
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
# consumer
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-commit-interval=1000
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
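The steps below all use a topic named springboottopic. If the broker is not configured to auto-create topics, the topic can be declared as a bean so that KafkaAdmin (see step 2) creates it on startup. This is a minimal sketch; the single partition and replication factor of 1 are assumed values for a local setup, not taken from the original project.

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class TopicConfig {

    // Declares the topic used in the later steps; KafkaAdmin creates it on
    // application startup if it does not already exist.
    @Bean
    public NewTopic springBootTopic() {
        return new NewTopic("springboottopic", 1, (short) 1);
    }
}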
2. Configuration bean that loads these properties
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.transaction.KafkaTransactionManager;

@Configuration
public class KafkaConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;
    @Value("${spring.kafka.producer.retries}")
    private int retries;
    @Value("${spring.kafka.producer.acks}")
    private String acks;
    @Value("${spring.kafka.producer.batch-size}")
    private int batchSize;
    @Value("${spring.kafka.producer.properties.linger.ms}")
    private int linger;
    @Value("${spring.kafka.producer.buffer-memory}")
    private int bufferMemory;

    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.ACKS_CONFIG, acks);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Custom interceptor chain (optional):
        //List<String> interceptors = new ArrayList<>();
        //interceptors.add("com.zhangmr.interceptor.TestProducerInterceptor");
        //interceptors.add("com.zhangmr.interceptor.CounterInterceptor");
        //props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, interceptors);
        // Custom partitioner (optional):
        //props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.zhangmr.partition.SimplePartitioner");
        return props;
    }

    public ProducerFactory<String, Object> producerFactory() {
        DefaultKafkaProducerFactory<String, Object> factory = new DefaultKafkaProducerFactory<>(producerConfigs());
        // Setting a transaction id prefix is what makes this factory transaction-capable.
        factory.setTransactionIdPrefix("tran-");
        return factory;
    }

    @Bean
    public KafkaTransactionManager<String, Object> transactionManager() {
        return new KafkaTransactionManager<>(producerFactory());
    }

    // Kafka admin bean, the counterpart of RabbitMQ's rabbitAdmin; without it the
    // AdminClient below cannot be built to create topics programmatically.
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        // Connection address of the Kafka brokers
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        return new KafkaAdmin(props);
    }

    // AdminClient bean; once registered it can be injected anywhere and used to create or inspect topics.
    @Bean
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfig());
    }
}
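The KafkaTransactionManager above only matters if sends actually run inside a transaction; the simple producer in step 4 uses the auto-configured, non-transactional KafkaTemplate. Below is a minimal sketch (not part of the original project) of wiring the transaction-capable producerFactory() into its own template and sending atomically; the class name TransactionalSender and the two-message example are assumptions.

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

@Service
public class TransactionalSender {

    private final KafkaTemplate<String, Object> transactionalTemplate;

    public TransactionalSender(KafkaConfig kafkaConfig) {
        // Built directly from the transaction-capable factory defined in KafkaConfig above.
        this.transactionalTemplate = new KafkaTemplate<>(kafkaConfig.producerFactory());
    }

    public void sendInTransaction(String msg) {
        // Both records are committed only if the whole callback completes without throwing.
        transactionalTemplate.executeInTransaction(ops -> {
            ops.send("springboottopic", msg);
            ops.send("springboottopic", msg + "-copy");
            return null;
        });
    }
}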
3. Create a consumer listener
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class ConsumerListener {

    @KafkaListener(topics = "springboottopic", groupId = "springboottopic-group")
    public void onMessage(String msg) {
        System.out.println("---- message received: " + msg + " ----");
    }
}
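If the key, partition or offset is also needed, the listener method can take the full ConsumerRecord instead of just the payload. This variant is an illustrative addition, not part of the original code; it uses a separate group id so it receives every message independently of the listener above.

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class ConsumerRecordListener {

    // Receiving the full ConsumerRecord exposes the key, partition and offset of each message.
    @KafkaListener(topics = "springboottopic", groupId = "springboottopic-record-group")
    public void onMessage(ConsumerRecord<String, String> record) {
        System.out.println("---- received: partition=" + record.partition()
                + ", offset=" + record.offset()
                + ", key=" + record.key()
                + ", value=" + record.value() + " ----");
    }
}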
4. Create a producer to send messages
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class KafkaProducer {

    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    public String send(String msg) {
        kafkaTemplate.send("springboottopic", msg);
        return "ok";
    }
}
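Note that send() is asynchronous, so "ok" is returned before the broker has acknowledged the record. Below is a hedged sketch of an extra method for the class above that logs the send result; it assumes spring-kafka 2.x, where send() returns a ListenableFuture (in 3.x it returns a CompletableFuture and whenComplete would be used instead).

    public String sendAndConfirm(String msg) {
        kafkaTemplate.send("springboottopic", msg)
                .addCallback(
                        // Success: the broker has stored the record; log where it landed.
                        result -> System.out.println("---- sent to partition "
                                + result.getRecordMetadata().partition()
                                + ", offset " + result.getRecordMetadata().offset() + " ----"),
                        // Failure: producer-level retries are already exhausted at this point.
                        ex -> System.err.println("---- send failed: " + ex.getMessage() + " ----"));
        return "ok";
    }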
5. Create a test class
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;

@SpringBootTest
class KafkaTests {

    @Autowired
    KafkaProducer producer;

    // Consumer side: start kafkaApp (the Spring Boot application) first so the listener is running.
    @Test
    void testSendMsg() {
        long time = System.currentTimeMillis();
        System.out.println("---- " + time + ", message sent ----");
        producer.send("zhangmr" + time);
    }
    // The commented-out methods below are AdminClient usage examples (originally
    // controller methods); they assume an injected AdminClient field, e.g.
    // @Autowired AdminClient adminClient.

    // List all topics on the cluster:
    //public void getAllTopic() throws Exception {
    //    ListTopicsResult listTopics = adminClient.listTopics();
    //    Set<String> topics = listTopics.names().get();
    //    for (String topic : topics) {
    //        System.err.println(topic);
    //    }
    //}

    // Get the partition count of a topic:
    //public void getPartition(String topic) {
    //    try {
    //        TopicDescription description = adminClient.describeTopics(Arrays.asList(topic)).all().get().get(topic);
    //        int num = description.partitions().size();
    //        System.out.println("num:" + num);
    //    } catch (Throwable e) {
    //        e.printStackTrace();
    //    }
    //}

    // Create a topic and then increase its partition count:
    //public void updatePartitions(String topic, int numPartitions) {
    //    NewPartitions newPartitions = NewPartitions.increaseTo(numPartitions);
    //    Map<String, NewPartitions> map = new HashMap<>();
    //    map.put(topic, newPartitions);
    //    NewTopic topic1 = new NewTopic(topic, 1, (short) 1);
    //    adminClient.createTopics(Arrays.asList(topic1));
    //    try {
    //        adminClient.createPartitions(map).all().get();
    //    } catch (Throwable e) {
    //        e.printStackTrace();
    //    }
    //}
}
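As a runnable counterpart to the commented-out snippets above, here is a minimal sketch of a test that injects the adminClient bean from step 2 and lists the topics on the cluster; the class and method names are illustrative, not from the original project.

import java.util.Set;

import org.apache.kafka.clients.admin.AdminClient;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;

@SpringBootTest
class AdminClientTests {

    // The AdminClient bean registered in KafkaConfig (step 2).
    @Autowired
    AdminClient adminClient;

    @Test
    void listTopics() throws Exception {
        // names() returns a KafkaFuture; get() blocks until the brokers answer.
        Set<String> topics = adminClient.listTopics().names().get();
        topics.forEach(System.out::println);
    }
}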
Start the Spring Boot project first, then run the test method.
6. View the Kafka messages on the server
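For example, with the console consumer that ships with Kafka (the installation path and broker address depend on your environment):

bin/kafka-console-consumer.sh --bootstrap-server IP:9092 --topic springboottopic --from-beginning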