最常用的MQ。前面两个都已经说了,这算是最后一个了,依旧是直接上代码。
这次是直接用的kafka和springboot的整合。最基本的原生API版本没整理出来,spring的有时间再做一下。
pom.xml文件
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>1.1.1.RELEASE</version>
</dependency>
生产者:
package com.yjp.kafkaspringboot.kafka;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import java.util.HashMap;
import java.util.Map;
@Configuration
@EnableKafka
public class KafkaProducerConfig {

    /**
     * Builds the raw producer configuration map.
     *
     * @return mutable map of producer properties, keyed by {@link ProducerConfig} constants
     */
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        // Broker address. NOTE(review): the consumer config in this project points at
        // 197.168.27.75:9092 — confirm which address is actually intended.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // acks=0: fire-and-forget, no broker confirmation; acks=1: wait for the
        // partition leader's ack only. Use the ProducerConfig constant for
        // consistency with the other keys (was a bare "acks" string literal).
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        // Number of retries on transient send errors.
        props.put(ProducerConfig.RETRIES_CONFIG, 1);
        // Maximum batch size in bytes; 0 effectively disables batching, so every
        // record is sent in its own request.
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 0);
        // Linger time (ms): requests arriving within this window are merged to
        // reduce the number of sends; 0 submits immediately without waiting.
        props.put(ProducerConfig.LINGER_MS_CONFIG, 0);
        // Total memory (bytes) available to buffer records when production
        // outpaces the network send rate.
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 46900);
        // Key serializer; when a key is supplied, its hash selects the target partition.
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Value serializer.
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Custom partitioner for topics with multiple partitions (disabled).
        // props.put("partitioner.class", "cn.com.dimensoft.kafka.SamplePartition");
        // Sync/async send mode (legacy scala-client property; disabled).
        // props.put("producer.type", "sync");
        return props;
    }

    /**
     * Creates the producer factory backed by {@link #producerConfigs()}.
     */
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /**
     * Exposes the {@link KafkaTemplate} bean used by callers to send messages.
     */
    @Bean(name = "kafkaTemplate")
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
消费者:
package com.yjp.kafkaspringboot.kafka;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import java.util.HashMap;
import java.util.Map;
@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    /**
     * Builds the raw consumer configuration map.
     *
     * @return mutable map of consumer properties, keyed by {@link ConsumerConfig} constants
     */
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        // ZooKeeper address — only used by the legacy high-level consumer (disabled).
        // propsMap.put("zookeeper.connect", "localhost:2181");
        // Broker address. NOTE(review): the producer config in this project uses
        // localhost:9092 — confirm which address is actually intended.
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "197.168.27.75:9092");
        // Auto-commit consumed offsets periodically so a restarted consumer in the
        // same group resumes from the last committed position.
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        // Interval (ms) between automatic offset commits.
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
        // Max time (ms) without a heartbeat before the coordinator evicts this
        // consumer from the group and rebalances its partitions.
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // Consumer group this listener belongs to.
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group");
        // Where to start when no committed offset exists:
        // "latest" = newest records only, "earliest" = from the beginning.
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        return propsMap;
    }

    /**
     * Creates the consumer factory. Parameterized as {@code <String, String>}
     * (was a raw type) to match the listener container factory below.
     */
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /**
     * Listener container factory used to create containers for
     * {@code @KafkaListener}-annotated methods.
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // Number of concurrent listener threads (one container thread).
        factory.setConcurrency(1);
        // Max time (ms) to block in poll() waiting for records.
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    /**
     * Registers the message listener bean.
     */
    @Bean
    public Listener listener() {
        return new Listener();
    }
}
监听:
package com.yjp.kafkaspringboot.kafka;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import java.util.Optional;
/**
 * Kafka message listener; methods annotated with {@code @KafkaListener} are
 * invoked by the container for each record received on the subscribed topics.
 */
public class Listener {

    // Counter incremented once per message received by listen2 (demo logging only).
    // NOTE(review): an unused sibling field `static int i` was removed.
    static int j;

    /**
     * Consumes records from both "test-topic" and "test-topic-1" and prints the payload.
     *
     * @param record the received record; {@code record.value()} is the message payload
     */
    @KafkaListener(topics = {"test-topic", "test-topic-1"})
    public void listen1(ConsumerRecord<String, String> record) {
        // ConsumerRecord wraps the message; value() yields the deserialized payload.
        System.err.println("----->" + record.value());
    }

    /**
     * Consumes records from "test-topic" only and prints a running count plus the payload.
     *
     * @param record the received record
     */
    @KafkaListener(topics = "test-topic")
    public void listen2(ConsumerRecord<String, String> record) {
        System.err.println("listen2 " + (j++));
        // Guard against a null payload before printing (replaces the raw
        // Optional + isPresent()/get() antipattern).
        Optional.ofNullable(record.value())
                .ifPresent(message -> System.err.println("listen2" + message));
    }
}
Test:
package com.yjp.kafkaspringboot;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.test.context.junit4.SpringRunner;
@RunWith(SpringRunner.class)
@SpringBootTest
public class KafkaSpringbootApplicationTests {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * Sends 100 messages to each of the two test topics.
     */
    @Test
    public void contextLoads() {
        for (int i = 0; i < 100; i++) {
            kafkaTemplate.send("test-topic", "hello " + i);
        }
        for (int i = 0; i < 100; i++) {
            kafkaTemplate.send("test-topic-1", "hello-1 " + i);
        }
        // send() is asynchronous: records sit in the producer buffer until the
        // batch/linger conditions fire. Flush so everything actually reaches the
        // broker before the test JVM (and the producer) shuts down.
        kafkaTemplate.flush();
    }
}
代码到这里结束,在代码开始前启动kafka和zookeeper
启动kafka
使用cmd命令进入到D:\kafka\kafka_2.11-1.0下输入
.\bin\windows\kafka-server-start.bat .\config\server.properties
努力吧,皮卡丘