安装kafka
$ wget http://mirrors.shu.edu.cn/apache/kafka/2.0.0/kafka_2.11-2.0.0.tgz
$ tar -zxvf kafka_2.11-2.0.0.tgz
配置和启动kafka
#启动zookeeper 指定 zookeeper 配置文件
./bin/zookeeper-server-start.sh ./config/zookeeper.properties
#打开kafka配置文件,开启监听端口
$ vim ./config/server.properties
listeners=PLAINTEXT://localhost:9092
#启动kafka 服务
$ ./bin/kafka-server-start.sh ./config/server.properties
测试kafka
消费者
$ ./bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning
生产者
./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test
pom
注:不确定版本对应的时候,把version去掉,spring-boot-starter-parent做了版本对应
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<!-- <dependency>-->
<!-- <groupId>org.mybatis.spring.boot</groupId>-->
<!-- <artifactId>mybatis-spring-boot-starter</artifactId>-->
<!-- <version>1.3.2</version>-->
<!-- </dependency>-->
<!-- https://mvnrepository.com/artifact/org.springframework.kafka/spring-kafka -->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<!-- <version>2.1.11.RELEASE</version>-->
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<!-- <version>2.4.0</version>-->
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
<version>2.8.2</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.junit.vintage</groupId>
<artifactId>junit-vintage-engine</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
配置外网访问
可以参考
advertised.listeners=PLAINTEXT://外网ip:9092
配置这个要把上面的listeners注释(listeners是内网访问)
application配置文件
server.port=8099
#kafka配置
spring.kafka.bootstrap-servers=47.100.90.225:9092
#=============== provider =======================
spring.kafka.producer.retries=0
# 每次批量发送消息的数量
spring.kafka.producer.batch-size=16384
spring.kafka.producer.buffer-memory=33554432
# 指定消息key和消息体的编解码方式
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
#=============== consumer =======================
# 指定默认消费者group id
spring.kafka.consumer.group-id=default
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-commit-interval=100
# 指定消息key和消息体的编解码方式
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
ProducerDemo
@RestController
public class KafkaProducer {
/** Topic this demo publishes to; matches the topic used by the console consumer/producer test above. */
private static final String MY_TOPIC = "test";
/**
 * Parameterized as {@code <String, String>} to match the StringSerializer
 * configured for both key and value in the application properties; the raw
 * {@code KafkaTemplate} type loses compile-time type safety.
 */
@Autowired
KafkaTemplate<String, String> kafkaTemplate;
/**
 * Publishes the given message to the "test" topic.
 *
 * @param msg message payload, taken from the "msg" request parameter
 */
@PostMapping(value = "/kafka")
public void produce(@RequestParam(value = "msg")String msg){
kafkaTemplate.send(MY_TOPIC,msg);
}
}
ConsumerDemo
@Component
public class KafkaConsumer {
/**
 * Example listener for a "demo" topic; the {@code record} parameter would expose
 * the received message's metadata (topic, offset, value) via ConsumerRecord<?,?>.
 * Kept commented out because the "demo" topic does not exist on the broker,
 * which would cause an error at startup.
 */
// @KafkaListener(topics = "demo")
// public void listen (ConsumerRecord<?, ?> record){
// System.out.printf("topic is %s, offset is %d, value is %s \n", record.topic(), record.offset(), record.value());
// }
/**
 * Consumes messages from the "test" topic — the same topic the producer
 * controller sends to — and prints each payload to stdout.
 *
 * @param message the message value, deserialized as a String per the
 *                StringDeserializer configured in the application properties
 */
@KafkaListener(topics = "test")
public void consume(String message){
System.out.println("receive msg "+ message);
}
}
效果