文章目录
1. 安装部署kafka
1.1 安装docker-compose
apt install docker-compose
1.2 编写docker-compose.yml文件
version: '3.3'

services:
  zookeeper:
    image: wurstmeister/zookeeper
    container_name: zookeeper
    ports:
      # Quoted: digits+colon values hit YAML 1.1 sexagesimal parsing otherwise.
      - "2181:2181"
    networks:
      default:
        ipv4_address: 172.19.0.11

  kafka0:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: kafka0
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:9092
      KAFKA_LISTENERS: PLAINTEXT://kafka0:9092
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      # Env values as strings so the broker id is passed through verbatim.
      KAFKA_BROKER_ID: "0"
    volumes:
      - /root/data/kafka0/data:/data
      - /root/data/kafka0/log:/datalog
    networks:
      default:
        ipv4_address: 172.19.0.12

  kafka1:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: kafka1
    ports:
      - "9093:9093"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9093
      KAFKA_LISTENERS: PLAINTEXT://kafka1:9093
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_BROKER_ID: "1"
    volumes:
      - /root/data/kafka1/data:/data
      - /root/data/kafka1/log:/datalog
    networks:
      default:
        ipv4_address: 172.19.0.13

  kafka2:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: kafka2
    ports:
      - "9094:9094"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9094
      KAFKA_LISTENERS: PLAINTEXT://kafka2:9094
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_BROKER_ID: "2"
    volumes:
      - /root/data/kafka2/data:/data
      - /root/data/kafka2/log:/datalog
    networks:
      default:
        ipv4_address: 172.19.0.14

  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    restart: unless-stopped
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
      - "9000:9000"
    links:  # containers created by this compose file
      - kafka0
      - kafka1
      - kafka2
    external_links:  # containers outside this compose file
      - zookeeper
    environment:
      # Change to your host's IP address; quoted because digits+colon
      # would otherwise be misparsed by YAML 1.1 loaders.
      ZK_HOSTS: "192.168.210.251:2181"
      TZ: CST-8

networks:
  default:
    external:
      name: zookeeper_kafka
1.3 创建子网
docker network create --subnet 172.19.0.0/16 --gateway 172.19.0.1 zookeeper_kafka
1.4 搭建
docker-compose -f docker-compose.yml up -d
查看容器启动状态
docker ps -a
1.5 验证
1.5.1 进入kafka0
docker exec -it kafka0 bash
1.5.2 进入bin目录
cd /opt/kafka_2.13-2.7.0/bin/
1.5.3 创建Topic
将192.168.210.251替换为你的zookeeper的IP地址
partitions 分区数
replication-factor 副本数
./kafka-topics.sh --create --topic chat --partitions 5 --zookeeper 192.168.210.251:2181 --replication-factor 3
1.5.4 设置生产者
./kafka-console-producer.sh --broker-list kafka0:9092 --topic chat
1.5.5 进入kafka2
打开一个新的shell界面
docker exec -it kafka2 bash
cd /opt/kafka_2.13-2.7.0/bin/
1.5.6 设置消费者
./kafka-console-consumer.sh --bootstrap-server kafka2:9094 --topic chat
1.5.7 消息发送与接收
生产者:
消费者:
2. SpringBoot整合kafka
2.1 引入POM依赖
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
2.2 application.yml配置文件
spring:
  kafka:
    # ---------- Kafka cluster ----------
    bootstrap-servers: 10.20.121.82:9092,10.20.121.82:9093,10.20.121.82:9094

    # ---------- Producer ----------
    producer:
      # Number of retries on send failure.
      retries: 0
      # Ack level: how many partition replicas must confirm before the
      # broker acks the producer (0, 1, or all/-1).
      acks: 1
      # Batch size in bytes.
      batch-size: 16384
      # Send delay: the producer flushes a batch to Kafka once batch-size
      # is reached or linger.ms has elapsed. With linger.ms = 0 every
      # record is sent immediately, which effectively disables batching.
      properties:
        linger:
          ms: 0
      # Producer buffer size in bytes.
      buffer-memory: 33554432
      # Serializers supplied by Kafka.
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer

    # ---------- Consumer ----------
    consumer:
      properties:
        # Default consumer group ID.
        group:
          id: defaultConsumerGroup
        # Session timeout: a rebalance is triggered if the consumer sends
        # no heartbeat within this window.
        session:
          timeout:
            ms: 120000
        # Fetch/request timeout.
        request:
          timeout:
            ms: 180000
      # Whether offsets are committed automatically.
      enable-auto-commit: true
      # Auto-commit interval (how long after receipt the offset is committed).
      # NOTE: this must be the Spring Boot property `auto-commit-interval`;
      # a nested `auto.commit.interval.ms` under `consumer` is not a known
      # Spring property and is silently ignored.
      auto-commit-interval: 1000
      # Offset reset policy when Kafka has no initial offset or the stored
      # offset is out of range:
      #   earliest: reset to the smallest offset in the partition
      #   latest:   reset to the newest offset (consume only new records)
      #   none:     throw if any partition has no committed offset
      auto-offset-reset: latest
      # Deserializers supplied by Kafka.
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer

    # Don't fail application startup when a listened-to topic is missing.
    listener:
      missing-topics-fatal: false
2.3 简单生产者
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Resource;
@RestController
@RequestMapping("/kafka/")
public class KafkaController {

    /** Template used to publish messages to Kafka. */
    @Resource
    private KafkaTemplate<String, Object> kafkaTemplate;

    /**
     * Publishes the message carried by the request body to the "chat" topic.
     *
     * @param kafkaDTO request payload whose {@code msg} field is sent to Kafka
     */
    @PostMapping("sendMsg")
    public void sendMsg(@RequestBody KafkaDTO kafkaDTO) {
        String payload = kafkaDTO.getMsg();
        kafkaTemplate.send("chat", payload);
    }
}
2.4 简单消费者
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
@Component
public class KafkaConsumer {

    /**
     * Listens on the "chat" topic and prints each record that arrives.
     *
     * <p>Javadoc moved above the annotation: placed between the annotation
     * and the method signature it compiles but is not associated with the
     * method by javadoc tooling.
     *
     * @param record the consumed record; topic, partition and value are printed
     */
    @KafkaListener(topics = {"chat"})
    public void chatMsg(ConsumerRecord<?, ?> record) {
        // Show which topic/partition the message came from plus its payload.
        System.out.println("简单消费:" + record.topic()
                + "-" + record.partition()
                + "-" + record.value());
    }
}
2.5 消息测试