Ubuntu docker集群部署kafka、SpringBoot整合kafka方案

1. 安装部署kafka

1.1 安装docker-compose

apt install docker-compose

1.2 编写docker-compose.yml文件

version: '3.3'

services:
  zookeeper:
    image: wurstmeister/zookeeper
    ports:
      - "2181:2181"
    container_name: zookeeper
    networks:
      default:
        ipv4_address: 172.19.0.11
  kafka0:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: kafka0
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:9092
      KAFKA_LISTENERS: PLAINTEXT://kafka0:9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      # Broker IDs must be unique per broker; quoted so the env value stays a string.
      KAFKA_BROKER_ID: "0"
    volumes:
      - /root/data/kafka0/data:/data
      - /root/data/kafka0/log:/datalog
    networks:
      default:
        ipv4_address: 172.19.0.12
  kafka1:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: kafka1
    ports:
      - "9093:9093"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9093
      KAFKA_LISTENERS: PLAINTEXT://kafka1:9093
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: "1"
    volumes:
      - /root/data/kafka1/data:/data
      - /root/data/kafka1/log:/datalog
    networks:
      default:
        ipv4_address: 172.19.0.13
  kafka2:
    image: wurstmeister/kafka
    depends_on:
      - zookeeper
    container_name: kafka2
    ports:
      - "9094:9094"
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9094
      KAFKA_LISTENERS: PLAINTEXT://kafka2:9094
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: "2"
    volumes:
      - /root/data/kafka2/data:/data
      - /root/data/kafka2/log:/datalog
    networks:
      default:
        ipv4_address: 172.19.0.14
  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    restart: unless-stopped
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
      - "9000:9000"
    links:            # containers created by THIS compose file
      - kafka0
      - kafka1
      - kafka2
      # zookeeper is also defined in this compose file, so it belongs under
      # `links`, not `external_links` (which is for containers started
      # outside this compose project).
      - zookeeper
    environment:
      ZK_HOSTS: "192.168.210.251:2181"               # change to your host's IP
      TZ: CST-8
networks:
  default:
    external:
      name: zookeeper_kafka

1.3 创建子网

docker network create --subnet 172.19.0.0/16 --gateway 172.19.0.1 zookeeper_kafka

1.4 搭建

docker-compose -f docker-compose.yml up -d

在这里插入图片描述
查看容器启动状态

docker ps -a

在这里插入图片描述

1.5 验证

1.5.1 进入kafka0

docker exec -it kafka0 bash

1.5.2 进入bin目录

cd /opt/kafka_2.13-2.7.0/bin/

1.5.3 创建Topic

将 192.168.210.251 替换为你部署 zookeeper 的宿主机 IP 地址
partitions 分区数
replication-factor 副本数

./kafka-topics.sh --create --topic chat --partitions 5 --zookeeper 192.168.210.251:2181 --replication-factor 3

1.5.4 设置生产者

./kafka-console-producer.sh --broker-list kafka0:9092 --topic chat

1.5.5 进入kafka2

打开一个新的shell界面

docker exec -it kafka2 bash
cd /opt/kafka_2.13-2.7.0/bin/

1.5.6 设置消费者

./kafka-console-consumer.sh --bootstrap-server kafka2:9094 --topic chat

1.5.7 消息发送与接收

生产者:
在这里插入图片描述
消费者:
在这里插入图片描述

2. SpringBoot整合kafka

2.1 引入POM依赖

<!-- Spring for Apache Kafka; no explicit version — it is managed by the
     Spring Boot parent/BOM the project presumably inherits (confirm in pom). -->
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>

2.2 application.yml配置文件

spring:
  kafka:
    # ========== Kafka cluster ==========
    bootstrap-servers: 10.20.121.82:9092,10.20.121.82:9093,10.20.121.82:9094
    # ========== Producer configuration ==========
    producer:
      # Number of retries after a send failure.
      retries: 0
      # Ack level: how many partition replicas must confirm before the
      # producer gets an ack (0, 1, or all/-1). Quoted: the property is a string.
      acks: "1"
      # Batch size in bytes.
      batch-size: 16384
      # Send delay: messages are flushed once batch-size is reached OR
      # linger.ms has elapsed. linger.ms = 0 means every message is sent
      # immediately, so batch-size effectively has no effect.
      properties:
        linger:
          ms: 0
      # Producer buffer size in bytes.
      buffer-memory: 33554432
      # Serializers provided by Kafka.
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    # ========== Consumer configuration ==========
    consumer:
      properties:
        # Default consumer group ID.
        group:
          id: defaultConsumerGroup
        # Session timeout: if no heartbeat is received within this window,
        # a rebalance is triggered.
        session:
          timeout:
            ms: 120000
        # Fetch request timeout.
        request:
          timeout:
            ms: 180000
      # Whether offsets are committed automatically.
      enable-auto-commit: true
      # Auto-commit interval (how long after receipt the offset is committed).
      # NOTE: the original `consumer.auto.commit.interval.ms` nesting is not a
      # Spring Boot property and was silently ignored; the correct key is
      # `auto-commit-interval` (a bare number is interpreted as milliseconds).
      auto-commit-interval: 1000
      # Offset reset policy when no initial offset exists or it is out of range:
      #   earliest: reset to the smallest offset in the partition
      #   latest:   reset to the newest offset (consume only new data)
      #   none:     throw if any partition has no committed offset
      auto-offset-reset: latest
      # Deserializers provided by Kafka.
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    # Don't fail application startup when a listened-to topic does not exist.
    listener:
      missing-topics-fatal: false

2.3 简单生产者

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import javax.annotation.Resource;

@RestController
@RequestMapping("/kafka/")
public class KafkaController {

    // Template used to publish messages to Kafka topics.
    @Resource
    private KafkaTemplate<String, Object> kafkaTemplate;

    /**
     * Publishes the message carried in the request body to the "chat" topic.
     *
     * @param kafkaDTO request payload wrapping the message text
     */
    @PostMapping("sendMsg")
    public void sendMsg(@RequestBody KafkaDTO kafkaDTO) {
        String payload = kafkaDTO.getMsg();
        kafkaTemplate.send("chat", payload);
    }
}

2.4 简单消费者

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class KafkaConsumer {

    /**
     * Listener for the "chat" topic: prints the topic, partition and value of
     * each consumed record. (The Javadoc was originally placed between the
     * annotation and the method signature, where the javadoc tool ignores it;
     * it must precede the annotations.)
     *
     * @param record the consumed Kafka record
     */
    @KafkaListener(topics = {"chat"})
    public void chatMsg(ConsumerRecord<?, ?> record) {
        // Show which topic/partition the message came from, then its payload.
        System.out.println("简单消费:" + record.topic()
                + "-" + record.partition()
                + "-" + record.value());
    }
}

2.5 消息测试

在这里插入图片描述
在这里插入图片描述

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值