Building a Kafka cluster with docker-compose

Create the directory structure

[root@meta kafka]# tree -L 2
.
├── kafka.yml
├── kfk1
│   ├── conf
│   ├── data
│   └── log
├── kfk2
│   ├── conf
│   ├── data
│   └── log
├── kfk3
│   ├── conf
│   ├── data
│   └── log
├── zk1
│   ├── conf
│   ├── data
│   ├── datalog
│   └── logs
├── zk2
│   ├── conf
│   ├── data
│   ├── datalog
│   └── logs
└── zk3
    ├── conf
    ├── data
    ├── datalog
    └── logs

27 directories, 1 file
[root@meta kafka]# 
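
One way to create this layout is a short bash loop run from the kafka working directory, for example:

# create the zookeeper and kafka directory trees shown above
for i in 1 2 3; do
    mkdir -p zk$i/{conf,data,datalog,logs}
    mkdir -p kfk$i/{conf,data,log}
done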

Configure zoo.cfg in each ZooKeeper conf directory

# Identical copies live at zk1/conf/zoo.cfg, zk2/conf/zoo.cfg and zk3/conf/zoo.cfg
# heartbeat interval in milliseconds
tickTime=2000

# maximum number of ticks a follower may take to initially connect and sync to the leader
initLimit=10

# maximum number of ticks a follower may lag behind the leader when answering requests
syncLimit=5

# zookeeper data directory
dataDir=/data

# zookeeper transaction log directory
dataLogDir=/datalog

# client connection port
clientPort=2181

# number of snapshots to retain
autopurge.snapRetainCount=3

# purge interval in hours; 0 disables automatic purging
autopurge.purgeInterval=1

# enable the four-letter-word admin commands
4lw.commands.whitelist=*

# cluster members: server address:leader-follower port:election port
server.1=zk1:2888:3888
server.2=zk2:2888:3888
server.3=zk3:2888:3888
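
All three nodes use an identical zoo.cfg (each node's ID is supplied later by the ZOO_MY_ID variable in the compose file, so no myid file needs to be written here), so one way to distribute it is to write the file once and copy it into each conf directory:

# assumes zoo.cfg has been written in the current working directory
for i in zk1 zk2 zk3; do
    cp zoo.cfg $i/conf/zoo.cfg
done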

Write the kafka.yml file

version: '3.5'
services:
    zk1:
        image: zookeeper
        restart: always
        container_name: zk1
        hostname: zk1
        ports:
        - 2181:2181
        environment:
            ZOO_MY_ID: 1
            ZOO_SERVERS: server.1=zk1:2888:3888 server.2=zk2:2888:3888 server.3=zk3:2888:3888
        volumes:
        - ./zk1/conf/zoo.cfg:/conf/zoo.cfg
        - ./zk1/data:/data
        - ./zk1/datalog:/datalog
        - ./zk1/logs:/logs
        networks:
            kafka:
                aliases:
                - zk1

    zk2:
        image: zookeeper
        restart: always
        container_name: zk2
        hostname: zk2
        ports:
        - 2182:2181
        environment:
            ZOO_MY_ID: 2
            ZOO_SERVERS: server.1=zk1:2888:3888 server.2=zk2:2888:3888 server.3=zk3:2888:3888
        volumes:
        - ./zk2/conf/zoo.cfg:/conf/zoo.cfg
        - ./zk2/data:/data
        - ./zk2/datalog:/datalog
        - ./zk2/logs:/logs
        networks:
            kafka:
                aliases:
                - zk2

    zk3:
        image: zookeeper
        restart: always
        container_name: zk3
        hostname: zk3
        ports:
        - 2183:2181
        environment:
            ZOO_MY_ID: 3
            ZOO_SERVERS: server.1=zk1:2888:3888 server.2=zk2:2888:3888 server.3=zk3:2888:3888
        volumes:
        - ./zk3/conf/zoo.cfg:/conf/zoo.cfg
        - ./zk3/data:/data
        - ./zk3/datalog:/datalog
        - ./zk3/logs:/logs
        networks:
            kafka:
                aliases:
                - zk3
    kafka1:
        image: wurstmeister/kafka
        restart: always
        container_name: kafka1
        hostname: kafka1
        privileged: true
        ports:
        - "9092:9092"
        environment:
            KAFKA_BROKER_ID: 1
            KAFKA_ADVERTISED_HOST_NAME: kafka1
            KAFKA_ADVERTISED_PORT: 9092
            KAFKA_LISTENERS: PLAINTEXT://kafka1:9092
            KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://121.5.246.227:9092     ## NOTE: replace with your host machine's IP
            KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181
            KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
            KAFKA_HEAP_OPTS: "-Xmx512M -Xms256M"
            JMX_PORT: 9988
        # data volumes
        volumes:
        - ./kfk1/conf:/opt/kafka/config
        - ./kfk1/log:/opt/kafka/logs
        - /etc/localtime:/etc/localtime
        # to use an external zookeeper cluster instead, list it here:
        # external_links:
        # - zoo1
        # - zoo2
        # - zoo3
        # depend on the zookeeper cluster defined in this file
        depends_on:
        - zk1
        - zk2
        - zk3
        networks:
            kafka:
                aliases:
                - kafka1
    kafka2:
        image: wurstmeister/kafka
        restart: always
        container_name: kafka2
        hostname: kafka2
        privileged: true
        ports:
        - "9093:9092"
        environment:
            KAFKA_BROKER_ID: 2
            KAFKA_ADVERTISED_HOST_NAME: kafka2
            KAFKA_ADVERTISED_PORT: 9092
            KAFKA_LISTENERS: PLAINTEXT://kafka2:9092
            KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://121.5.246.227:9093    ## NOTE: replace with your host machine's IP
            KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181
            KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
            KAFKA_HEAP_OPTS: "-Xmx512M -Xms256M"
            JMX_PORT: 9988
        # data volumes
        volumes:
        - ./kfk2/conf:/opt/kafka/config
        - ./kfk2/log:/opt/kafka/logs
        - /etc/localtime:/etc/localtime
        # to use an external zookeeper cluster instead, list it here:
        # external_links:
        # - zoo1
        # - zoo2
        # - zoo3
        # depend on the zookeeper cluster defined in this file
        depends_on:
        - zk1
        - zk2
        - zk3
        networks:
            kafka:
                aliases:
                - kafka2
    kafka3:
        image: wurstmeister/kafka
        restart: always
        container_name: kafka3
        hostname: kafka3
        privileged: true
        ports:
        - "9094:9092"
        environment:
            KAFKA_BROKER_ID: 3
            KAFKA_ADVERTISED_HOST_NAME: kafka3
            KAFKA_ADVERTISED_PORT: 9092
            KAFKA_LISTENERS: PLAINTEXT://kafka3:9092
            KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://121.5.246.227:9094    ## NOTE: replace with your host machine's IP
            KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181
            KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
            KAFKA_HEAP_OPTS: "-Xmx512M -Xms256M"
            JMX_PORT: 9988
        # data volumes
        volumes:
        - ./kfk3/conf:/opt/kafka/config
        - ./kfk3/log:/opt/kafka/logs
        - /etc/localtime:/etc/localtime
        # to use an external zookeeper cluster instead, list it here:
        # external_links:
        # - zoo1
        # - zoo2
        # - zoo3
        # depend on the zookeeper cluster defined in this file
        depends_on:
        - zk1
        - zk2
        - zk3
        networks:
            kafka:
                aliases:
                - kafka3
    kafka-manager: # containers on the same network can reach each other by name
        image: sheepkiller/kafka-manager:latest
        restart: always
        container_name: kafka-manager
        hostname: kafka-manager
        ports:
        - "9000:9000"
        # links:      # link to containers created by this compose file
        # - broker1
        # - broker2
        # - broker3
        depends_on:  # services defined in this compose file that must start first
        - zk1
        - zk2
        - zk3
        - kafka1
        - kafka2
        - kafka3
        environment:
            ZK_HOSTS: zk1:2181,zk2:2181,zk3:2181  # containers share the network, so hostnames work here
            KAFKA_BROKERS: kafka1:9092,kafka2:9092,kafka3:9092 # containers share the network, so hostnames work here
            APPLICATION_SECRET: letmein
            KAFKA_MANAGER_AUTH_ENABLED: "true" # enable authentication
            KAFKA_MANAGER_USERNAME: "admin" # username
            KAFKA_MANAGER_PASSWORD: "admin" # password
            KM_ARGS: -Djava.net.preferIPv4Stack=true
        networks:
            kafka:
                aliases: 
                - manager
networks:
  kafka:
    driver: bridge
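
Before starting anything, docker-compose can validate the file; config -q only parses kafka.yml and prints errors, if any:

# parse kafka.yml and report syntax or schema errors without starting containers
[root@meta kafka]# docker-compose -f kafka.yml config -q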

Start and stop commands

# start
[root@meta kafka]# docker-compose -f kafka.yml  up -d
# stop
[root@meta kafka]# docker-compose -f kafka.yml  down
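
Once up -d returns, a quick sanity check is to list the containers and ask each ZooKeeper node for its role. zkServer.sh is on the PATH of the official zookeeper image, and the last command uses the four-letter-word interface whitelisted in zoo.cfg through the mapped port (it assumes nc/netcat is installed on the host):

# list the containers started from kafka.yml
[root@meta kafka]# docker-compose -f kafka.yml ps

# ask each zookeeper node whether it is the leader or a follower
[root@meta kafka]# docker exec zk1 zkServer.sh status
[root@meta kafka]# docker exec zk2 zkServer.sh status
[root@meta kafka]# docker exec zk3 zkServer.sh status

# four-letter-word command through the mapped port
[root@meta kafka]# echo srvr | nc 127.0.0.1 2181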

Access the management UI

http://IP:9000
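
Log in with the admin/admin credentials configured above and add a cluster pointing at zk1:2181,zk2:2181,zk3:2181. For a command-line smoke test of the brokers themselves, something like the sketch below can be run inside one of the Kafka containers. It assumes the wurstmeister image keeps the Kafka CLI scripts on the PATH and ships Kafka 2.2+ so --bootstrap-server is accepted (older versions need --zookeeper zk1:2181 instead); JMX_PORT is cleared for the exec'd process because the broker already uses port 9988 for JMX, which would otherwise make the CLI tools fail to start. The topic name smoke-test is just an illustrative throwaway name.

# create a topic replicated across all three brokers
[root@meta kafka]# docker exec -e JMX_PORT= kafka1 \
    kafka-topics.sh --create --topic smoke-test --partitions 3 --replication-factor 3 \
    --bootstrap-server kafka1:9092

# check which broker leads each partition
[root@meta kafka]# docker exec -e JMX_PORT= kafka1 \
    kafka-topics.sh --describe --topic smoke-test --bootstrap-server kafka1:9092

# send a message and read it back
[root@meta kafka]# echo "hello" | docker exec -i -e JMX_PORT= kafka1 \
    kafka-console-producer.sh --topic smoke-test --broker-list kafka1:9092
[root@meta kafka]# docker exec -e JMX_PORT= kafka1 \
    kafka-console-consumer.sh --topic smoke-test --from-beginning --max-messages 1 \
    --bootstrap-server kafka1:9092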