Deploying a distributed ZooKeeper, Kafka, Storm, and Flink cluster with a single docker-compose command

version: '3'

services:
  zookeeper1:
    image: zookeeper:3.8.2
    hostname: zookeeper1
    container_name: zookeeper1
    privileged: true
    restart: always
    networks:
      net_zoo_kafka:
    ports:
      - "2181:2181"
    volumes:
      - d:/app/docker/zookeeper/zookeeper1/conf:/conf
      - d:/app/docker/zookeeper/zookeeper1/data/zookeeper1/data:/data
      - d:/app/docker/zookeeper/zookeeper1/log/zookeeper1/datalog:/datalog
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zookeeper2:2888:3888;2181 server.3=zookeeper3:2888:3888;2181
      ALLOW_ANONYMOUS_LOGIN: "yes"

  zookeeper2:
    image: zookeeper:3.8.2
    hostname: zookeeper2
    container_name: zookeeper2
    privileged: true
    restart: always
    networks:
      net_zoo_kafka:
    ports:
      - "2182:2181"
    volumes:
      - d:/app/docker/zookeeper/zookeeper2/conf:/conf
      - d:/app/docker/zookeeper/zookeeper2/data/zookeeper2/data:/data
      - d:/app/docker/zookeeper/zookeeper2/log/zookeeper2/datalog:/datalog
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zookeeper1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zookeeper3:2888:3888;2181
      ALLOW_ANONYMOUS_LOGIN: "yes"

  zookeeper3:
    image: zookeeper:3.8.2
    hostname: zookeeper3
    container_name: zookeeper3
    privileged: true
    restart: always
    networks:
      net_zoo_kafka:
    ports:
      - "2183:2181"
    volumes:
      - d:/app/docker/zookeeper/zookeeper3/conf:/conf
      - d:/app/docker/zookeeper/zookeeper3/data/zookeeper3/data:/data
      - d:/app/docker/zookeeper/zookeeper3/log/zookeeper3/datalog:/datalog
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zookeeper1:2888:3888;2181 server.2=zookeeper2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
      ALLOW_ANONYMOUS_LOGIN: "yes"

  kafka1:
    image: bitnami/kafka:3.5.1
    hostname: kafka1
    container_name: kafka1
    privileged: true
    restart: always
    networks:
      net_zoo_kafka:
    ports:
      - "9092:9092"
    environment:
      - ALLOW_NONE_AUTHENTICATION=yes
      - ALLOW_PLAINTEXT_LISTENER=yes 
      - KAFKA_BROKER_ID=2
      - KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:9092
      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka1:9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181/kafka
    volumes:
      - d:/app/docker/kafka/kafka1/docker.sock:/var/run/docker.sock
      - d:/app/docker/kafka/kafka1/data/:/kafka
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3

  kafka2:
    image: bitnami/kafka:3.5.1
    hostname: kafka2
    container_name: kafka2
    privileged: true
    restart: always
    networks:
      net_zoo_kafka:
    ports:
      - "9093:9093"
    environment:
      - ALLOW_NONE_AUTHENTICATION=yes
      - ALLOW_PLAINTEXT_LISTENER=yes 
      - KAFKA_BROKER_ID=3
      - KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:9093
      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka2:9093
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181/kafka
    volumes:
      - d:/app/docker/kafka/kafka2/docker.sock:/var/run/docker.sock
      - d:/app/docker/kafka/kafka2/data/:/kafka
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3

  kafka3:
    image: bitnami/kafka:3.5.1
    hostname: kafka3
    container_name: kafka3
    privileged: true
    restart: always
    networks:
      net_zoo_kafka:
    ports:
      - "9094:9094"
    environment:
      - ALLOW_NONE_AUTHENTICATION=yes
      - ALLOW_PLAINTEXT_LISTENER=yes 
      - KAFKA_BROKER_ID=4
      - KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:9094
      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka3:9094
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181/kafka
    volumes:
      - d:/app/docker/kafka/kafka3/docker.sock:/var/run/docker.sock
      - d:/app/docker/kafka/kafka3/data/:/kafka
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3

  kafka-manager:
    image: sheepkiller/kafka-manager
    container_name: kafka-manager
    hostname: kafka-manager
    privileged: true
    networks:
      net_zoo_kafka:
    ports:
      - "9000:9000"
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3
      - kafka1
      - kafka2
      - kafka3
    environment:
      ZK_HOSTS: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181/kafka
      KAFKA_BROKERS: kafka1:9092,kafka2:9093,kafka3:9094
      APPLICATION_SECRET: letmein
      KM_ARGS: -Djava.net.preferIPv4Stack=true

  nimbus:
    image: storm:2.5.0
    container_name: nimbus
    command: storm nimbus
    networks:
      - net_zoo_kafka
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3
    volumes:
      - D:/app/docker/storm/logs:/logs
      - D:/app/docker/storm/data:/data
    restart: always
    ports:
      - 6627:6627
      

  supervisor:
    image: storm:2.5.0
    container_name: supervisor
    command: storm supervisor
    networks:
      - net_zoo_kafka
    depends_on:
      - nimbus
      - zookeeper1
      - zookeeper2
      - zookeeper3
    restart: always
    ports:
      - 6633:6633

  ui:
    image: storm:2.5.0
    container_name: stormui
    command: storm ui
    networks:
      - net_zoo_kafka
    depends_on:
      - nimbus
      - zookeeper1
      - zookeeper2
      - zookeeper3
    restart: always
    ports:
      - 80:8080
      
  jobmanager:
    image: flink:1.18.0-scala_2.12-java8
    container_name: flink-jobmanager
    networks:
      - net_zoo_kafka
    expose:
      - "6123"
    ports:
      - "8082:8081"
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3
    restart: always    
    volumes:
      - D:/app/docker/flink/conf:/opt/flink/conf
    command: jobmanager
    environment:
      - JOB_MANAGER_RPC_ADDRESS=flink-jobmanager
      - JOB_MANAGER_HEAP_MEMORY=1024m
      - JOB_MANAGER_TOTAL_MEMORY=2048m
      - JOB_MANAGER_OFF_HEAP_MEMORY=256m
      - JOB_MANAGER_REST_PORT=8081 
      
  taskmanager1:
    image: flink:1.18.0-scala_2.12-java8
    container_name: flink-taskmanager1
    networks:
      - net_zoo_kafka
    depends_on:
      - jobmanager
      - zookeeper1
      - zookeeper2
      - zookeeper3
    expose:
      - "6123"
    volumes:
      - D:/app/docker/flink/conf:/opt/flink/conf
    command: taskmanager  
    environment:
      - JOB_MANAGER_RPC_ADDRESS=flink-jobmanager
      - TASK_MANAGER_NUMBER_OF_TASK_SLOTS=1
      - FLINK_TM_HEAP=1024m
      - FLINK_TM_MANAGED_MEMORY_SIZE=512m
      - FLINK_TM_REST_PORT=8082 

  taskmanager2:
    image: flink:1.18.0-scala_2.12-java8
    container_name: flink-taskmanager2
    networks:
      - net_zoo_kafka
    depends_on:
      - jobmanager
      - zookeeper1
      - zookeeper2
      - zookeeper3
    expose:
      - "6123" 
    volumes:
      - D:/app/docker/flink/conf:/opt/flink/conf
    command: taskmanager 
    environment:
      - JOB_MANAGER_RPC_ADDRESS=flink-jobmanager   
      - TASK_MANAGER_NUMBER_OF_TASK_SLOTS=1
      - FLINK_TM_HEAP=1024m
      - FLINK_TM_MANAGED_MEMORY_SIZE=512m
      - FLINK_TM_REST_PORT=8083 
  
networks:
  net_zoo_kafka:
    driver: bridge

Save the content above as docker-compose.yml, then run the following command in that directory to bring up the whole cluster in one step:

docker-compose up -d
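
A quick sanity check after startup (a sketch: the container names come from the compose file above, and zkServer.sh / kafka-topics.sh are assumed to be on the PATH inside the official zookeeper and bitnami/kafka images):

docker-compose ps

# each ZooKeeper node should report Mode: leader or Mode: follower
docker exec zookeeper1 zkServer.sh status
docker exec zookeeper2 zkServer.sh status
docker exec zookeeper3 zkServer.sh status

# lists topics through broker kafka1; an empty result with exit code 0 means the broker is up
docker exec kafka1 kafka-topics.sh --bootstrap-server kafka1:9092 --list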

After the containers start, a few configuration changes are still required.

Flink needs its conf directory prepared in advance (it is bind-mounted from D:/app/docker/flink/conf), and the following entries must be added to flink-conf.yaml, otherwise the jobmanager will fail to start. A sketch of preparing the directory follows the snippet.

high-availability: zookeeper
high-availability.zookeeper.quorum: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
high-availability.storageDir: /opt/flink/ha
jobmanager.memory.heap.size: 1024m
jobmanager.memory.flink.size: 2048m
jobmanager.memory.process.size: 3072m
jobmanager.rpc.address: flink-jobmanager
blob.server.port: 6124
query.server.port: 6125
taskmanager.numberOfTaskSlots: 1
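
Because /opt/flink/conf is bind-mounted from D:/app/docker/flink/conf, the directory has to contain a complete configuration before the first start. One way to seed it (a sketch, assuming the host directory is initially empty) is to copy the default conf out of the image, append the entries above to flink-conf.yaml, and then recreate the Flink containers:

docker create --name flink-conf-tmp flink:1.18.0-scala_2.12-java8
docker cp flink-conf-tmp:/opt/flink/conf/. D:/app/docker/flink/conf
docker rm flink-conf-tmp

# edit D:/app/docker/flink/conf/flink-conf.yaml, add the lines shown above, then recreate
docker-compose up -d --force-recreate jobmanager taskmanager1 taskmanager2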

Next, exec into each Storm container (nimbus, supervisor, stormui) and edit its configuration file so that it points at the ZooKeeper ensemble and the nimbus node; see the sketch after the snippet.

storm.zookeeper.servers:
  - "zookeeper1"
  - "zookeeper2"
  - "zookeeper3"
nimbus.seeds: ["nimbus"]
storm.log.dir: "/logs"
storm.local.dir: "/data"
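
A sketch of applying these settings in one go, assuming a POSIX shell on the host (e.g. Git Bash or WSL on Windows) and the default /conf/storm.yaml location used by the official storm image:

for c in nimbus supervisor stormui; do
  docker exec -i "$c" sh -c 'cat > /conf/storm.yaml' <<'EOF'
storm.zookeeper.servers:
  - "zookeeper1"
  - "zookeeper2"
  - "zookeeper3"
nimbus.seeds: ["nimbus"]
storm.log.dir: "/logs"
storm.local.dir: "/data"
EOF
done

# restart so the daemons re-read storm.yaml
docker restart nimbus supervisor stormui

Note that docker-compose up --force-recreate would rebuild these containers and discard the in-container edits; bind-mounting a prepared storm.yaml to /conf/storm.yaml is the more durable alternative.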

Once these edits are done and the Storm containers have been restarted, the cluster is ready to use.
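
To confirm the web UIs are reachable from the host (ports as published in the compose file above):

# Storm UI, published on host port 80
curl -s http://localhost:80/api/v1/cluster/summary
# Flink Dashboard, published on host port 8082
curl -s http://localhost:8082/overview
# Kafka Manager, published on host port 9000
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:9000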

## Update 2023/12/09: fixed the issue where Kafka could not be reached from outside the Docker network, and changed the Flink version
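
For reference, the usual way to make a broker reachable both from inside the compose network and from the Windows host is to declare separate internal and external listeners. The snippet below is an illustrative sketch for kafka1 only (it is not necessarily the exact change behind this update; the 29092 port and the localhost advertised address are assumptions), with kafka2 and kafka3 configured analogously on 29093 and 29094:

    ports:
      - "29092:29092"
    environment:
      - KAFKA_CFG_LISTENERS=INTERNAL://0.0.0.0:9092,EXTERNAL://0.0.0.0:29092
      - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://kafka1:9092,EXTERNAL://localhost:29092
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL

Other services on net_zoo_kafka keep using kafka1:9092, while clients on the host connect to localhost:29092.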
