Kafka Cluster Configuration (Docker Version)

1. First, set up the ZooKeeper cluster

Scripts:

(1) Create the ZooKeeper configuration file (zoo2 and zoo3 get identical copies under their own directories):

/data/zookeeper/zoo1/config/zoo.cfg

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
# dataDir=/opt/zookeeper-3.4.13/data
dataDir=/data
dataLogDir=/datalog
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1

server.1=zoo1:2888:3888
server.2=zoo2:2888:3888
server.3=zoo3:2888:3888
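
Before starting the containers, the host directories that each node mounts must already exist. A minimal preparation sketch (assuming the config above is saved as ./zoo.cfg; the paths match the volume mounts in the compose file below):

# Create the per-node host directories and give each node its own copy of the config.
for i in 1 2 3; do
  mkdir -p /data/zookeeper/zoo${i}/{config,data,datalog}
  cp zoo.cfg /data/zookeeper/zoo${i}/config/zoo.cfg
done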

(2) Bring the cluster up with a docker-compose file:

docker-compose-zookeeper.yml:

version: '3.7'
services:
  zoo1:
    container_name: zoo1
    hostname: zoo1
    image: wurstmeister/zookeeper
    privileged: true
    restart: unless-stopped
    ports:
      - 2181:2181
    volumes: # mount the data volumes
      - /data/zookeeper/zoo1/config/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg
      - /data/zookeeper/zoo1/data:/data
      - /data/zookeeper/zoo1/datalog:/datalog
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 1 # node ID
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888 # list of zookeeper servers
    
  zoo2:
    container_name: zoo2
    hostname: zoo2
    image: wurstmeister/zookeeper
    privileged: true
    restart: unless-stopped
    ports:
      - 2182:2181
    volumes: # mount the data volumes
      - /data/zookeeper/zoo2/config/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg
      - /data/zookeeper/zoo2/data:/data
      - /data/zookeeper/zoo2/datalog:/datalog
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 2 # node ID
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888 # list of zookeeper servers
    
  zoo3:
    container_name: zoo3
    hostname: zoo3
    image: wurstmeister/zookeeper
    privileged: true
    restart: unless-stopped
    ports:
      - 2183:2181
    volumes: # mount the data volumes
      - /data/zookeeper/zoo3/config/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg
      - /data/zookeeper/zoo3/data:/data
      - /data/zookeeper/zoo3/datalog:/datalog
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 3 # node ID
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888 # list of zookeeper servers

Start it:

docker-compose -f docker-compose-zookeeper.yml up -d
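
A quick health check (a sketch, assuming nc is installed on the host and the ports are mapped as above): ZooKeeper's srvr four-letter command reports each node's role, and exactly one node should be the leader.

# Expect "Mode: follower" on two nodes and "Mode: leader" on one.
for port in 2181 2182 2183; do
  echo srvr | nc localhost ${port} | grep Mode
done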

2. Then set up the Kafka cluster

(1) First, create a Docker bridge network for the containers

docker network create -d bridge --subnet 172.19.0.0/24 kafka_net
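
Optionally confirm that the network exists with the expected subnet:

# Should print a config entry containing "Subnet": "172.19.0.0/24".
docker network inspect kafka_net -f '{{json .IPAM.Config}}'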

(2) Bring the cluster up with a docker-compose file, docker-compose-kafka.yml (here 172.16.12.163 is assumed to be the Kafka host's IP and 172.16.12.130 the ZooKeeper host's IP; substitute your own addresses):

version: '3.2'
services:
  broker1:
    container_name: broker1
    hostname: broker1
    image: wurstmeister/kafka
    privileged: true
    restart: unless-stopped
    ports:
      - "9986:9986"
      - "9091:9091"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_LISTENERS: PLAINTEXT://:9091
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.16.12.163:9091
      KAFKA_ADVERTISED_HOST_NAME: 172.16.12.163
      KAFKA_ADVERTISED_PORT: 9091
      KAFKA_ZOOKEEPER_CONNECT: 172.16.12.130:2181,172.16.12.130:2182,172.16.12.130:2183
      JMX_PORT: 9986
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /data/kafka/broker1:/kafka/kafka-logs-broker1
    networks:
      default:
        ipv4_address: 172.19.0.11
 
    
  broker2:
    container_name: broker2
    hostname: broker2
    image: wurstmeister/kafka
    privileged: true
    restart: unless-stopped
    ports:
      - "9987:9987"
      - "9092:9092"
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_LISTENERS: PLAINTEXT://:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.16.12.163:9092
      KAFKA_ADVERTISED_HOST_NAME: 172.16.12.163
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: 172.16.12.130:2181,172.16.12.130:2182,172.16.12.130:2183
      JMX_PORT: 9987
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /data/kafka/broker2:/kafka/kafka-logs-broker2
    networks:
      default:
        ipv4_address: 172.19.0.12


  broker3:
    container_name: broker3
    hostname: broker3
    image: wurstmeister/kafka
    privileged: true
    restart: unless-stopped
    ports:
      - "9988:9988"
      - "9093:9093"
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_LISTENERS: PLAINTEXT://:9093
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.16.12.163:9093
      KAFKA_ADVERTISED_HOST_NAME: 172.16.12.163
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_ZOOKEEPER_CONNECT: 172.16.12.130:2181,172.16.12.130:2182,172.16.12.130:2183
      JMX_PORT: 9988
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /data/kafka/broker3:/kafka/kafka-logs-broker3
    networks:
      default:
        ipv4_address: 172.19.0.13
   


  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    container_name: kafka-manager
    hostname: kafka-manager
    restart: unless-stopped
    ports:
      - 9000:9000
    links: # link to the containers created in this compose file
      - broker1
      - broker2
      - broker3

    environment:
      ZK_HOSTS: 172.16.12.130:2181,172.16.12.130:2182,172.16.12.130:2183
      KAFKA_BROKERS: broker1:9091,broker2:9092,broker3:9093
      APPLICATION_SECRET: 123456
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    networks:
      default:
        ipv4_address: 172.19.0.14

networks:
  default:
    external:
      name: kafka_net

Start it:

docker-compose -f docker-compose-kafka.yml up -d
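
A smoke test, sketched under two assumptions: the wurstmeister/kafka image puts the Kafka CLI tools on the PATH and is recent enough to accept --bootstrap-server (Kafka 2.2+); and the brokers advertise 172.16.12.163 as configured above. JMX_PORT is cleared because the CLI tools inside the broker container would otherwise try to bind the broker's JMX port. The topic name test-topic is arbitrary.

# Create a replicated topic and confirm all three brokers hold replicas.
docker exec -e JMX_PORT= broker1 kafka-topics.sh --create \
  --bootstrap-server 172.16.12.163:9091 \
  --replication-factor 3 --partitions 3 --topic test-topic
docker exec -e JMX_PORT= broker1 kafka-topics.sh --describe \
  --bootstrap-server 172.16.12.163:9091 --topic test-topic

# Round-trip one message through the cluster.
echo hello | docker exec -i -e JMX_PORT= broker1 kafka-console-producer.sh \
  --broker-list 172.16.12.163:9091 --topic test-topic
docker exec -e JMX_PORT= broker1 kafka-console-consumer.sh \
  --bootstrap-server 172.16.12.163:9092 --topic test-topic \
  --from-beginning --max-messages 1

The kafka-manager UI is then available at http://<host>:9000; register the cluster there using the same ZK_HOSTS string as in the compose file.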
