Setting up a single-host ZooKeeper and Kafka pseudo-cluster with Docker, plus kafka-manager


I recently learned Docker and wanted to deploy a pseudo-cluster of ZooKeeper and Kafka on my own server. After several attempts, I have summarized the steps below.

I used docker-compose for the whole setup.

  • Step 1: Install docker-compose with: curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
    After the download finishes, make the binary executable: chmod +x /usr/local/bin/docker-compose
  • Step 2: Create a new bridge network in Docker with the following command (a quick sanity check follows it):
docker network create --driver bridge --subnet=172.23.0.0/16 --gateway=172.23.0.1 zoo_kafka
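A minimal sanity-check sketch, assuming the two steps above completed without errors: confirm that docker-compose runs and that the zoo_kafka network has the expected subnet and gateway.

# should print something like "docker-compose version 1.23.2"
docker-compose --version
# should show "Subnet": "172.23.0.0/16" and "Gateway": "172.23.0.1"
docker network inspect zoo_kafka | grep -E 'Subnet|Gateway'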
  • Step 3: Create a kafka directory and a zookeeper directory anywhere you like, to hold the docker-compose.yml file for each, as shown here
    (screenshot: directory layout with the kafka and zookeeper folders)
  • Step 4: Edit the docker-compose.yml in the kafka directory and the one in the zookeeper directory. You need to have pulled the zookeeper and kafka images first. The configuration is as follows
    zookeeper's docker-compose.yml
version: '3.4'
services:
  zoo1:
    image: wurstmeister/zookeeper
    restart: always
    hostname: zk1
    container_name: zk1
    ports:
    - 2181:2181
    volumes:
    - "/Users/yz/Development/volume/zkcluster/zoo1/data:/data"
    - "/Users/yz/Development/volume/zkcluster/zoo1/datalog:/datalog"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      zoo_kafka:
        ipv4_address: 172.23.0.11  # ZooKeeper and Kafka must be on the same Docker network, otherwise Kafka cannot reach ZooKeeper

  zoo2:
    image: wurstmeister/zookeeper
    restart: always
    hostname: zk2
    container_name: zk2
    ports:
    - 2182:2181
    volumes:
    - "/Users/yz/Development/volume/zkcluster/zoo2/data:/data"
    - "/Users/yz/Development/volume/zkcluster/zoo2/datalog:/datalog"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zoo3:2888:3888
    networks:
      zoo_kafka:
        ipv4_address: 172.23.0.12  # ZooKeeper and Kafka must be on the same Docker network, otherwise Kafka cannot reach ZooKeeper

  zoo3:
    image: wurstmeister/zookeeper
    restart: always
    hostname: zk3
    container_name: zk3
    ports:
    - 2183:2181
    volumes:
    - "/Users/yz/Development/volume/zkcluster/zoo3/data:/data"
    - "/Users/yz/Development/volume/zkcluster/zoo3/datalog:/datalog"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=0.0.0.0:2888:3888
    networks:
      zoo_kafka:
        ipv4_address: 172.23.0.13  # ZooKeeper and Kafka must be on the same Docker network, otherwise Kafka cannot reach ZooKeeper

networks:
  zoo_kafka:
    external:
      name: zoo_kafka
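Once the ensemble is started (step 5 below), it can be checked like this. A minimal sketch, assuming zkServer.sh is on the PATH inside the wurstmeister/zookeeper containers; if it is not, use the full path under /opt/zookeeper-*/bin/.

# each node should report Mode: leader or Mode: follower once the quorum has formed
docker exec zk1 zkServer.sh status
docker exec zk2 zkServer.sh status
docker exec zk3 zkServer.sh status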

kafka's docker-compose.yml

version: '2'
services:
  kafka1:
    image: wurstmeister/kafka
    restart: always
    hostname: kafka1
    container_name: kafka1
    ports:
    - 9092:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka1
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181  # ZooKeeper connection string
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://PUBLIC_IP:9092  # externally advertised address; must be the server's public IP or kafka-manager cannot find the broker
      KAFKA_HEAP_OPTS: -Xmx256M -Xms128M  # JVM heap limits
    volumes:
    - /Users/yz/Development/volume/kfkluster/kafka1/logs:/kafka
    external_links:  # link to containers created outside this compose file
    - zk1
    - zk2
    - zk3
    networks:  # attach to the shared network
      zoo_kafka:  # network name
        ipv4_address: 172.23.0.14  # static IP for this broker; ZooKeeper and Kafka must be on the same Docker network, otherwise Kafka cannot reach ZooKeeper

  kafka2:
    image: wurstmeister/kafka
    restart: always
    hostname: kafka2
    container_name: kafka2
    ports:
    - 9093:9092  # host 9093 -> container listener 9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka2
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_HEAP_OPTS: -Xmx256M -Xms128M
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://PUBLIC_IP:9093  # externally advertised address; must be the server's public IP or kafka-manager cannot find the broker
    volumes:
    - /Users/yz/Development/volume/kfkluster/kafka2/logs:/kafka
    external_links:
    - zk1
    - zk2
    - zk3
    networks:
      zoo_kafka:
        ipv4_address: 172.23.0.15  # static IP for this broker; must be on the same Docker network as ZooKeeper

  kafka3:
    image: wurstmeister/kafka
    restart: always
    hostname: kafka3
    container_name: kafka3
    ports:
    - 9094:9092  # host 9094 -> container listener 9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka3
      KAFKA_ADVERTISED_PORT: 9094
      KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_HEAP_OPTS: -Xmx256M -Xms128M
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://PUBLIC_IP:9094  # externally advertised address; must be the server's public IP or kafka-manager cannot find the broker
    volumes:
    - /Users/yz/Development/volume/kfkluster/kafka3/logs:/kafka
    external_links:
    - zk1
    - zk2
    - zk3
    networks:
      zoo_kafka:
        ipv4_address: 172.23.0.16  # static IP for this broker; must be on the same Docker network as ZooKeeper
        
        
  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    restart: always
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
    - "9000:9000"
    links:      # containers created by this compose file
    - kafka1
    - kafka2
    - kafka3
    external_links:  # containers created outside this compose file
    - zk1
    - zk2
    - zk3
    environment:
      ZK_HOSTS: zk1:2181,zk2:2181,zk3:2181
      KAFKA_BROKERS: kafka1:9092
      APPLICATION_SECRET: letmein
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    networks:
      zoo_kafka:
        ipv4_address: 172.23.0.10  # static IP for kafka-manager; must be on the same Docker network as ZooKeeper and Kafka

networks:
  zoo_kafka:
    external:
      name: zoo_kafka
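Once the brokers are started (step 5 below), the cluster can be smoke-tested from inside one of the containers. A minimal sketch, assuming the Kafka CLI scripts are on the PATH in the wurstmeister/kafka image and that the Kafka version in the image still accepts --zookeeper (newer versions take --bootstrap-server kafka1:9092 instead). If you start only one broker, use --replication-factor 1.

# create a test topic spread across the three brokers
docker exec kafka1 kafka-topics.sh --create --zookeeper zk1:2181,zk2:2181,zk3:2181 --replication-factor 3 --partitions 3 --topic test
# show the partition and replica assignment for the new topic
docker exec kafka1 kafka-topics.sh --describe --zookeeper zk1:2181,zk2:2181,zk3:2181 --topic test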

  • Step 5: Run docker-compose up -d in the kafka directory and in the zookeeper directory
    As shown here:
    (screenshot: docker-compose up -d output)
    My server has very little memory, so I only started one broker; starting all of them would cause errors
  • Step 6: Open http://your-public-IP:9000 to reach kafka-manager and add a new cluster, as shown: (screenshot: kafka-manager Add Cluster page)
    You will see a single Kafka broker listed (because I only started one). That means the ZooKeeper and Kafka pseudo-cluster is up and working; a command-line check is sketched below
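To confirm that the advertised public address is reachable from outside the server, you can run a console producer/consumer round trip from any machine with the Kafka CLI tools installed. A minimal sketch; PUBLIC_IP stands for your server's public IP and the topic name test is just an example (older CLI versions use --broker-list instead of --bootstrap-server for the producer).

# type a few messages, then press Ctrl+C
kafka-console-producer.sh --bootstrap-server PUBLIC_IP:9092 --topic test
# read them back from the beginning
kafka-console-consumer.sh --bootstrap-server PUBLIC_IP:9092 --topic test --from-beginning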

While setting this up I ran into a Kafka error (screenshot: broker startup error log).

Cause: I had stopped and restarted the containers several times, so the Cluster ID stored in ZooKeeper no longer matched the Cluster ID recorded by the Kafka broker, which made startup fail.

Fix: Stop the containers, go to the host path mounted in kafka's docker-compose.yml (for example /Users/yz/Development/volume/kfkluster/kafka3/logs), find the meta.properties file under that path and delete it, then start the containers again.
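Roughly, the cleanup can be done like this. A sketch only, assuming the host volume paths from the compose file above; note that find -delete removes meta.properties for every broker under that directory, which forces each of them to re-register with the current cluster ID.

cd kafka && docker-compose stop
# meta.properties stores the broker's recorded cluster.id; deleting it clears the mismatch
find /Users/yz/Development/volume/kfkluster -name meta.properties -delete
docker-compose up -d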
