Deploying a ZooKeeper Cluster and a Kafka Cluster with docker-compose

  1. Create the network
[root@localhost srv]# docker network create --driver bridge --subnet 172.23.0.0/16 --gateway 172.23.0.1  zookeeper_network
[root@localhost srv]# docker network ls // list networks to confirm it was created
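The subnet and gateway can also be double-checked by inspecting the network (an optional sanity check, not part of the original steps):
[root@localhost srv]# docker network inspect zookeeper_network | grep -E 'Subnet|Gateway'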
  2. Pull the ZooKeeper and Kafka images
[root@localhost srv]# docker pull zookeeper
[root@localhost srv]# docker pull wurstmeister/kafka
[root@localhost srv]# docker pull hlebalbau/kafka-manager # management UI
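A quick way to confirm the images are present locally (an optional check):
[root@localhost srv]# docker images | grep -E 'zookeeper|kafka'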
  3. Create the required files and directories
[root@localhost srv]# mkdir {zoo1,zoo2,zoo3,kafka1,kafka2,kafka3} // create the kafka and zookeeper directories

# Create the data/datalog directories under zoo1, zoo2 and zoo3, then write each node's id into its myid file,
# e.g. zoo1's myid contains 1 (a loop covering all three nodes is sketched below)
[root@localhost srv]# mkdir zoo1/{data,datalog} // repeat for zoo2 and zoo3
[root@localhost srv]# cat zoo1/data/myid 
1
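A minimal loop that creates the directories and myid files for all three nodes in one go (assuming the zoo1..zoo3 directories created above):
[root@localhost srv]# for i in 1 2 3; do mkdir -p zoo$i/{data,datalog}; echo $i > zoo$i/data/myid; done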

# Create the zoo.cfg configuration file
[root@localhost srv]# cat zoo1/zoo.cfg 
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data
dataLogDir=/datalog
clientPort=2181
server.1=172.23.0.11:2888:3888;2181
server.2=172.23.0.12:2888:3888;2181
server.3=172.23.0.13:2888:3888;2181
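The same zoo.cfg works for every node (the node identity comes from myid), so it can simply be copied, for example:
[root@localhost srv]# cp zoo1/zoo.cfg zoo2/ && cp zoo1/zoo.cfg zoo3/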

[root@localhost srv]# mkdir kafka1/{kafka,logs} // repeat for kafka2 and kafka3
  4. Create the docker-compose.yml file
[root@localhost srv]# cat docker-compose.yml 
version: '2'

services:

  zoo1:
    image: zookeeper # image
    restart: always # restart policy
    container_name: zoo1
    hostname: zoo1
    ports:
    - "2181:2181"

    volumes:
    - "./zoo1/zoo.cfg:/conf/zoo.cfg"
    - "./zoo1/data:/data"
    - "./zoo1/datalog:/datalog"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=172.23.0.11:2888:3888 server.2=172.23.0.12:2888:3888 server.3=172.23.0.13:2888:3888

    networks:
      default:
        ipv4_address: 172.23.0.11

  zoo2:
    image: zookeeper # image
    restart: always # restart policy
    container_name: zoo2
    hostname: zoo2
    ports:
    - "2182:2181"

    volumes:
    - "./zoo2/zoo.cfg:/conf/zoo.cfg"
    - "./zoo2/data:/data"
    - "./zoo2/datalog:/datalog"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=172.23.0.11:2888:3888 server.2=172.23.0.12:2888:3888 server.3=172.23.0.13:2888:3888

    networks:
      default:
        ipv4_address: 172.23.0.12

  zoo3:
    image: zookeeper # image
    restart: always # restart policy
    container_name: zoo3
    hostname: zoo3
    ports:
    - "2183:2181"

    volumes:
    - "./zoo3/zoo.cfg:/conf/zoo.cfg"
    - "./zoo3/data:/data"
    - "./zoo3/datalog:/datalog"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=172.23.0.11:2888:3888 server.2=172.23.0.12:2888:3888 server.3=172.23.0.13:2888:3888

    networks:
      default:
        ipv4_address: 172.23.0.13

  kafka1:
    image: wurstmeister/kafka # image
    restart: always
    container_name: kafka1
    hostname: kafka1
    privileged: true
    ports:
    - 9092:9092
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.23.0.14:9092 # externally advertised address
      KAFKA_ADVERTISED_HOST_NAME: 172.23.0.14 # advertised host name
      KAFKA_HOST_NAME: kafka1
      KAFKA_ZOOKEEPER_CONNECT: 172.23.0.11:2181,172.23.0.12:2181,172.23.0.13:2181
      KAFKA_ADVERTISED_PORT: 9092 # externally advertised port
      KAFKA_BROKER_ID: 0 # unique broker id
      KAFKA_LISTENERS: PLAINTEXT://172.23.0.14:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
    volumes:
    - "./kafka1/kafka:/kafka"
    - "./kafka1/logs:/opt/kafka/logs"
    links:
    - zoo1
    - zoo2
    - zoo3

    networks:
      default:
        ipv4_address: 172.23.0.14

  kafka2:
    image: wurstmeister/kafka # image
    restart: always
    container_name: kafka2
    hostname: kafka2
    privileged: true
    ports:
    - 9093:9092
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.23.0.15:9092 # externally advertised address
      KAFKA_ADVERTISED_HOST_NAME: 172.23.0.15 # advertised host name
      KAFKA_HOST_NAME: kafka2
      KAFKA_ZOOKEEPER_CONNECT: 172.23.0.11:2181,172.23.0.12:2181,172.23.0.13:2181
      KAFKA_ADVERTISED_PORT: 9093 # externally advertised port
      KAFKA_BROKER_ID: 1 # unique broker id
      KAFKA_LISTENERS: PLAINTEXT://172.23.0.15:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
    volumes:
    - "./kafka2/kafka:/kafka"
    - "./kafka2/logs:/opt/kafka/logs"
    links:
    - zoo1
    - zoo2
    - zoo3

    networks:
      default:
        ipv4_address: 172.23.0.15

  kafka3:
    image: wurstmeister/kafka # image
    restart: always
    container_name: kafka3
    hostname: kafka3
    privileged: true
    ports:
    - 9094:9092
    environment:
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.23.0.16:9092 # externally advertised address
      KAFKA_ADVERTISED_HOST_NAME: 172.23.0.16 # advertised host name
      KAFKA_HOST_NAME: kafka3
      KAFKA_ZOOKEEPER_CONNECT: 172.23.0.11:2181,172.23.0.12:2181,172.23.0.13:2181
      KAFKA_ADVERTISED_PORT: 9094 # externally advertised port
      KAFKA_BROKER_ID: 2 # unique broker id
      KAFKA_LISTENERS: PLAINTEXT://172.23.0.16:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
    volumes:
    - "./kafka3/kafka:/kafka"
    - "./kafka3/logs:/opt/kafka/logs"
    links:
    - zoo1
    - zoo2
    - zoo3

    networks:
      default:
        ipv4_address: 172.23.0.16

  kafka-manager:
    image: hlebalbau/kafka-manager
    restart: always
    container_name: kafka-manager
    hostname: kafka-manager
    ports:
    - 9000:9000
    links:
    - kafka1
    - kafka2
    - kafka3
    - zoo1
    - zoo2
    - zoo3
    environment:
      ZK_HOSTS: 172.23.0.11:2181,172.23.0.12:2181,172.23.0.13:2181
      KAFKA_BROKERS: 172.23.0.14:9092,172.23.0.15:9093,172.23.0.16:9094
      APPLICATION_SECRET: letmein
      KAFKA_MANAGER_AUTH_ENABLED: "true" # enable authentication
      KAFKA_MANAGER_USERNAME: "admin" # username
      KAFKA_MANAGER_PASSWORD: "admin" # password
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    networks:
      default:
        ipv4_address: 172.23.0.10

networks:
  default:
    external:
      name: zookeeper_network
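Before starting the stack, the compose file can be validated; docker-compose resolves and prints the effective configuration, or reports syntax errors:
[root@localhost srv]# docker-compose config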
  5. Start and stop the cluster
[root@localhost srv]# docker-compose up -d // run in the directory containing docker-compose.yml; use -f to point at another file

[root@localhost srv]# docker-compose ps
    Name                   Command               State                          Ports                        
-------------------------------------------------------------------------------------------------------------
kafka-manager   /kafka-manager/bin/cmak -D ...   Up      0.0.0.0:9000->9000/tcp                              
kafka1          start-kafka.sh                   Up      0.0.0.0:9092->9092/tcp                              
kafka2          start-kafka.sh                   Up      0.0.0.0:9093->9092/tcp                              
kafka3          start-kafka.sh                   Up      0.0.0.0:9094->9092/tcp                              
zoo1            /docker-entrypoint.sh zkSe ...   Up      0.0.0.0:2181->2181/tcp, 2888/tcp, 3888/tcp, 8080/tcp
zoo2            /docker-entrypoint.sh zkSe ...   Up      0.0.0.0:2182->2181/tcp, 2888/tcp, 3888/tcp, 8080/tcp
zoo3            /docker-entrypoint.sh zkSe ...   Up      0.0.0.0:2183->2181/tcp, 2888/tcp, 3888/tcp, 8080/tcp
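To stop the cluster again, the matching commands are (down removes the containers, but the bind-mounted data stays on the host):
[root@localhost srv]# docker-compose stop // stop the containers
[root@localhost srv]# docker-compose down // stop and remove the containers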
  6. Check that the ZooKeeper cluster is healthy
[root@localhost srv]# docker exec -ti zoo1 /bin/bash
root@zoo1:/apache-zookeeper-3.6.1-bin# zkServer.sh status  // Mode should be leader or follower
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: follower
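All three nodes can be checked in one pass from the host (a quick loop over the container names above; exactly one node should report leader):
[root@localhost srv]# for n in zoo1 zoo2 zoo3; do docker exec $n zkServer.sh status; done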
  7. Create a topic
[root@localhost srv]# docker exec -ti kafka1 bash
bash-4.4# kafka-topics.sh --create --zookeeper 172.23.0.11:2181 \
> --replication-factor 1 --partitions 3 --topic tobo

[root@localhost srv]# docker exec -ti kafka2 bash  // verify: the new topic should show up in the list on every broker
bash-4.4# kafka-topics.sh --list --zookeeper 172.23.0.13:2181
__consumer_offsets
tobo

bash-4.4# kafka-console-producer.sh --broker-list \
> 172.23.0.14:9092,172.23.0.15:9092,172.23.0.16:9092 \
> --topic tobo  // produce messages
>xiefei
>xiefei
>xiefei
bash-4.4# kafka-console-consumer.sh --bootstrap-server 172.23.0.14:9092,\
> 172.23.0.15:9092,172.23.0.16:9092 --topic tobo --from-beginning  // consume messages

xiefei
xiefei
xiefei
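The partition and replica layout of the new topic can also be inspected (a sketch; since the topic was created with replication factor 1, each partition has a single replica):
bash-4.4# kafka-topics.sh --describe --zookeeper 172.23.0.11:2181 --topic tobo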
  8. kafka-manager configuration
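The final step happens in the kafka-manager web UI. Roughly, assuming the compose file above: open http://<host>:9000, log in with the admin/admin credentials set via KAFKA_MANAGER_USERNAME/KAFKA_MANAGER_PASSWORD, choose Cluster -> Add Cluster, and set Cluster Zookeeper Hosts to 172.23.0.11:2181,172.23.0.12:2181,172.23.0.13:2181 (the same value as ZK_HOSTS). A quick check that the UI is reachable from the host:
[root@localhost srv]# curl -I http://localhost:9000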
