Deploying Kafka with docker-compose in SASL mode (username/password authentication)

Base components

  1. Zookeeper: registry / coordination service
  2. Kafka: message broker
  3. Kafka-ui: web UI for browsing the cluster

Create the directories

#Go to the base directory
cd /usr/local
#Create a docker folder
mkdir docker
#Enter the docker folder
cd ./docker
#Create a kafka folder
mkdir kafka
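
When all the steps below are done, the kafka folder will contain the compose file plus two config directories, like this:

kafka/
├── docker-compose.yaml
├── secrets/
│   └── server_jaas.conf
└── zookeeper/
    └── zoo.cfg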

docker-compose.yaml configuration

#Enter the kafka folder
cd ./kafka
#Create docker-compose.yaml in the kafka folder
vi docker-compose.yaml

Copy the following content into docker-compose.yaml:

# The compose file version depends on your Docker installation; 3.x is the common choice today
version: '3.8'
services:
  zookeeper:
    image: wurstmeister/zookeeper
    volumes:
      - ./secrets/:/opt/secrets/
      - ./zookeeper/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg
    container_name: zookeeper
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
      SERVER_JVMFLAGS: -Djava.security.auth.login.config=/opt/secrets/server_jaas.conf
    ports:
      - 2181:2181
    restart: always
  kafka:
    image: wurstmeister/kafka
    container_name: kafka
    depends_on:
      - zookeeper
    ports: 
      - 9092:9092
    volumes:
      - ./secrets/:/opt/secrets/
    environment:
      KAFKA_BROKER_ID: 0
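      # Replace 192.168.1.20 below with the IP or hostname that clients will use to reach this host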
      KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://192.168.1.20:9092
      KAFKA_ADVERTISED_PORT: 9092 
      KAFKA_LISTENERS: SASL_PLAINTEXT://0.0.0.0:9092
      KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_PLAINTEXT
      KAFKA_PORT: 9092 
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
      KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
      KAFKA_SUPER_USERS: User:admin
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" # When true, the ACL works as a blacklist: only users explicitly denied are blocked. When false (the default), it works as a whitelist: only users explicitly allowed may connect.
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_HEAP_OPTS: "-Xmx512M -Xms16M"
      KAFKA_OPTS: -Djava.security.auth.login.config=/opt/secrets/server_jaas.conf
  kafka-ui:
    image: provectuslabs/kafka-ui:latest
    container_name: kafka-ui
    restart: always
    ports:
        - 10010:8080
    environment:
        - DYNAMIC_CONFIG_ENABLED=true
        - SERVER_SERVLET_CONTEXT_PATH=/kafka-ui
        - KAFKA_CLUSTERS_0_NAME=local
        - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
        - KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_PLAINTEXT
        - KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN
        - KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="123456";
    depends_on:
      - zookeeper
      - kafka
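
At this point you can optionally sanity-check the YAML before creating the remaining config files:

#Validate and print the effective compose configuration (run inside the kafka folder)
docker-compose config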

server_jaas.conf configuration

#Create a secrets folder under the kafka folder
mkdir secrets
#Enter the secrets folder
cd secrets
#Create the server_jaas.conf file
vi server_jaas.conf

Copy the following content into server_jaas.conf:

Client {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="admin"
    password="123456";
};
 
 
Server {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="admin"
    password="123456"
    user_super="123456"
    user_admin="123456";
};
 
KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="123456"  
    user_admin="123456";
};
 
KafkaClient {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="123456";
};
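
Each user_<name>="<password>" entry in the KafkaServer section defines an account that clients may authenticate with (admin is also listed in KAFKA_SUPER_USERS above). To add another client account, for example a hypothetical user alice, the block could be extended like this:

KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="123456"
    user_admin="123456"
    user_alice="alice-secret";  // hypothetical extra account; clients would then connect as alice
};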

zoo.cfg configuration

#Go back to the kafka folder
cd ..
#Create a zookeeper folder under the kafka folder
mkdir zookeeper
#Enter the zookeeper folder
cd zookeeper
#Create the zoo.cfg file
vi zoo.cfg

Copy the following content into zoo.cfg:

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/opt/zookeeper-3.4.13/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
 
## Key settings that enable SASL
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000
zookeeper.sasl.client=true

Start the stack with docker-compose (run from the kafka folder, where docker-compose.yaml lives)

docker-compose up -d

Wait for the images to finish downloading and for all three containers to start successfully.
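
A quick way to verify this is to check the container status and skim the logs; the container names below come from the compose file above.

#All three services should show a State of "Up"
docker-compose ps
#The broker log should show the server finishing startup without authentication errors
docker logs kafka | tail -n 50
#ZooKeeper should have picked up the SASL provider from zoo.cfg
docker logs zookeeper | grep -i sasl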

Access the kafka-ui web interface

http://localhost:10010/kafka-ui
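
To double-check SASL authentication end to end, you can exec into the kafka container and run the bundled console producer and consumer with a small client properties file. This is a minimal sketch: the script location /opt/kafka/bin and the topic name test are assumptions based on the wurstmeister/kafka image layout, and the broker address should match whatever you put in KAFKA_ADVERTISED_LISTENERS.

#Write a client config with the same credentials as server_jaas.conf (the path inside the container is arbitrary)
docker exec -it kafka bash -c 'cat > /tmp/client.properties <<EOF
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="123456";
EOF'

#Produce one test message
docker exec -it kafka bash -c 'echo hello | /opt/kafka/bin/kafka-console-producer.sh --broker-list 192.168.1.20:9092 --topic test --producer.config /tmp/client.properties'

#Read it back
docker exec -it kafka bash -c '/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.20:9092 --topic test --from-beginning --max-messages 1 --consumer.config /tmp/client.properties'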
