# 1 create the conf directory that will hold the generated config files.
# -p makes the step idempotent: re-running the script no longer aborts
# with "File exists" when conf/ is already present.
mkdir -p conf
# 2 create file zoo.cfg — ZooKeeper server configuration with SASL auth
# enabled; mounted into the container at /opt/zookeeper-3.4.13/conf/zoo.cfg
# by the docker-compose file below. The quoted "EOF" delimiter disables
# shell expansion, so the content is written verbatim; <<- would also strip
# leading tabs (none are present here). Lines starting with # below are
# comments *inside the generated file*, not shell comments.
# NOTE(review): requireClientAuthScheme / jaasLoginRenew /
# zookeeper.sasl.client are copied from common SASL setup guides — confirm
# they are honored by this ZooKeeper build before relying on them.
cat > conf/zoo.cfg <<-"EOF"
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/zookeeper-3.4.13/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000
zookeeper.sasl.client=true
EOF
# 3 create file server_jaas.conf — JAAS login contexts shared by both
# containers: Client/Server sections (DigestLoginModule) are used by
# ZooKeeper, KafkaServer/KafkaClient sections (PlainLoginModule) by Kafka.
# user_<name>="<password>" entries in a server section define the accounts
# that clients may authenticate as. The quoted "EOF" delimiter keeps the
# content verbatim (no shell expansion).
# NOTE(review): credentials are hard-coded for demo purposes — do not
# commit real secrets to version control.
cat > conf/server_jaas.conf <<-"EOF"
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="12345678";
};
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="12345678"
user_super="12345678"
user_admin="12345678";
};
KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="admin"
password="12345678"
user_admin="12345678";
};
KafkaClient {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="admin"
password="12345678";
};
EOF
# 4 create file .env — variable values that docker-compose substitutes
# into the ${HOSTIP} / ${KAFKA_PORT} placeholders of docker-compose.yml.
{
  echo 'HOSTIP=10.10.10.1'
  echo 'KAFKA_PORT=9092'
} > .env
# 5 create file docker-compose.yml — single-node ZooKeeper + Kafka stack
# with SASL/PLAIN authentication.
# Fixes vs. the original:
#   * the YAML body now carries proper 2-space indentation — the previous
#     flush-left content was invalid YAML (<<- strips only tabs, so it
#     could not rescue space-less nesting);
#   * KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND is quoted — Compose requires
#     environment values to be strings or numbers, and newer versions
#     reject a bare YAML boolean;
#   * the obsolete `version:` key is dropped (the Compose-Spec-only
#     `name:` key already requires a modern docker compose).
# The quoted "EOF" delimiter keeps ${HOSTIP}/${KAFKA_PORT} literal here;
# docker compose resolves them from .env at `up` time.
cat > docker-compose.yml <<"EOF"
name: "P1"
networks:
  net:
    driver: bridge
services:
  zookeeper_sasl:
    image: wurstmeister/zookeeper
    networks:
      - net
    restart: always
    environment:
      # Point the ZooKeeper JVM at the mounted JAAS file.
      SERVER_JVMFLAGS: '-Djava.security.auth.login.config=/opt/zookeeper-3.4.13/secrets/server_jaas.conf'
    volumes:
      - ./conf/zoo.cfg:/opt/zookeeper-3.4.13/conf/zoo.cfg
      - ./conf/server_jaas.conf:/opt/zookeeper-3.4.13/secrets/server_jaas.conf
    ports:
      - "2181:2181"
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 4G
        reservations:
          cpus: '0.5'
          memory: 200M
  kafka_sasl:
    image: wurstmeister/kafka
    networks:
      - net
    restart: always
    environment:
      # Point the Kafka JVM at the mounted JAAS file.
      KAFKA_OPTS: '-Djava.security.auth.login.config=/opt/kafka/secrets/server_jaas.conf'
      KAFKA_BROKER_ID: 0
      KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://${HOSTIP}:${KAFKA_PORT}
      KAFKA_ADVERTISED_PORT: ${KAFKA_PORT}
      KAFKA_LISTENERS: SASL_PLAINTEXT://0.0.0.0:${KAFKA_PORT}
      KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_PLAINTEXT
      KAFKA_PORT: ${KAFKA_PORT}
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
      KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
      KAFKA_SUPER_USERS: User:admin
      # Quoted: a bare `false` is a YAML boolean and Compose rejects
      # non-string environment values.
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper_sasl:2181'
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
    volumes:
      - ./conf/server_jaas.conf:/opt/kafka/secrets/server_jaas.conf
    ports:
      - "${KAFKA_PORT}:${KAFKA_PORT}"
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 4G
        reservations:
          cpus: '0.5'
          memory: 200M
    depends_on:
      - zookeeper_sasl
EOF
# docker-compose setup: Kafka with SASL_PLAINTEXT support
# First published: 2023-12-13 12:02:39