kafka_2.13-2.8.2 Foolproof Deployment Script

This post introduces an automated script for deploying a three-node Kafka cluster on hosts with root privileges. It covers setting the node IPs and network interface, installing the JDK and Kafka, and optionally configuring ACL authentication. The script verifies connectivity between the cluster nodes, aborts with a hint whenever a required variable has not been filled in, and prints the resulting configuration when the installation completes.
Script overview

Prerequisites for running the script:
1. Root privileges are required.
2. Fill the IP addresses of the three cluster nodes into the script variables node1IP, node2IP and node3IP.
3. Fill the name of the network interface (e.g. ens33) into the script variable NIC_port (an example follows this list).
4. Download jdk-8u161-linux-x64.tar.gz and place it in the /root directory.
5. The three cluster nodes must be able to reach each other over the network.
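
For example (the addresses and interface name below are placeholders; replace them with the values of your own environment):

node1IP='192.168.1.11'
node2IP='192.168.1.12'
node3IP='192.168.1.13'
NIC_port='ens33'
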
Script functions:
1. The script provides a function (acl) for configuring ACL authentication; call it if ACL authentication is required (see the note after its definition in the script).
2. To change the authentication credentials, modify the script variables username and password.

Once the prerequisites above are met, simply run the script; when it finishes it prints the resulting configuration.
To start Kafka, start ZooKeeper first: run zk_start.sh before kafka_start.sh.
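
For example, on each node (using the default kafka_path from the script):

sh /usr/local/kafka_2.13-2.8.2/zk_start.sh     # start ZooKeeper on all three nodes first
sh /usr/local/kafka_2.13-2.8.2/kafka_start.sh  # then start Kafka on each node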

#!/bin/bash

## install version jdk1.8.0_161 kafka_2.13-2.8.2

# err_dis MSG: if the previous command failed, print MSG and abort
err_dis() { if [ $? -ne 0 ]; then echo "$1"; exit 1; fi; }

# node IP
node1IP=''
node2IP=''
node3IP=''
NIC_port=''
kafka_path='/usr/local/kafka_2.13-2.8.2'
username='admin'
password='password'

# Check the NIC information to prevent misoperations
ip a | grep '^[0-9]' | awk -F "[ :]" '{print $3}' | grep -w $NIC_port &>/dev/null
err_dis 'NIC not found, please set the script variable "NIC_port" to a valid interface name'
localhost=`ip a | grep -w $NIC_port | awk 'NR==2{print $2}' | awk -F "/" '{print $1}'`

# Prompt enter node ip address
echo "$node1IP $node2IP $node3IP" | grep -w $localhost &>/dev/null
err_dis 'Local IP not found among the script variables "node1IP"-"node3IP", please set them to include this host'
ping $node1IP -c 1 &>/dev/null
err_dis "host $node1IP is unreachable, modify script variable 'node1IP'"
ping $node2IP -c 1 &>/dev/null
err_dis "host $node2IP is unreachable, modify script variable 'node2IP'"
ping $node3IP -c 1 &>/dev/null
err_dis "host $node3IP is unreachable, modify script variable 'node3IP'"


# Download jdk-8u161-linux-x64.tar.gz
if [ ! -f "/root/jdk-8u161-linux-x64.tar.gz" ];then
        echo 'jdk-8u161-linux-x64.tar.gz not found in /root, please download it from:'
        echo 'https://www.oracle.com/java/technologies/downloads/archive/'
        exit 1
fi

# Download kafka_2.13-2.8.2.tgz
cd /root
# wget https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/2.8.2/kafka_2.13-2.8.2.tgz
wget https://archive.apache.org/dist/kafka/2.8.2/kafka_2.13-2.8.2.tgz
err_dis 'Download failed: check whether wget is installed, or manually download kafka_2.13-2.8.2.tgz and upload it to the /root directory'

# Install JDK1.8
tar zxf jdk-8u161-linux-x64.tar.gz
mv jdk1.8.0_161 /usr/local
chown -R root:root /usr/local/jdk1.8.0_161

# Install kafka_2.13-2.8.2
tar zxf kafka_2.13-2.8.2.tgz
mv kafka_2.13-2.8.2 /usr/local/
chown -R root:root $kafka_path

# Create data and log directories
mkdir -p $kafka_path/{kafka_data,zk_data}

# zookeeper configure
mv $kafka_path/config/zookeeper.properties $kafka_path/config/zookeeper.properties.bak
cat > $kafka_path/config/zookeeper.properties << EOF
### ZooKeeper data directory ###
dataDir=$kafka_path/zk_data

clientPort=2161
maxClientCnxns=0
tickTime=2000
initLimit=5
syncLimit=2

### ZooKeeper server list ###
server.1=$node1IP:2266:3266
server.2=$node2IP:2266:3266
server.3=$node3IP:2266:3266
EOF
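# In server.N=<IP>:<port1>:<port2>, port1 (2266) is the quorum port followers use to connect to the
# leader and port2 (3266) is the leader-election port; N must match the myid value written further below.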

# kafka configure
mv $kafka_path/config/server.properties $kafka_path/config/server.properties.bak
cat > $kafka_path/config/server.properties << EOF
broker.id=0
listeners=PLAINTEXT://$localhost:9002

### Default replication factor ###
default.replication.factor=2

num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600

### Kafka data directory ###
log.dirs=$kafka_path/kafka_data

### Default number of partitions ###
num.partitions=1

num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000

### ZooKeeper connection ###
zookeeper.connect=$node1IP:2161,$node2IP:2161,$node3IP:2161
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
EOF

# zookeeper myid
if [ "$localhost" == $node1IP ];then echo '1' > $kafka_path/zk_data/myid;fi
if [ "$localhost" == $node2IP ];then echo '2' > $kafka_path/zk_data/myid;fi
if [ "$localhost" == $node3IP ];then echo '3' > $kafka_path/zk_data/myid;fi

# kafka broker.id
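# Note: sed backreferences only go up to \9, so "\10" below is capture group \1 ("=") followed by a
# literal digit, rewriting line 1 of server.properties to broker.id=0, broker.id=1 or broker.id=2.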
if [ "$localhost" == $node1IP ];then sed -i "1s:\(=\).*:\10:" $kafka_path/config/server.properties;fi
if [ "$localhost" == $node2IP ];then sed -i "1s:\(=\).*:\11:" $kafka_path/config/server.properties;fi
if [ "$localhost" == $node3IP ];then sed -i "1s:\(=\).*:\12:" $kafka_path/config/server.properties;fi

# Add global environment variables
# Adding java to the global PATH is recommended; otherwise the scripts under Kafka's bin directory may fail to find the JVM
echo 'export PATH=/usr/local/jdk1.8.0_161/bin:$PATH' >> /etc/profile
source /etc/profile

# zookeeper startup script zk_start.sh
cat > $kafka_path/zk_start.sh << EOF
#!/bin/bash
nohup $kafka_path/bin/zookeeper-server-start.sh $kafka_path/config/zookeeper.properties >> $kafka_path/zk_nohup.out 2>&1 &
EOF
chmod 744 $kafka_path/zk_start.sh

# kafka startup script kafka_start.sh
cat > $kafka_path/kafka_start.sh << EOF
#!/bin/bash
nohup $kafka_path/bin/kafka-server-start.sh $kafka_path/config/server.properties >> $kafka_path/kafka_nohup.out 2>&1 &
EOF
chmod 744 $kafka_path/kafka_start.sh

# Add the service to start automatically
echo "/bin/sh $kafka_path/zk_start.sh" >> /etc/rc.local
echo "/bin/sh $kafka_path/kafka_start.sh" >> /etc/rc.local

# To configure ACL authentication, call the acl function defined below (see the note after its definition)
acl(){
# Adding an acl Configuration
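# The JAAS file gives the broker its own SASL/PLAIN credentials (username/password) and lists the
# accepted accounts via user_<name>="<password>" entries; here the same admin account serves both roles.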
cat > $kafka_path/config/kafka_server_jaas.conf << EOF
KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="$username"
    password="$password"
    user_$username="$password";
};
EOF

# Add kafka Security Configuration
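# Line 2 of server.properties is the PLAINTEXT listener written earlier; "2a" appends the SASL
# settings after it and the "sed -i '2d'" below then removes the original PLAINTEXT listener line.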
sed -i "2a\\
auto.create.topics.enable=false\\
delete.topic.enable=false\\
listeners=SASL_PLAINTEXT://$localhost:9002\\
security.inter.broker.protocol=SASL_PLAINTEXT\\
sasl.mechanism.inter.broker.protocol=PLAIN\\
sasl.enabled.mechanisms=PLAIN\\
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer\\
super.users=User:admin" $kafka_path/config/server.properties
sed -i '2d' $kafka_path/config/server.properties

# Add the JAAS setting to the startup script; it must go right after the shebang (line 1) so that
# KAFKA_OPTS is exported before the nohup command starts Kafka
sed -i "1a export KAFKA_OPTS=\" -Djava.security.auth.login.config=$kafka_path/config/kafka_server_jaas.conf \"" $kafka_path/kafka_start.sh
}
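# To enable ACL authentication, uncomment the following call (see the script overview above):
# acl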


# echo
echo '---------------------------------------------------------------------------------------------------'
echo '// Installation complete! Please review the configuration below!'
echo '---------------------------------------------------------------------------------------------------'
echo "// $kafka_path/config/zookeeper.properties"
grep -Ew 'server.[0-9]|ZooKeeper server list' $kafka_path/config/zookeeper.properties
echo '---------------------------------------------------------------------------------------------------'
echo "// $kafka_path/config/server.properties"
grep -Ew 'zookeeper.connect|ZooKeeper connection' $kafka_path/config/server.properties
echo '---------------------------------------------------------------------------------------------------'
echo "// $kafka_path/zk_start.sh"
echo "// $kafka_path/kafka_start.sh"
echo '---------------------------------------------------------------------------------------------------'


# clean
cd /root
rm -f ./jdk-8u161-linux-x64.tar.gz
rm -f ./kafka_2.13-2.8.2.tgz
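
After the script has finished on all three nodes and ZooKeeper and Kafka have been started, the cluster can be checked with a quick smoke test. This is only a sketch: the topic name smoke-test is arbitrary, <node1IP> stands for the first node's address, and it assumes the default PLAINTEXT listener on port 9002 (if the acl function was used, client-side SASL properties are required in addition):

/usr/local/kafka_2.13-2.8.2/bin/kafka-topics.sh --create --topic smoke-test --partitions 3 --replication-factor 2 --bootstrap-server <node1IP>:9002
/usr/local/kafka_2.13-2.8.2/bin/kafka-topics.sh --describe --topic smoke-test --bootstrap-server <node1IP>:9002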