六、Flink高可用部署

(1)下载安装包并解压到指定目录

(2)修改配置文件,添加必要依赖

(3)分发到其他节点,配置环境变量

以dataxc用户为例,脚本示例如下:

#!/bin/bash
# ins-flink.sh — install and configure a highly-available Flink 1.14.4 cluster.

# Cluster topology; the first node is the one this script runs on, the rest
# receive copies of the installation (see the disabled distribution block below).
nodes=(n101 n102 n103)
# ZooKeeper quorum used by Flink's HA services.
zk_connect='n101:2181,n102:2181,n103:2181'

# Unpack the Flink distribution into the program directory.
cd /home/dataxc/sw && tar --extract --gzip --verbose --file flink-1.14.4-bin-scala_2.11.tgz -C /home/dataxc/opt

echo -e "\n
#高可用
high-availability: zookeeper
high-availability.zookeeper.quorum: $zk_connect
high-availability.zookeeper.path.root: /flink
high-availability.storageDir: hdfs:///flink/ha
yarn.application-attempts: 3
#禁止类加载器泄露检查(hadoop3必须禁止,疑似bug)
classloader.check-leaked-classloader: false
#故障转移策略
restart-strategy: failure-rate
restart-strategy.failure-rate.max-failures-per-interval: 3
restart-strategy.failure-rate.failure-rate-interval: 5 min
restart-strategy.failure-rate.delay: 10 s
#检查点策略
execution.checkpointing.interval: 1min
execution.checkpointing.externalized-checkpoint-retention: RETAIN_ON_CANCELLATION
execution.checkpointing.max-concurrent-checkpoints: 1
execution.checkpointing.min-pause: 0ms
execution.checkpointing.mode: EXACTLY_ONCE
execution.checkpointing.timeout: 10min
execution.checkpointing.tolerable-failed-checkpoints: 3
execution.checkpointing.unaligned: true
#状态存储策略
state.backend: rocksdb
state.backend.incremental: true
state.checkpoints.num-retained: 3
state.checkpoints.dir: hdfs:///flink/checkpoints
state.savepoints.dir: hdfs:///flink/savepoints
#数据缓存可使用内存
taskmanager.memory.network.fraction: 0.1
taskmanager.memory.network.min: 64mb
taskmanager.memory.network.max: 1gb
#JM和TM之间发送的消息的最大大小,默认10M
akka.framesize: 100m
" >> /home/dataxc/opt/flink-1.14.4/conf/flink-conf.yaml

#指定当地时区
sed -i "/export FLINK_ENV_JAVA_OPTS=/ s/\${FLINK_ENV_JAVA_OPTS_TM}/\${FLINK_ENV_JAVA_OPTS_TM} -Duser.timezone='Asia/Shanghai'/" /home/dataxc/opt/flink-1.14.4/bin/{taskmanager.sh,jobmanager.sh}
sed -i 's!%d{yyyy-MM-dd HH:mm:ss,SSS}!%d{yyyy-MM-dd HH:mm:ss,SSS}{Asia/Shanghai}!' /home/dataxc/opt/flink-1.14.4/conf/log4j.properties

#添加需要的依赖
cp /home/dataxc/sw/{flink-connector-jdbc_2.11-1.14.4.jar,postgresql-42.2.23.jar,mysql-connector-java-8.0.12.jar,flink-sql-connector-kafka_2.11-1.14.4.jar} /home/dataxc/opt/flink-1.14.4/lib
cp $HADOOP_HOME/share/hadoop/yarn/hadoop-yarn-api-*.jar /home/dataxc/opt/flink-1.14.4/lib

# The block below is intentionally DISABLED: `:<<!` feeds everything up to the
# lone `!` as a here-doc to the no-op `:` builtin, a common trick for block
# comments in shell. It documents the one-time steps to distribute the install
# and environment variables to the other nodes.
# NOTE(review): the remote `source /home/dataxc/.bashrc` only affects that ssh
# session, not future logins — presumably kept for symmetry; harmless either way.
:<<!
#分发到其他节点
for node in ${nodes[*]:1}
	do
		scp -r /home/dataxc/opt/flink-1.14.4 dataxc@$node:/home/dataxc/opt/flink-1.14.4
	done

#添加环境变量(添加HADOOP_CLASSPATH前,需确保hadoop部署完成)
for node in ${nodes[*]}
	do
		ssh dataxc@$node 'sed -i -e "/^export JAVA_HOME=/a export FLINK_HOME=/home/dataxc/opt/flink-1.14.4" \
		-e "/^export PATH=/ s/$/\:\$FLINK_HOME\/bin/" \
		-e "/^export PATH=/a export HADOOP_CLASSPATH=\`hdfs classpath\`" /home/dataxc/.bashrc;
		source /home/dataxc/.bashrc'
	done
!

#--- Example: start a long-running YARN session for Flink jobs ---
#yarn-session.sh -nm flink-test -jm 1024 -tm 1024 -d

#--- Example: submit a single job as its own YARN application (per-job mode) ---
#flink run -m yarn-cluster -ynm flink-test -yjm 1024 -ytm 1024 -py test.py

#end

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值