Spark 2.1.1 High Availability Deployment

1. Extract the archives to the target directory

tar zxf scala-2.11.8.tgz -C /data/
tar zxf spark-2.1.1-bin-hadoop2.7.tgz  -C /data/
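A quick way to confirm both archives unpacked correctly before touching any configuration (a minimal sketch using the paths above):

/data/scala-2.11.8/bin/scala -version
/data/spark-2.1.1-bin-hadoop2.7/bin/spark-submit --version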

2. Edit the configuration files

cd /data/spark-2.1.1-bin-hadoop2.7/conf
cp spark-env.sh.template spark-env.sh
cp slaves.template slaves

vim spark-env.sh
## Uncomment these if the SSH port is not 22
#export SPARK_SSH_OPTS="-p 54213"
#export HADOOP_SSH_OPTS="-p 54213"

export JAVA_HOME=/data/jdk1.8.0_101
export SCALA_HOME=/data/scala-2.11.8
export HADOOP_HOME=/data/hadoop/hadoop-2.7.2
export SPARK_HOME=/data/spark-2.1.1-bin-hadoop2.7
export SPARK_MASTER_PORT=7077
export SPARK_MASTER_WEBUI_PORT=17077
export SPARK_WORKER_WEBUI_PORT=17078
export SPARK_WORKER_DIR=$SPARK_HOME/work
export SPARK_CLASSPATH=/usr/share/java/mysql-connector-java.jar
export HADOOP_CONF_DIR=/data/hadoop/hadoop-2.7.2/etc/hadoop
export YARN_CONF_DIR=/data/hadoop/hadoop-2.7.2/etc/hadoop
export HADOOP_COMMON_LIB_NATIVE_DIR="/data/hadoop/hadoop-2.7.2/lib/native"
export HADOOP_COMMON_HOME="$HADOOP_HOME"
export PATH=${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${SCALA_HOME}/bin:${SPARK_HOME}/bin:$PATH
export LD_LIBRARY_PATH=.:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$HADOOP_HOME/lib/native:$SPARK_HOME/jars
export SPARK_HISTORY_OPTS="-Dspark.history.ui.port=17079 -Dspark.history.retainedApplications=50 -Dspark.history.fs.logDirectory=hdfs://cluster1/spark2yuansheng"
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=hadoop-4:2181,hadoop-5:2181,hadoop-6:2181 -Dspark.deploy.zookeeper.dir=/dirspark"
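Once the masters are running, the ZooKeeper recovery state can be inspected from any of the quorum nodes. A minimal sketch using ZooKeeper's own CLI (it assumes zkCli.sh is on the PATH of hadoop-4/5/6):

# List the recovery directory configured in spark.deploy.zookeeper.dir;
# it is created once the first master registers with ZooKeeper.
zkCli.sh -server hadoop-4:2181 ls /dirspark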


mv spark-defaults.conf.template spark-defaults.conf
vim spark-defaults.conf
spark.eventLog.enabled=true
spark.eventLog.dir=hdfs://cluster1/spark2yuansheng
spark.yarn.jars=local:/data/spark-2.1.1-bin-hadoop2.7/jars/*
spark.driver.extraLibraryPath=/data/hadoop/hadoop-2.7.2/lib/native
spark.executor.extraLibraryPath=/data/hadoop/hadoop-2.7.2/lib/native
spark.yarn.am.extraLibraryPath=/data/hadoop/hadoop-2.7.2/lib/native
spark.history.retainedApplications=50
spark.history.fs.logDirectory=hdfs://cluster1/spark2yuansheng
spark.history.ui.port=18089
spark.deploy.recoveryMode=ZOOKEEPER
spark.deploy.zookeeper.url=hadoop-4:2181,hadoop-5:2181,hadoop-6:2181
spark.deploy.zookeeper.dir=/dirspark

3. Create the Spark event-log directory on HDFS

hdfs dfs -mkdir /spark2yuansheng
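Confirm the directory exists before pointing the history server at it; if jobs will be submitted as different users, loosening the permissions is one option (the chmod is an assumption, not part of the original setup):

hdfs dfs -ls /
hdfs dfs -chmod 777 /spark2yuansheng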

4. Start and test

cd /data/spark-2.1.1-bin-hadoop2.7/sbin
sh start-all.sh
cp /data/apache-hive-1.2.1-bin/conf/hive-site.xml /data/spark-2.1.1-bin-hadoop2.7/conf/
hadoop jar /data/hadoop/hadoop-2.7.2/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.2.jar pi 2 5
cd  /data/spark-2.1.1-bin-hadoop2.7/bin
sh spark-submit --class org.apache.spark.examples.SparkPi --master spark://hadoop-1:7077 /data/spark-2.1.1-bin-hadoop2.7/examples/jars/spark-examples_2.11-2.1.1.jar 100
sh ../sbin/start-history-server.sh
sh ../sbin/start-thriftserver.sh --hiveconf hive.server2.thrift.port=10000 --master spark://hadoop-1:7077 --executor-memory 1g --total-executor-cores 1
jps
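With the Thrift server listening on port 10000, connectivity can be verified with the beeline client shipped in the same bin directory (a quick sketch; the databases listed depend on the metastore behind hive-site.xml):

sh beeline -u jdbc:hive2://hadoop-1:10000 -e "show databases;"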

sh ../sbin/stop-all.sh
sh ../sbin/stop-thriftserver.sh
sh ../sbin/stop-history-server.sh
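The SparkPi run above used the standalone master; since spark-defaults.conf also sets spark.yarn.jars, the same example can be submitted to YARN as an extra check. A sketch, assuming HDFS and YARN are running; it does not need the standalone daemons, but spark.yarn.jars=local:... requires the Spark jars directory on every NodeManager, which holds once step 6 is done:

sh spark-submit --class org.apache.spark.examples.SparkPi --master yarn --deploy-mode cluster /data/spark-2.1.1-bin-hadoop2.7/examples/jars/spark-examples_2.11-2.1.1.jar 10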

5. Edit the slaves file

hadoop-1:
cd /data/spark-2.1.1-bin-hadoop2.7/conf
vim slaves   (update this file on every node)
hadoop-1
hadoop-2
hadoop-3
hadoop-4
hadoop-5
hadoop-6

6. Copy the files to the other servers

scp  -r  scala-2.11.8/ work@hadoop-2:/data
scp  -r  scala-2.11.8/ work@hadoop-3:/data
scp  -r  scala-2.11.8/ work@hadoop-4:/data
scp  -r  scala-2.11.8/ work@hadoop-5:/data
scp  -r  scala-2.11.8/ work@hadoop-6:/data

scp -r  spark-2.1.1-bin-hadoop2.7/ work@hadoop-2:/data
scp -r  spark-2.1.1-bin-hadoop2.7/ work@hadoop-3:/data
scp -r  spark-2.1.1-bin-hadoop2.7/ work@hadoop-4:/data
scp -r  spark-2.1.1-bin-hadoop2.7/ work@hadoop-5:/data
scp -r  spark-2.1.1-bin-hadoop2.7/ work@hadoop-6:/data
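The ten scp calls above can equally be written as a loop; a sketch (add -P 54213 to each scp if the non-default SSH port from spark-env.sh is in use):

for h in hadoop-2 hadoop-3 hadoop-4 hadoop-5 hadoop-6; do
    scp -r /data/scala-2.11.8 work@$h:/data
    scp -r /data/spark-2.1.1-bin-hadoop2.7 work@$h:/data
done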

7. Start the cluster

hadoop-1:
sh /data/spark-2.1.1-bin-hadoop2.7/sbin/start-all.sh

hadoop-2:
sh /data/spark-2.1.1-bin-hadoop2.7/sbin/start-master.sh

hadoop-1:
sh /data/spark-2.1.1-bin-hadoop2.7/sbin/start-history-server.sh
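With both masters up, one should report ALIVE and the other STANDBY. The standalone master exposes its state as JSON on the web UI port set above (17077); a quick check:

curl -s http://hadoop-1:17077/json/ | grep -o '"status"[^,}]*'
curl -s http://hadoop-2:17077/json/ | grep -o '"status"[^,}]*'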

Do not start this in production:
# --num-executors applies only on YARN, so it is dropped here; with
# --total-executor-cores 1 and --executor-cores 1 the standalone master
# launches a single 2g executor.
sh /data/spark-2.1.1-bin-hadoop2.7/sbin/start-thriftserver.sh \
--master spark://hadoop-1:7077,hadoop-2:7077 \
--deploy-mode client \
--executor-memory 2g \
--executor-cores 1 \
--total-executor-cores 1 \
--hiveconf hive.server2.thrift.port=10000