1、tar zxvf spark-2.1.1-bin-hadoop2.7.tgz
2、mv slaves.template slaves
hadoop2
hadoop3
3、mv spark-env.sh.template spark-env.sh
SPARK_MASTER_HOST=hadoop1
SPARK_MASTER_PORT=7077
4、vi sbin/spark-config.sh
export JAVA_HOME=/root/apps/jdk1.7.0_80
scp -r spark-2.1.1-bin-hadoop2.7 root@hadoop2:/root/apps/
scp -r spark-2.1.1-bin-hadoop2.7 root@hadoop3:/root/apps/
sbin/start-all.sh
sbin/stop-all.sh
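Quick check (a sketch of the expected result, not part of the original notes): run jps on each node; hadoop1 should show a Master process, and hadoop2/hadoop3 should each show a Worker process.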
5、Check the master web UI: http://hadoop1:8080/
6、bin/spark-shell --master spark://hadoop1:7077
7、sc.textFile("./LICENSE").flatMap(_.split(" ")).map((_,1)).reduceByKey(_+_).collect
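A hedged variant of the same word count against HDFS (assumes LICENSE has been uploaded to HDFS, and the output path /wcout is a name made up here that must not exist yet):
sc.textFile("hdfs://hadoop1:9000/LICENSE").flatMap(_.split(" ")).map((_,1)).reduceByKey(_+_).saveAsTextFile("hdfs://hadoop1:9000/wcout")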
8、start-dfs.sh
9、hdfs dfs -mkdir /directory
10、sbin/stop-all.sh
11、mv spark-defaults.conf.template spark-defaults.conf
12、vi spark-defaults.conf
spark.eventLog.enabled true
spark.eventLog.dir hdfs://hadoop1:9000/directory
13、vi spark-env.sh
export SPARK_HISTORY_OPTS="-Dspark.history.ui.port=4000 -Dspark.history.retainedApplications=3 -Dspark.history.fs.logDirectory=hdfs://hadoop1:9000/directory"
scp -r spark-2.1.1-bin-hadoop2.7 root@hadoop2:/root/apps/
scp -r spark-2.1.1-bin-hadoop2.7 root@hadoop3:/root/apps/
14、sbin/start-all.sh
sbin/start-history-server.sh
http://hadoop1:4000/
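If the history server started correctly (a sketch of what to expect): jps should additionally show a HistoryServer process, and applications that wrote event logs to hdfs://hadoop1:9000/directory appear at http://hadoop1:4000/ after they finish.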
15、HA (standalone master high availability via ZooKeeper)
vi conf/spark-env.sh
#SPARK_MASTER_HOST=hadoop1
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=hadoop1:2181,hadoop2:2181,hadoop3:2181 -Dspark.deploy.zookeeper.dir=/spark"
scp -r spark-2.1.1-bin-hadoop2.7 root@hadoop2:/root/apps/
scp -r spark-2.1.1-bin-hadoop2.7 root@hadoop3:/root/apps/
16、bin/zkServer.sh start (run in the ZooKeeper directory on hadoop1, hadoop2, and hadoop3)
[root@hadoop1 spark-2.1.1-bin-hadoop2.7]# sbin/start-all.sh
[root@hadoop2 spark-2.1.1-bin-hadoop2.7]# sbin/start-master.sh
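To verify HA (a sketch under the setup above): http://hadoop1:8080/ should report Status: ALIVE and http://hadoop2:8080/ Status: STANDBY; on the ZooKeeper side, bin/zkCli.sh -server hadoop1:2181 followed by ls /spark shows the znodes Spark registered. Killing the Master process on hadoop1 should let hadoop2 take over within a minute or two.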
17、bin/spark-shell --master spark://hadoop1:7077,hadoop2:7077
// The first Spark program to submit
bin/spark-submit --class org.apache.spark.examples.SparkPi \
  --master spark://hadoop1:7077 \
  --executor-memory 1G \
  --total-executor-cores 2 \
  /root/apps/spark-2.1.1-bin-hadoop2.7/examples/jars/spark-examples_2.11-2.1.1.jar 100
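Roughly the same computation typed into spark-shell (a sketch for illustration only; slices mirrors the trailing 100 above, and the block can be entered with :paste):
val slices = 100
val n = 100000 * slices
val count = sc.parallelize(1 to n, slices).map { _ =>
  val x = math.random * 2 - 1
  val y = math.random * 2 - 1
  if (x * x + y * y <= 1) 1 else 0
}.reduce(_ + _)
println("Pi is roughly " + 4.0 * count / n)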
18、Install Everything and use it to find and delete the dependencies that Maven failed to download; search for:
*.lastUpdated
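Without Everything, the same cleanup can be done from a shell (assumes the default local repository at ~/.m2/repository):
find ~/.m2/repository -name "*.lastUpdated" -delete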