1. download spark-1.6.2-bin-hadoop2.6.tgz and upload it to /usr/local
cd /usr/local
tar -zxvf spark-1.6.2-bin-hadoop2.6.tgz
mv spark-1.6.2-bin-hadoop2.6 spark
2. config path
vi ~/.bashrc
add new line: export SPARK_HOME=/usr/local/spark
add to end: export PATH=$PATH:$SPARK_HOME/bin
add new line: export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
source ~/.bashrc
3. config spark
cd /usr/local/spark/conf
cp spark-env.sh.template spark-env.sh
vi spark-env.sh
add new line: export JAVA_HOME=/usr/java/latest
add new line: export SCALA_HOME=/usr/local/scala
add new line: export SPARK_MASTER_IP=192.168.1.151
add new line: export SPARK_WORKER_MEMORY=1g
add new line: export HADOOP_CONF_DIR=/usr/local/hadoop-2.6.5/etc/hadoop
cp slaves.template slaves
vi slaves
remove: localhost
add new line: centos2
add new line: centos3
4. copy spark to centos2 and centos3
scp -r /usr/local/spark root@centos2:/usr/local/
scp -r /usr/local/spark root@centos3:/usr/local/
scp ~/.bashrc root@centos2:~/
scp ~/.bashrc root@centos3:~/
ssh centos2
source ~/.bashrc
exit
ssh centos3
source ~/.bashrc
exit
5. start spark
cd /usr/local/spark/sbin
./start-all.sh (run it only on centos1)
6. check result
centos1: jps => Master
centos2: jps => Worker
centos3: jps => Worker
http://centos1:8080/