安装 hadoop
环境变量:
export HADOOP_HOME=/home/spark/app/hadoop-2.4.1
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_HOME=/home/spark/app/hadoop-2.4.1
export YARN_CONF_DIR=$YARN_HOME/etc/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
配置:
hadoop-env.sh:
export JAVA_HOME=/usr/lib/jvm/jdk1.8
yarn-env.sh:
export JAVA_HOME=/usr/lib/jvm/jdk1.8
hdfs-site.xml:
<property>
<name>dfs.support.append</name>
<value>true</value>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
mapred-site.xml:
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
yarn-site.xml:
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>spark02:8088</value>
</property>
core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://spark02:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/spark/app/hadoop-2.4.1/tmp</value>
</property>
spark:
环境变量:
export SPARK_HOME=/home/spark/app/spark-2.0.0-bin-hadoop2.4/
export PATH=$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin
配置:
文件spark-env.sh :
export JAVA_HOME=/usr/lib/jvm/jdk1.8
export SCALA_HOME=/home/spark/app/scala-2.11.11/
export HADOOP_CONF_DIR=/home/spark/app/hadoop-2.4.1/etc/hadoop/
export SPARK_PID_DIR=/home/spark/app/spark-2.0.0-bin-hadoop2.4/pidtmp
export SPARK_MASTER_IP=spark02
export SPARK_MASTER_PORT=7077
export SPARK_MASTER_WEBUI_PORT=8080
export SPARK_WORKER_CORES=1 #cpu核心数量
export SPARK_WORKER_MEMORY=1024m #生产中要改大,默认1G
export SPARK_WORKER_PORT=7078
export SPARK_WORKER_WEBUI_PORT=8081
export SPARK_WORKER_INSTANCES=1
export YARN_CONF_DIR=/home/spark/app/hadoop-2.4.1/etc/hadoop/
export SPARK_LIBRARY_PATH=.:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$HADOOP_HOME/lib/native
#解决 spark-shell 中 not found sc 的问题
export SPARK_LOCAL_IP=spark02
hadoop:
sbin/start-all.sh
spark:
sbin/start-all.sh
master:
http://ip:8080
worker:
http://ip:8081
task:
http://ip:4040
yarn资源管理界面:
http://ip:8088
hdfs:
http://ip:50070
启动历史服务器
$sbin/mr-jobhistory-daemon.sh start historyserver
http://ip:19888
test:
./bin/spark-submit --class org.apache.spark.examples.SparkPi --master yarn --deploy-mode cluster --driver-memory 1G --executor-memory 1G --executor-cores 1 examples/jars/spark-examples_2.11-2.0.0.jar 40