spark
master
1. 编写hosts
# Step 1: map cluster hostnames in /etc/hosts, then copy it to the slaves.
vi /etc/hosts
10.117.0.12 master
10.117.0.12 slave1  # NOTE(review): same IP as master — almost certainly a typo; confirm slave1's real address
10.117.0.15 slave2
# Distribute /etc/hosts to the slave nodes.
# (-r is for directories; /etc/hosts is a regular file, plain scp suffices.)
scp /etc/hosts slave1:/etc/
scp /etc/hosts slave2:/etc/
2. 安装解压scala并配置profile
# Step 2: unpack Scala 2.11.12 into /usr/scala and register it in /etc/profile.
cd /opt/soft/
mkdir -p /usr/scala
tar -zxvf scala-2.11.12.tgz -C /usr/scala/
cd /usr/scala/scala-2.11.12/
# Append the two export lines below to /etc/profile.
vim /etc/profile
export SCALA_HOME=/usr/scala/scala-2.11.12
export PATH=$SCALA_HOME/bin:$PATH
# Reload the profile and verify the installation.
source /etc/profile
scala -version
分发软件和配置文件
# Ship the Scala install (a directory, so -r) and the updated profile
# (a regular file, so no -r needed) to both slaves.
scp -r /usr/scala root@slave1:/usr/
scp /etc/profile root@slave1:/etc/
scp -r /usr/scala root@slave2:/usr/
scp /etc/profile root@slave2:/etc/
slave1、slave2 节点分别执行:
# On each slave: reload the profile and confirm Scala is on PATH.
source /etc/profile
scala -version
master
3. 安装解压spark
# Step 3: unpack Spark 2.4.0 (Hadoop 2.7 build) into /usr/spark.
cd /opt/soft/
mkdir -p /usr/spark
tar -zxvf spark-2.4.0-bin-hadoop2.7.tgz -C /usr/spark/
进入配置文件目录 /usr/spark/spark-2.4.0-bin-hadoop2.7/conf/
开始配置
cd /usr/spark/spark-2.4.0-bin-hadoop2.7/conf/
# Configure spark-env.sh: copy the template, then add the exports below.
cp spark-env.sh.template spark-env.sh
vim spark-env.sh
# SPARK_MASTER_IP is deprecated since Spark 2.0; the documented variable
# for this spark-2.4.0 distribution is SPARK_MASTER_HOST.
export SPARK_MASTER_HOST=master
export SCALA_HOME=/usr/scala/scala-2.11.12
export SPARK_WORKER_MEMORY=8g
export JAVA_HOME=/usr/java/jdk1.8.0_171
export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
export HADOOP_CONF_DIR=/usr/hadoop/hadoop-2.7.3/etc/hadoop
配置slaves
# List the worker hostnames in conf/slaves, one per line.
cp slaves.template slaves
vim slaves
slave1
slave2
配置master（在 conf 目录下新建 master 文件，写入主节点主机名）
# Create conf/master containing the master node's hostname.
vim master
master
配置profile
# Register SPARK_HOME in /etc/profile and reload it.
vim /etc/profile
export SPARK_HOME=/usr/spark/spark-2.4.0-bin-hadoop2.7
export PATH=$SPARK_HOME/bin:$PATH
source /etc/profile
分发配置文件、软件、profile
# Ship the Spark install (a directory, so -r) and the updated profile
# (a regular file, so no -r needed) to both slaves.
scp -r /usr/spark root@slave1:/usr/
scp -r /usr/spark root@slave2:/usr/
scp /etc/profile root@slave1:/etc/
scp /etc/profile root@slave2:/etc/
slave1、slave2 分别执行:
source /etc/profile  # reload so the spark/scala bin dirs take effect in this shell