Master: c1 10.0.9.1 NameNode, ResourceManager
Slave1: c2 10.0.9.2 DataNode, NodeManager
Slave2: c3 10.0.9.3 DataNode, NodeManager
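If the nodes should also be reachable by the hostnames above, map them in /etc/hosts on every node; a minimal sketch using the names from the table (adjust if your hostnames differ):
# vi /etc/hosts (on Master, Slave1 and Slave2)
10.0.9.1 c1
10.0.9.2 c2
10.0.9.3 c3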
ssh passwordless login
generate keys and authorized_keys (on Master, Slave1 and Slave2)
# vi /etc/ssh/sshd_config
RSAAuthentication yes
PubkeyAuthentication yes
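After editing sshd_config, restart sshd so the settings take effect; assuming a systemd-based distribution such as CentOS 7:
# systemctl restart sshd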
# cd /root
# ssh-keygen -t rsa
# cd /root/.ssh
# cat id_rsa.pub >> authorized_keys
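If key login is still refused, sshd's StrictModes check usually requires strict permissions on the key files; a typical fix:
# chmod 700 /root/.ssh
# chmod 600 /root/.ssh/authorized_keys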
merge authorized_keys (on Master)
# cd /root/.ssh
# ssh root@10.0.9.2 cat ~/.ssh/id_rsa.pub >> authorized_keys
# ssh root@10.0.9.3 cat ~/.ssh/id_rsa.pub >> authorized_keys
# scp authorized_keys root@10.0.9.2:/root/.ssh
# scp authorized_keys root@10.0.9.3:/root/.ssh
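Before rebooting, verify that passwordless login works from the Master, for example:
# ssh root@10.0.9.2 hostname
# ssh root@10.0.9.3 hostname
Each command should print the remote hostname without asking for a password.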
# reboot
prepare installation packages
uncompress & install
# rpm -i jdk-8u121-linux-x64.rpm
# tar -zxvf apache-ant-1.10.1-bin.tar.gz
# tar -zxvf apache-maven-3.3.9-bin.tar.gz
# tar -zxvf hadoop-2.7.3.tar.gz
# mv apache-ant-1.10.1 /usr/local/
# mv apache-maven-3.3.9 /usr/local/
# mv hadoop-2.7.3 /usr/local/
set environment variables
# vi /etc/profile
export JAVA_HOME=/usr/java/default
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib
export ANT_HOME=/usr/local/apache-ant-1.10.1
export MAVEN_HOME=/usr/local/apache-maven-3.3.9
export HADOOP_HOME=/usr/local/hadoop-2.7.3
export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:$MAVEN_HOME/bin:$HADOOP_HOME/bin
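Reload the profile and verify the tools are on the PATH (repeat on every node); for example:
# source /etc/profile
# java -version
# hadoop version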
configure hadoop
configure master (on Master)
# cd /usr/local/hadoop-2.7.3/etc/hadoop
# vi core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://10.0.9.1:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/data/hadoop/tmp</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
</configuration>
# vi hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/data/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/data/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>10.0.9.1:9001</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
</configuration>
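mapred-site.xml does not exist in the Hadoop 2.7.3 distribution; create it from the bundled template before editing:
# cp mapred-site.xml.template mapred-site.xml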
# vi mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>10.0.9.1:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>10.0.9.1:19888</value>
</property>
</configuration>
# vi yarn-site.xml
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>10.0.9.1:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>10.0.9.1:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>10.0.9.1:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>10.0.9.1:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>10.0.9.1:8088</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>768</value>
</property>
</configuration>
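Note: 768 MB for yarn.nodemanager.resource.memory-mb is below YARN's default minimum container allocation of 1024 MB, so containers may never be granted on such small nodes; one option is to lower the minimum as well, for example:
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>256</value>
</property>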
# vi hadoop-env.sh
export JAVA_HOME=/usr/java/default
# vi yarn-env.sh
export JAVA_HOME=/usr/java/default
# vi slaves
#localhost
10.0.9.2
10.0.9.3
copy hadoop directory from Master to Slaves
# scp -r /usr/local/hadoop-2.7.3 10.0.9.2:/usr/local/
# scp -r /usr/local/hadoop-2.7.3 10.0.9.3:/usr/local/
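Create the directories referenced by hadoop.tmp.dir, dfs.namenode.name.dir and dfs.datanode.data.dir on every node before starting, for example:
# mkdir -p /data/hadoop/tmp /data/hadoop/dfs/name /data/hadoop/dfs/data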
run hadoop
initialize the NameNode (on Master)
# cd /usr/local/hadoop-2.7.3/bin
# ./hdfs namenode -format
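A successful format ends with a message that the storage directory has been successfully formatted; do not rerun it later, since reformatting wipes the HDFS metadata.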
start hadoop
# cd /usr/local/hadoop-2.7.3/sbin
# ./start-all.sh (equivalent to ./start-dfs.sh followed by ./start-yarn.sh)
# jps (view running Java processes)
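If the cluster is healthy, jps on the Master typically lists NameNode, SecondaryNameNode and ResourceManager, while jps on each Slave lists DataNode and NodeManager. The web UIs are also worth a check: the NameNode at http://10.0.9.1:50070 and the ResourceManager at http://10.0.9.1:8088 (matching yarn.resourcemanager.webapp.address above). A quick end-to-end test is the bundled pi example; a minimal sketch, assuming the default examples jar location:
# cd /usr/local/hadoop-2.7.3
# ./bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar pi 2 10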
# ./stop-all.sh