1、解压
tar -zxvf /tmp/R5LZG/hadoop-2.7.3.tar.gz -C /usr/local
2、在系统中加入环境变量
vi /etc/profile
export HADOOP_HOME=/usr/local/hadoop-2.7.3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
修改后执行 source /etc/profile 使配置生效
3、修改 hadoop-env.sh 中的 JAVA_HOME（该文件在 $HADOOP_HOME/etc/hadoop 目录下，具体操作见步骤4）
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk.i386
4、cd /usr/local/hadoop-2.7.3/etc/hadoop
vi hadoop-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk.i386
5、vi core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop1:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/hadoop-2.7.3/tmp</value>
</property>
</configuration>
6 、vi hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>/usr/local/hadoop-2.7.3/data/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/usr/local/hadoop-2.7.3/data/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop1:50090</value>
</property>
</configuration>
7、vi mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
8、 vi yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop1</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
9、vi slaves（注意文件名是 slaves，写错文件名会导致 DataNode 无法随集群启动）
hadoop1
hadoop2
hadoop3
10、scp到集群的其他机器
11、hdfs namenode -format（2.x 中 hadoop namenode -format 已弃用）
12、启动
start-dfs.sh
start-yarn.sh
13 测试
hadoop fs -mkdir -p /aaa/bbb
随便上传个什么东西上去：hadoop fs -put /usr/local/hadoop-2.7.3/etc/hadoop/slaves /aaa/bbb
查看:hadoop fs -ls /