1.修改hadoop-env.sh
export JAVA_HOME=/home/hadoop/jdk
2.修改core-site.xml
<configuration>
<!-- 指定 HDFS 主节点(namenode)的通信地址 -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop01:9000</value>
</property>
<!-- 指定 hadoop 运行时产生文件的存储路径 -->
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/data/hadoopdata</value>
</property>
</configuration>
3.修改hdfs-site.xml
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>/home/hadoop/data/hadoopdata/name</value>
<description>为了保证元数据的安全一般配置多个不同目录</description>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/home/hadoop/data/hadoopdata/data</value>
<description>datanode 的数据存储目录</description>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
<description>HDFS 的数据块的副本存储个数</description>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop02:50090</value>
<description>secondarynamenode 运行节点的信息,和 namenode 不同节点</description>
</property>
</configuration>
4.修改mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
5.修改yarn-site.xml文件
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop03</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
6.修改slaves文件
hadoop01
hadoop02
hadoop03
7.修改好启动之前要对 namenode 进行格式化初始化(只在首次启动前执行一次)
hdfs namenode -format
8.在HDFS主节点(hadoop01)上启动hdfs
start-dfs.sh
9.检测集群是否启动成功:
hadoop fs -ls /
访问hdfs集群的web地址:
http://hadoop01:50070