conf/core-site.xml:
<configuration>
  <property>
    <!-- Replace "localhost" with your own hostname. -->
    <!-- fs.default.name is deprecated in Hadoop 2.x; fs.defaultFS is the current key. -->
    <name>fs.defaultFS</name>
    <value>hdfs://localhost:9000</value>
    <description>HDFS URI: filesystem://namenode-host:port</description>
  </property>
  <property>
    <!-- Temporary-file location; set this to any path you prefer. -->
    <name>hadoop.tmp.dir</name>
    <value>file:/home/hadoop/hadoop2/data/tmp</value>
    <description>Local Hadoop temporary directory on the namenode</description>
  </property>
</configuration>
conf/hdfs-site.xml:
<configuration>
  <property>
    <!-- Change to wherever you want the data stored; same for the paths below. -->
    <name>dfs.namenode.name.dir</name>
    <value>file:/home/hadoop/hadoop2/data/name</value>
    <description>Where the namenode stores HDFS namespace metadata</description>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/hadoop/hadoop2/data/data</value>
    <description>Physical storage location of data blocks on the datanode</description>
  </property>
  <property>
    <!-- dfs.checkpoint.dir is deprecated in Hadoop 2.x; use dfs.namenode.checkpoint.dir. -->
    <name>dfs.namenode.checkpoint.dir</name>
    <value>file:/home/hadoop/hadoop2/data/snn</value>
    <description>Secondary namenode checkpoint image directory</description>
  </property>
  <property>
    <!-- Likewise, dfs.checkpoint.edits.dir is deprecated in Hadoop 2.x. -->
    <name>dfs.namenode.checkpoint.edits.dir</name>
    <value>file:/home/hadoop/hadoop2/data/snn</value>
    <description>Secondary namenode checkpoint edits directory</description>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
    <description>Replication factor; the default is 3, and it should not exceed the number of datanodes</description>
  </property>
</configuration>
conf/mapred-site.xml:
<configuration>
  <!-- Run MapReduce jobs on YARN rather than the classic/local framework. -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
conf/yarn-site.xml:
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
    <description>YARN delivers map output to reducers via the shuffle auxiliary service</description>
  </property>
  <property>
    <!-- Since Hadoop 2.2 the key must embed the aux-service name declared above
         ("mapreduce_shuffle"), not the older "mapreduce.shuffle" spelling. -->
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
</configuration>