我的安装目录: /Users/Dery/Hadoop/hadoop-2.6.2
export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home
2. 伪分布模式
<name>hadoop.tmp.dir</name>
<value>/Users/Dery/Hadoop/hadoop-2.6.2/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://localhost:9000</value>
</property>
(2) 配置yarn-site.xml
<configuration></configuration>之间增加如下内容:
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name><value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>Master</value>
</property>
(3) 创建和配置mapred-site.xml
默认etc/hadoop文件夹下有mapred-site.xml.template文件,复制一份并将文件名改为mapred-site.xml
<configuration></configuration>之间增加如下内容:
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapred.job.tracker</name>
<value>localhost:8021</value>
</property>
<property>
<name>mapred.tasktracker.map.tasks.maximum</name>
<value>2</value>
</property>
<property>
<name>mapred.tasktracker.reduce.tasks.maximum</name>
<value>2</value>
</property>
(4) 配置hdfs-site.xml
指定主机上作为namenode和datanode的目录:
/Users/Dery/Hadoop/hadoop-2.6.2/hdfs/name
/Users/Dery/Hadoop/hadoop-2.6.2/hdfs/data
<configuration></configuration>之间增加如下内容:
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/Users/Dery/Hadoop/hadoop-2.6.2/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/Users/Dery/Hadoop/hadoop-2.6.2/hdfs/data</value>
</property>
(5) 格式化hdfs
hdfs namenode -format
(6) 启动hadoop
cd /Users/Dery/Hadoop/hadoop-2.6.2/sbin
./start-dfs.sh
./start-yarn.sh
浏览器打开 http://localhost:50070/,会看到hdfs管理页面
浏览器打开 http://localhost:8088/,会看到hadoop进程管理页面
(7) WordCount验证
创建input目录
hadoop fs -mkdir -p input
fs:hadoop下的文件命令,和普通命令基本一致
拷贝文件
hadoop fs -copyFromLocal README.txt input
hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.2.jar wordcount input output
hadoop fs -cat output/*
注:成功看到结果