# 配置系统
conf/core-site.xml:
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:9000</value>
</property>
</configuration>
conf/hdfs-site.xml:
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
conf/mapred-site.xml:
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>localhost:9001</value>
</property>
</configuration>
# 配置SSH
$ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
$ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
# 格式化
$ bin/hadoop namenode -format
# 启动服务
$ bin/start-all.sh
# 浏览服务
NameNode - http://localhost:50070/
JobTracker - http://localhost:50030/
# 复制文件
$ bin/hadoop fs -put conf input
# 运行程序
$ bin/hadoop jar hadoop-0.20.2-examples.jar grep input output 'dfs[a-z.]+'
# 查看输出
$ bin/hadoop fs -get output output
$ cat output/*
$ bin/hadoop fs -cat output/*
# 停止服务
$ bin/stop-all.sh
推荐阅读文章