1> 检查对HDFS的访问(在namenode上操作)
#./hadoop-0.20.2/bin/hadoop fs -ls
2> hadoop中的文件系统操作
#./hadoop-0.20.2/bin/hadoop fs -mkdir test
#./hadoop-0.20.2/bin/hadoop fs -ls test
#./hadoop-0.20.2/bin/hadoop fs -rmr test
3> 测试hadoop
a> 创建子目录
#./hadoop-0.20.2/bin/hadoop fs -mkdir input
b> 把本地文件复制到HDFS（-put 是复制而非移动，本地原文件仍保留）
#./hadoop-0.20.2/bin/hadoop fs -put /usr/share/doc/pam-1.1.1/rfc86.0.txt input
#./hadoop-0.20.2/bin/hadoop fs -put /usr/share/doc/pam-1.1.1/Linux-PAM_SAG.txt input
c> 查看文件是否存在
#./hadoop-0.20.2/bin/hadoop fs -ls input
d> 执行计算单词频率的MapReduce作业
#./hadoop-0.20.2/bin/hadoop jar /root/hadoop-0.20.2/hadoop-0.20.2-examples.jar wordcount input output
Found 2 items
drwxr-xr-x - root supergroup 0 2012-11-26 04:25 /user/root/output/_logs
-rw-r--r-- 2 root supergroup 8315 2012-11-26 04:25 /user/root/output/part-r-00000
e> 查看输出结果
#./hadoop-0.20.2/bin/hadoop fs -ls output
#./hadoop-0.20.2/bin/hadoop fs -cat output/part-r-00000 | head -n 13
f> 从HDFS提取输出
#./hadoop-0.20.2/bin/hadoop fs -get output/part-r-00000 output.txt
#./hadoop-0.20.2/bin/hadoop fs -ls
2> hadoop中的文件系统操作
#./hadoop-0.20.2/bin/hadoop fs -mkdir test
#./hadoop-0.20.2/bin/hadoop fs -ls test
#./hadoop-0.20.2/bin/hadoop fs -rmr test
3> 测试hadoop
a> 创建子目录
#./hadoop-0.20.2/bin/hadoop fs -mkdir input
b> 把本地文件复制到HDFS（-put 是复制而非移动，本地原文件仍保留）
#./hadoop-0.20.2/bin/hadoop fs -put /usr/share/doc/pam-1.1.1/rfc86.0.txt input
#./hadoop-0.20.2/bin/hadoop fs -put /usr/share/doc/pam-1.1.1/Linux-PAM_SAG.txt input
c> 查看文件是否存在
#./hadoop-0.20.2/bin/hadoop fs -ls input
d> 执行计算单词频率的MapReduce作业
#./hadoop-0.20.2/bin/hadoop jar /root/hadoop-0.20.2/hadoop-0.20.2-examples.jar wordcount input output
Found 2 items
drwxr-xr-x - root supergroup 0 2012-11-26 04:25 /user/root/output/_logs
-rw-r--r-- 2 root supergroup 8315 2012-11-26 04:25 /user/root/output/part-r-00000
e> 查看输出结果
#./hadoop-0.20.2/bin/hadoop fs -ls output
#./hadoop-0.20.2/bin/hadoop fs -cat output/part-r-00000 | head -n 13
f> 从HDFS提取输出
#./hadoop-0.20.2/bin/hadoop fs -get output/part-r-00000 output.txt