----------------------------Description-----------------------------------
HelloHadoopV3
This program reuses the map and reduce functions from HelloHadoopV2, automatically uploads the input files to HDFS, and automatically fetches the results back.
It also prints usage hints, takes its folders as command-line parameters, and reports the job's running time.
How to test:
Run the program on a Hadoop 0.21.0 pseudo-distributed setup (my single-node installation: http://freewxy.iteye.com/blog/1027569)
---------------------------
hadoop jar HelloHadoopV3.jar /home/$yourname/input /home/$yourname/output-hh3
---------------------------
Notes:
1. The first parameter is a local input folder; make sure it contains files and has no subdirectories.
2. The second parameter is a local folder for the results; it is created by the program, so do not create it beforehand, and delete it if it already exists.
--------------------------------------------------------------------
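HelloMapperV2 and HelloReducerV2 are defined in the HelloHadoopV2 post and are only referenced by this program. As a rough, hypothetical sketch (not the actual HelloHadoopV2 code), classes with the Text/Text key and value types that the job below configures could implement a simple word count like this, each in its own source file under the HelloHadoopV2 package:

package HelloHadoopV2;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical mapper: emits (word, "1") for every token in a line.
public class HelloMapperV2 extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String word : value.toString().split("\\s+")) {
            if (!word.isEmpty()) {
                context.write(new Text(word), new Text("1"));
            }
        }
    }
}

// Hypothetical reducer (also reusable as the combiner): sums the partial counts.
public class HelloReducerV2 extends Reducer<Text, Text, Text, Text> {
    @Override
    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        long sum = 0;
        for (Text v : values) {
            sum += Long.parseLong(v.toString());
        }
        context.write(key, new Text(Long.toString(sum)));
    }
}

The HelloHadoopV3 driver itself: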
package HelloHadoopV3;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import CheckAndDelete.CheckAndDelete;
import GetFromHdfs.GetFromHdfs;
import HelloHadoopV2.HelloMapperV2;
import HelloHadoopV2.HelloReducerV2;
import PutToHDFS.PutToHdfs;
public class HelloHadoopV3 {
    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        String hdfs_input = "HH3_input";
        String hdfs_output = "HH3_output";
        Configuration conf = new Configuration();
        // parse the generic options and collect the remaining arguments
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        // if the number of arguments is not 2, print a usage hint and exit
        if (otherArgs.length != 2) {
            System.err.println("Usage: hadoop jar HelloHadoopV3.jar <local_input> <local_output>");
            System.exit(2);
        }
        Job job = new Job(conf, "Hadoop Hello World");
        job.setJarByClass(HelloHadoopV3.class);
        // set the mapper, combiner, and reducer classes
        job.setMapperClass(HelloMapperV2.class);
        job.setCombinerClass(HelloReducerV2.class);
        job.setReducerClass(HelloReducerV2.class);
        // set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        // set the reduce (final) output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // remove any existing folders on HDFS first to avoid "already exists" errors
        CheckAndDelete.checkAndDelete(hdfs_input, conf);
        CheckAndDelete.checkAndDelete(hdfs_output, conf);
        // upload the local input files to HDFS
        PutToHdfs.putToHdfs(otherArgs[0], hdfs_input, conf);
        // set the job's input and output paths on HDFS
        FileInputFormat.addInputPath(job, new Path(hdfs_input));
        FileOutputFormat.setOutputPath(job, new Path(hdfs_output));
        long start = System.nanoTime();
        // run the job and wait until it finishes
        boolean status = job.waitForCompletion(true);
        if (status) {
            // copy the results from HDFS back to the local output folder
            GetFromHdfs.getFromHdfs(hdfs_output, otherArgs[1], conf);
            System.err.println("Integrate Alert Job Finished!");
            // report the elapsed time
            long time = System.nanoTime() - start;
            System.err.println(time * (1E-9) + " secs.");
        } else {
            System.err.println("Integrate Alert Job Failed!");
            System.exit(1);
        }
    }
}
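The helper classes CheckAndDelete, PutToHdfs, and GetFromHdfs come from earlier posts in this series and are not shown here. A minimal sketch of what they might look like, assuming only the standard org.apache.hadoop.fs.FileSystem API (each class in its own package and source file, possibly different from the original implementations):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CheckAndDelete {
    // delete the folder on HDFS if it already exists, so the job can recreate it
    public static void checkAndDelete(String folder, Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path(folder);
        if (fs.exists(path)) {
            fs.delete(path, true); // recursive delete
        }
    }
}

public class PutToHdfs {
    // copy a local folder onto HDFS
    public static void putToHdfs(String localSrc, String hdfsDst, Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        fs.copyFromLocalFile(new Path(localSrc), new Path(hdfsDst));
    }
}

public class GetFromHdfs {
    // copy a result folder from HDFS back to the local file system
    public static void getFromHdfs(String hdfsSrc, String localDst, Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        fs.copyToLocalFile(new Path(hdfsSrc), new Path(localDst));
    }
}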
Run:
$ bin/hadoop jar HelloHadoopV3.jar /home/hadoop/input /home/hadoop/output_hh3
Result:
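Assuming the default output naming of the new MapReduce API, the reduce results land in part-r-* files under the retrieved local folder and can be inspected with, for example:

$ cat /home/hadoop/output_hh3/part-r-00000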