- 首先你的类得 extends Configured implements Tool,并且实现 Tool 接口的 run 方法。
- 下面我把代码贴出来,如下:
最后在完成后不要忘记 job.setJarByClass(WordCountApp.class); 写这一句代码,否则会报错找不到类。

package cmd;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Classic word-count MapReduce job, run through {@link ToolRunner} so that
 * generic Hadoop options (-D, -fs, -jt, ...) given on the command line are
 * honored.
 *
 * <p>Usage: {@code hadoop jar <jar> <input path> <output path>}
 */
public class WordCountApp extends Configured implements Tool {

    public static String FILE_PATH = "";
    public static String OUT_PATH = "";

    /**
     * Configures and submits the word-count job.
     *
     * @param args args[0] = input path, args[1] = output path (on HDFS)
     * @return 0 on success, 1 on job failure, 2 on bad usage
     * @throws Exception on configuration or submission errors
     */
    @Override
    public int run(String[] args) throws Exception {
        // Fail with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: WordCountApp <input path> <output path>");
            return 2;
        }
        FILE_PATH = args[0];
        OUT_PATH = args[1];

        // Use getConf() (populated by ToolRunner) rather than a fresh
        // Configuration — otherwise every generic option parsed by ToolRunner
        // is silently thrown away, defeating the purpose of implementing Tool.
        Job job = new Job(getConf(), WordCountApp.class.getSimpleName());
        // Required when running from a jar: without it the task JVMs cannot
        // locate the mapper/reducer classes.
        job.setJarByClass(WordCountApp.class);

        // Remove a pre-existing output directory, or the job refuses to start.
        // Reuse the job's configuration instead of building a second one.
        final Configuration conf = job.getConfiguration();
        final FileSystem fileSystem = FileSystem.get(new URI(OUT_PATH), conf);
        final Path outPath = new Path(OUT_PATH);
        if (fileSystem.exists(outPath)) {
            fileSystem.delete(outPath, true);
        }

        // 1.1 input: TextInputFormat turns each line into an <offset, line> pair
        FileInputFormat.setInputPaths(job, FILE_PATH);
        job.setInputFormatClass(TextInputFormat.class);

        // 1.2 map: emit <word, 1>; declare BOTH map output types explicitly
        // (the original only set the key class and relied on defaults)
        job.setMapperClass(MyMapReduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // 1.3 partition (the original called setPartitionerClass twice)
        job.setPartitionerClass(HashPartitioner.class);
        job.setNumReduceTasks(1);

        // 1.4 sort/group and 1.5 combine: framework defaults
        // 2.1 shuffle: handled by the framework
        // 2.2 reduce: sum the 1s per word
        job.setReducerClass(MyReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // 2.3 write results back to HDFS
        FileOutputFormat.setOutputPath(job, outPath);
        job.setOutputFormatClass(TextOutputFormat.class);

        // Propagate success/failure instead of unconditionally returning 0.
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // Surface the job result as the process exit code (original dropped it).
        int exitCode = ToolRunner.run(new WordCountApp(), args);
        System.exit(exitCode);
    }

    /** Mapper: splits each tab-delimited line into words, emitting <word, 1>. */
    static class MyMapReduce extends Mapper<LongWritable, Text, Text, LongWritable> {
        // Reuse Writable instances instead of allocating per record.
        private static final LongWritable ONE = new LongWritable(1);
        private final Text word = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            final String line = value.toString();
            for (String token : line.split("\t")) {
                word.set(token);
                context.write(word, ONE);
            }
        }
    }

    /** Reducer: sums the per-word counts produced by the mapper. */
    static class MyReduce extends Reducer<Text, LongWritable, Text, LongWritable> {
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context)
                throws IOException, InterruptedException {
            long sum = 0L;
            for (LongWritable value : values) {
                sum += value.get();
            }
            context.write(key, new LongWritable(sum));
        }
    }
}
- 然后把这段代码打成 jar 包放到 Linux 系统下;待统计的输入文件则用 Hadoop 的命令上传到 HDFS 中(jar 包本身留在本地即可)。
- 最后执行命令:hadoop jar 打包的包名.jar 输入文件路径 输出文件路径