首先数据格式是这样的:
Hello BigData
Hello Hadoop
Hello HDFS
BigData
Hadoop
Hadoop
MapReduce
Perfect
请注意:这个文本文件中有空行,所以在分割的时候会产生空值。我们要做的是在 map 阶段将空值去掉,以便于更精确地输出结果。
具体操作我已经在源代码里贴出。
package com.WorldCount;
import java.io.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.IFile;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.v2.app.webapp.App;
import org.apache.hadoop.util.GenericOptionsParser;
public class WorldCount {
public static class map1 extends Mapper<Object,Text,Text,IntWritable>{
public void map(Object key,Text value,Context context)throws IOException,InterruptedException{
String line = value.toString();
String[] list = line.split(" ");
//循环遍历依次写入
for (String word:list){
if ("".equals(word)){//这里是将切割的空值去掉
continue;
}
System.out.println(word);
context.write(new Text(word),new IntWritable(1));
}
}
}
public static class reduce1 extends Reducer<Text,IntWritable,Text,IntWritable>{
public void reduce(Text key,Iterable<IntWritable>values,Context context)throws IOException,InterruptedException{
int i = 0;
for (IntWritable value:values){
i += value.get();
}
//System.out.println(i);
context.write(key,new IntWritable(i));
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException{
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
job.setJarByClass(WorldCount.class);
job.setMapperClass(map1.class);
job.setReducerClass(reduce1.class);
job.setMapOutputValueClass(IntWritable.class);
job.setMapOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setOutputKeyClass(Text.class);
File file1 = new File("E:\\学习\\exercise\\out");
if (file1.exists()){
FileUtils.deleteDirectory(file1);
}
FileInputFormat.setInputPaths(job,new Path("E:\\学习\\exercise\\wordcount.txt"));
FileOutputFormat.setOutputPath(job,new Path("E:\\学习\\exercise\\out"));
job.waitForCompletion(true);
Boolean b = job.waitForCompletion(true);
System.out.println(b ? 0 : 1);
}
}