WordCount思想:通过Mapper对文档内容进行分词,再由Reducer对相同词汇进行汇总、统计(理解如有偏差,欢迎路过的大神指正)
MyMapper:
package com.alvis.workcount;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * Map phase of WordCount: tokenizes each input line and emits a
 * (word, 1) pair for every token.
 *
 * Input:  (byte offset of the line, line text)
 * Output: (word, 1)
 */
public class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
// Reusable output objects — map() runs once per input line, so allocating
// a fresh Text/IntWritable per token creates needless GC pressure.
private final Text outWord = new Text();
private static final IntWritable ONE = new IntWritable(1);
@Override
protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
throws IOException, InterruptedException {
// Split on any run of whitespace (not a single space) so tabs and
// consecutive spaces do not yield empty-string "words".
String[] words = value.toString().split("\\s+");
for (String word : words) {
// split("\\s+") can still produce one empty leading token when the
// line starts with whitespace — skip it instead of counting it.
if (word.isEmpty()) {
continue;
}
outWord.set(word);
context.write(outWord, ONE);
}
}
}
MyReduce:
package com.alvis.workcount;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * Reduce phase of WordCount: sums the per-occurrence counts emitted by
 * the mapper for each word.
 *
 * Input:  (word, [1, 1, ...])
 * Output: (word, total count)
 */
public class MyReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
// Reusable output value — avoids allocating one IntWritable per key group.
private final IntWritable total = new IntWritable();
@Override
protected void reduce(Text key3, Iterable<IntWritable> value3,
Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable count : value3) {
sum += count.get();
}
total.set(sum);
// Write the incoming key directly — the original's `new Text(key3)`
// copy was an unnecessary per-group allocation.
context.write(key3, total);
}
}
MyJob:
package com.alvis.workcount;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Driver that configures and submits the WordCount job.
 *
 * Usage: MyJob [inputPath [outputPath]]
 * Falls back to the original hard-coded HDFS paths when no arguments
 * are supplied, so existing invocations keep working.
 */
public class MyJob {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
// 1. Create the job from the cluster configuration.
Configuration configuration = new Configuration();
Job job = Job.getInstance(configuration, "word count");
job.setJarByClass(MyJob.class);
// 2. Map stage and its output types.
job.setMapperClass(MyMapper.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
// 3. Reduce stage and final output types. Summation is associative and
// commutative, so the reducer also serves as a combiner to cut the
// volume of data shuffled across the network.
job.setCombinerClass(MyReduce.class);
job.setReducerClass(MyReduce.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
// 4. Input/output paths: take them from the command line when given,
// otherwise keep the original defaults (backward compatible).
String inputPath = args.length > 0 ? args[0] : "/input/test.txt";
String outputPath = args.length > 1 ? args[1] : "/output/2019326";
FileInputFormat.setInputPaths(job, new Path(inputPath));
FileOutputFormat.setOutputPath(job, new Path(outputPath));
// 5. Run and propagate success/failure to the shell — the original
// ignored waitForCompletion's return value, so a failed job still
// exited with status 0.
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}