Implementing WordCount with MapReduce
1. Implement the Mapper
The four generic parameters of Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> are:
KEYIN: the key type of the Map task's input; the byte offset of the start of each line (Long)
VALUEIN: the value type of the Map task's input; one line of text at a time (String)
KEYOUT: the key type emitted by the custom map method (String)
VALUEOUT: the value type emitted by the custom map method (Integer)
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Split the line of input on the tab delimiter
        String[] words = value.toString().split("\t");
        for (String word : words) {
            // Emit one (word, 1) pair per word, e.g. (hello, 1), (world, 1)
            context.write(new Text(word.toLowerCase()), new IntWritable(1));
        }
    }
}
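The split above assumes tab-delimited input. If your data is separated by arbitrary whitespace instead (an assumption about the input, not part of the original example), a one-line variation:

// Variation: split on any run of whitespace (spaces or tabs)
String[] words = value.toString().split("\\s+");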
2. Implement the Reducer
The reduce method receives each key together with all the values the map phase produced for it, e.g. (hello, <1,1,1>).

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int count = 0;
        // Sum the grouped values for this key, e.g. <1,1,1>
        Iterator<IntWritable> iterator = values.iterator();
        while (iterator.hasNext()) {
            IntWritable value = iterator.next();
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}
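The explicit iterator works, but an enhanced for loop is the more idiomatic Java equivalent:

// Equivalent summation over the grouped values
for (IntWritable value : values) {
    count += value.get();
}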
3. Implement the Job (driver)

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountApp {

    public static void main(String[] args) throws Exception {
        // Run as this HDFS user; mine is hadoop
        System.setProperty("HADOOP_USER_NAME", "hadoop");
        Configuration configuration = new Configuration();
        // Point the job at the HDFS NameNode
        configuration.set("fs.defaultFS", "hdfs://192.168.199.233:8020");
        // Create a Job
        Job job = Job.getInstance(configuration);
        // Set the job's main class
        job.setJarByClass(WordCountApp.class);
        // Set the custom Mapper and Reducer classes
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // Set the Mapper's output key and value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Set the Reducer's output key and value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // If the output directory already exists, delete it first
        FileSystem fileSystem = FileSystem.get(new URI("hdfs://192.168.199.233:8020"), configuration, "hadoop");
        Path outputPath = new Path("/wordcount/output");
        if (fileSystem.exists(outputPath)) {
            fileSystem.delete(outputPath, true);
        }
        // Set the job's input and output paths
        FileInputFormat.setInputPaths(job, new Path("/wordcount/input"));
        FileOutputFormat.setOutputPath(job, outputPath);
        // Submit the job and wait for it to finish
        job.waitForCompletion(true);
    }
}
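To run the job on a cluster, package it into a jar and submit it with the hadoop CLI; the jar name and package below are placeholders for your own build:

hadoop jar wordcount.jar com.example.WordCountApp

When the job finishes, the word counts are written under /wordcount/output as part-r-00000 and so on.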
4. Combiner
A Combiner performs a first round of aggregation on the map side, which lightens the shuffle and improves I/O and network performance.
Here the aggregation logic is identical to the reduce logic,
so enabling it only takes one extra line in the job driver:
// Set the Combiner (reusing the Reducer, since the logic is the same)
job.setCombinerClass(WordCountReducer.class);
Benefit: less I/O and better job performance.
Limitation: a Combiner is only safe for commutative and associative operations such as sum, max, and min; be careful with anything involving division, such as averages!
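A quick worked example of why division breaks: suppose a key's values are 1 and 2 on one map task and 3 on another, and the goal is their average. An averaging combiner would turn the first task's output into avg(1,2) = 1.5 and the second's into avg(3) = 3.0, so the reducer computes avg(1.5, 3.0) = 2.25, while the true average is (1+2+3)/3 = 2.0. Sum, count, max, and min are safe because the combiner can run zero, one, or many times without changing the result.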
5. Partitioner
Partitioning routes each map output record to a specific reduce task, so the job's results are split across multiple output files by a custom rule.
First, define the partitioning rule class:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

// Access is the custom Writable value type used by this job
public class AccessPartitioner extends Partitioner<Text, Access> {

    /**
     * @param phone          the incoming key: a phone number
     * @param access         the incoming value
     * @param numReduceTasks the number of partitions (reduce tasks)
     */
    @Override
    public int getPartition(Text phone, Access access, int numReduceTasks) {
        if (phone.toString().startsWith("13")) {
            return 0;
        } else if (phone.toString().startsWith("15")) {
            return 1;
        } else {
            return 2;
        }
    }
}
Then add the following in the job driver for it to take effect:
// Set the custom partitioning rule
job.setPartitionerClass(AccessPartitioner.class);
// Set the number of reduce tasks (must match the number of partitions, 3 here)
job.setNumReduceTasks(3);
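With three reduce tasks the job produces three output files: part-r-00000 (phone numbers starting with 13), part-r-00001 (starting with 15), and part-r-00002 (everything else). For comparison, when no custom partitioner is set, Hadoop falls back to HashPartitioner, whose logic is essentially:

// Hadoop's default HashPartitioner: spread keys across reducers by hash
public int getPartition(K key, V value, int numReduceTasks) {
    return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
}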