One way to tackle data skew is to write a custom partitioner, but for a wordcount-style job, spreading a key across several reducers via partitioning would make the counts wrong.
That is where a chained MR job (ChainMapper/ChainReducer) comes in: several Mappers run one after another, then a single Reducer, and further Mappers can be attached after the Reducer to keep processing the data.
As an example, let's compute word frequencies while filtering out sensitive words and dropping every word that occurs no more than twice.
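To make the chain concrete, here is a rough sketch of what each stage would emit for a single made-up input line (the line, the words, and the counts are only for illustration; "hello" is the sensitive word hard-coded in Mapper2 below):

    input line    : hello tom hello jerry tom tom
    Mapper1       : (hello,1) (tom,1) (hello,1) (jerry,1) (tom,1) (tom,1)
    Mapper2       : (tom,1) (jerry,1) (tom,1) (tom,1)        -- "hello" filtered out
    Reduce        : (jerry,2) (tom,3)
    ReduceMapper1 : (tom,3)                                   -- words with count <= 2 dropped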
Mapper1:
package com.huawei.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Splits each input line into words and emits (word, 1).
 */
public class Mapper1 extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        Text outKey = new Text();
        IntWritable outValue = new IntWritable(1);
        // Split the line on spaces and emit (word, 1) for every token.
        for (String word : value.toString().split(" ")) {
            outKey.set(word);
            context.write(outKey, outValue);
        }
    }
}
Mapper2:
package com.huawei.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Filters out sensitive words; here the word "hello" is treated as sensitive.
 */
public class Mapper2 extends Mapper<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void map(Text key, IntWritable value, Context context) throws IOException, InterruptedException {
        // Drop the sensitive word, pass everything else through unchanged.
        if (!key.toString().equals("hello")) {
            context.write(key, value);
        }
    }
}
Reduce:
package com.huawei.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Sums the counts for each word.
 */
public class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int count = 0;
        for (IntWritable n : values) {
            count += n.get();
        }
        context.write(key, new IntWritable(count));
    }
}
The Mapper after the Reduce:
package com.huawei.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Runs after the Reducer and keeps only the words that appear more than twice.
 */
public class ReduceMapper1 extends Mapper<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void map(Text key, IntWritable value, Context context) throws IOException, InterruptedException {
        if (value.get() > 2) {
            context.write(key, value);
        }
    }
}
The main method:
package com.huawei.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
import org.apache.hadoop.mapreduce.lib.chain.ChainReducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class ChainTest {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "file:///");   // run against the local file system
        Job job = Job.getInstance(conf);

        // Delete the output directory if it already exists.
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(new Path(args[1]))) {
            fs.delete(new Path(args[1]), true);
        }

        job.setJarByClass(ChainTest.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.setInputFormatClass(TextInputFormat.class);

        // Map side of the chain: tokenize, then filter out sensitive words.
        ChainMapper.addMapper(job, Mapper1.class, LongWritable.class, Text.class, Text.class, IntWritable.class, conf);
        ChainMapper.addMapper(job, Mapper2.class, Text.class, IntWritable.class, Text.class, IntWritable.class, conf);
        // Reduce side of the chain: sum the counts, then drop words with count <= 2.
        ChainReducer.setReducer(job, Reduce.class, Text.class, IntWritable.class, Text.class, IntWritable.class, conf);
        ChainReducer.addMapper(job, ReduceMapper1.class, Text.class, IntWritable.class, Text.class, IntWritable.class, conf);

        job.waitForCompletion(true);
    }
}
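A minimal sketch of how the job might be launched (the jar name and the input/output paths are made up; because fs.defaultFS is set to file:///, both paths resolve against the local file system):

    hadoop jar chain-wordcount.jar com.huawei.mr.ChainTest /tmp/words.txt /tmp/chain-out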
Overall, the implementation is quite straightforward.