一、Combiner合并
原始数据
截取部分数据如下:
该数据为电影评分数据,分别是电影名、评分、时间、点评人ID。
需求:得到最火的10部电影
即得到评价次数最多的10部电影,先计数再排序再取出。
自定义Combiner实现步骤:
<1>自定义一个combiner继承Reducer,重写reduce方法
<2>在job中设置:job.setCombinerClass(自定义的Combiner类.class); 其中类名应为步骤<1>中自己实现的Combiner类(例如官方WordCount示例中的WordcountCombiner;本例中应传入针对电影计数编写的Combiner类)
代码如下:
因为要使用Combiner合并,在进入reduce前,先对map阶段的数据小合并一次
所以需要两个类,一个主函数类,一个Combiner类
(1)RateHotN.java
import com.Top.UidTopBean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.codehaus.jackson.map.ObjectMapper;
import java.io.IOException;
import java.util.Comparator;
import java.util.Map;
import java.util.TreeMap;
public class RateHotN {
/**
 * Map stage: parses one JSON rating record per input line and emits
 * (movieId, 1) so the combiner/reducer can count ratings per movie.
 */
public static class RateHotNMap extends Mapper<LongWritable, Text, Text, IntWritable> {
    // Jackson's ObjectMapper is expensive to build and safe to share;
    // create it once for all mapper instances in this JVM.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
    // Reuse writable wrappers across map() calls instead of allocating
    // new Text/IntWritable per record (standard MapReduce optimization —
    // context.write serializes the value immediately, so reuse is safe).
    private final Text movieKey = new Text();
    private static final IntWritable ONE = new IntWritable(1);

    /**
     * @param key     byte offset of the line in the input split (unused)
     * @param value   one JSON record: movie name, rate, time, reviewer uid
     * @param context MapReduce context; receives (movie, 1) pairs
     * @throws IOException if the line is not valid JSON for UidTopBean
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString().trim();
        // Skip blank lines so a trailing newline in the input file
        // does not abort the whole task with a parse exception.
        if (line.isEmpty()) {
            return;
        }
        UidTopBean uidTopBean = OBJECT_MAPPER.readValue(line, UidTopBean.class);
        movieKey.set(uidTopBean.getMovie());
        context.write(movieKey, ONE);
    }
}
public static class RateHotNReduce extends Reducer<Text,IntWritable,Text,IntWritable> {
TreeMap<IntWritable,Text> map;//类似于迭代器,一个存储器
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    // Order the TreeMap by descending rating count so the hottest movies
    // sit at the head; Comparator.reverseOrder() is equivalent to the
    // hand-written "o2.compareTo(o1)" comparator.
    // NOTE(review): IntWritable counts are the map KEYS, so two movies with
    // the same count collide and one is silently dropped — confirm the
    // reduce/cleanup logic tolerates this before relying on exact top-10.
    map = new TreeMap<>(Comparator.<IntWritable>reverseOrder());
}