1. When the data set is small, handle the TopN directly in cleanup() of a single ReduceTask; the bean class must support serialization and sorting.
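Both listings in this post rely on a TopBean class that the original does not show. Below is a minimal sketch of what they appear to assume, reconstructed from the set/getPath/getValue calls in the code; the Writable methods and the descending compareTo only matter in part 2, where TopBean serves as the map output key:

package Pojo;

import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class TopBean implements WritableComparable<TopBean> {
    private String path;   // the counted key, e.g. a request path
    private int value;     // its occurrence count

    public void set(String path, int value) {
        this.path = path;
        this.value = value;
    }

    public String getPath() { return path; }
    public int getValue() { return value; }

    // Serialization for the shuffle; needed when TopBean is a map output key.
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(path);
        out.writeInt(value);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        path = in.readUTF();
        value = in.readInt();
    }

    // Descending by count, ties broken ascending by path, so the shuffle
    // sort delivers keys to the reducer already in TopN order (part 2).
    @Override
    public int compareTo(TopBean o) {
        if (value != o.value) {
            return Integer.compare(o.value, value);
        }
        return path.compareTo(o.path);
    }
}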
import Pojo.TopBean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
import java.util.*;
/**
* @Author:zcongxiang
* @Description:
* @Date:Created in 21:32 2019/5/31
* @Modified By:
*/
public class TopReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

    // Buffers every (key, count) pair in memory; only safe for small data sets.
    private List<TopBean> list = new ArrayList<TopBean>();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        TopBean bean = new TopBean();
        bean.set(key.toString(), count);
        list.add(bean);
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Sort descending by count, breaking ties by path.
        Collections.sort(list, new Comparator<TopBean>() {
            public int compare(TopBean o1, TopBean o2) {
                if (o1.getValue() != o2.getValue()) {
                    // Integer.compare avoids the overflow risk of o2 - o1.
                    return Integer.compare(o2.getValue(), o1.getValue());
                }
                return o1.getPath().compareTo(o2.getPath());
            }
        });
        Configuration configuration = context.getConfiguration();
        int topN = configuration.getInt("topN", 5);
        int i = 0;
        for (TopBean b : list) {
            context.write(new Text(b.getPath()), new IntWritable(b.getValue()));
            i++;
            if (i == topN) {
                break;
            }
        }
    }
}
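The driver below registers a TopMapper that is likewise not shown in the original. A minimal sketch, assuming the same space-delimited input with the counted field at index 1 as Map_First in part 2:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class TopMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);
    private Text outKey = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Assumes space-delimited log lines with the counted key in field 1.
        String[] fields = value.toString().split(" ");
        outKey.set(fields[1]);
        context.write(outKey, ONE);
    }
}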
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
* @Author:zcongxiang
* @Description:
* @Date:Created in 20:59 2019/5/31
* @Modified By:
*/
public class JobClient2 {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Run in local mode against the local file system.
        conf.set("fs.defaultFS", "file:///");
        conf.set("mapreduce.framework.name", "local");
        // TopN size, passed to the reducer through the Configuration.
        conf.setInt("topN", Integer.parseInt(args[0]));
        Job job = Job.getInstance(conf);
        job.setJarByClass(JobClient2.class);
        job.setMapperClass(TopMapper.class);
        job.setReducerClass(TopReduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job, new Path("D:\\logs\\request.dat"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\logs\\TopNNNN"));
        // A single reducer is required so that one cleanup() sees every key.
        job.setNumReduceTasks(1);
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : -1);
    }
}
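A usage note: with the local-mode settings above, this runs straight from the IDE with the Hadoop jars on the classpath; the single argument is the TopN size, so running JobClient2 with the argument 5 writes the five most frequent keys to D:\logs\TopNNNN. The D:\logs paths are hardcoded from the original, and the output directory must not already exist.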
2. When the data set is too large (Redis or in-memory buffering would overflow), split the work across two MapReduce jobs chained with a JobControl. The shuffle layer sorts by key, and the key's compare can be customized for ascending or descending order (the TopBean sketch above does this via compareTo; an external comparator alternative is sketched below). With a single reducer in the second job, the output order is global.
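An aside on customizing the sort: overriding compareTo inside the key class is one option; the shuffle order can also be swapped without touching the key by registering a sort comparator on the job. A minimal sketch of that alternative, assuming the TopBean above (the class name DescendingCountComparator is hypothetical):

import Pojo.TopBean;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

public class DescendingCountComparator extends WritableComparator {
    public DescendingCountComparator() {
        // true: let WritableComparator instantiate TopBean keys for compare()
        super(TopBean.class, true);
    }

    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        TopBean x = (TopBean) a;
        TopBean y = (TopBean) b;
        // Descending by count, ties broken ascending by path.
        if (x.getValue() != y.getValue()) {
            return Integer.compare(y.getValue(), x.getValue());
        }
        return x.getPath().compareTo(y.getPath());
    }
}

It would be registered on the second job with job2.setSortComparatorClass(DescendingCountComparator.class);.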
import Pojo.TopBean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
* @Author:zcongxiang
* @Description: TopN via two chained MapReduce jobs
* @Date:Created in 9:47 2019/6/1
* @Modified By:
*/
public class TowJobModel {

    // First map: emit (key, 1) per space-delimited log line.
    public static class Map_First extends Mapper<LongWritable, Text, Text, IntWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] fields = value.toString().split(" ");
            context.write(new Text(fields[1]), new IntWritable(1));
        }
    }

    // First reduce: sum the counts per key.
    public static class Reduce_First extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int count = 0;
            for (IntWritable value : values) {
                count += value.get();
            }
            context.write(key, new IntWritable(count));
        }
    }

    // Second map: wrap each (path, count) line in a TopBean key, so the
    // shuffle sorts records by TopBean.compareTo (descending count).
    public static class Map_Second extends Mapper<LongWritable, Text, TopBean, NullWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] split = value.toString().split("\t");
            TopBean topBean = new TopBean();
            topBean.set(split[0], Integer.parseInt(split[1]));
            context.write(topBean, NullWritable.get());
        }
    }

    // Second reduce: keys arrive already sorted, so emit only the first topN.
    public static class Reduce_Second extends Reducer<TopBean, NullWritable, Text, IntWritable> {
        private int topN;
        private int emitted = 0;

        @Override
        protected void setup(Context context) {
            topN = context.getConfiguration().getInt("topN", 5);
        }

        @Override
        protected void reduce(TopBean key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            if (emitted < topN) {
                context.write(new Text(key.getPath()), new IntWritable(key.getValue()));
                emitted++;
            }
        }
    }

    // Driver: chain the two jobs with JobControl.
    public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = new Configuration();
        // Reduce_Second reads "topN" from the configuration (default 5);
        // set it here to override, e.g. conf.setInt("topN", 10);

        // First job: word-count style aggregation.
        Job job1 = Job.getInstance(conf, "job1");
        job1.setJarByClass(TowJobModel.class);
        job1.setMapperClass(Map_First.class);
        job1.setReducerClass(Reduce_First.class);
        job1.setMapOutputKeyClass(Text.class);
        job1.setMapOutputValueClass(IntWritable.class);
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(IntWritable.class);
        // Wrap job1 in a controller.
        ControlledJob contrjob1 = new ControlledJob(conf);
        contrjob1.setJob(job1);
        // Job1 input/output paths.
        FileInputFormat.addInputPath(job1, new Path(args[0]));
        FileOutputFormat.setOutputPath(job1, new Path(args[1]));

        // Second job: re-sort by count via the TopBean key.
        Job job2 = Job.getInstance(conf, "job2");
        job2.setJarByClass(TowJobModel.class);
        job2.setMapperClass(Map_Second.class);
        job2.setReducerClass(Reduce_Second.class);
        job2.setMapOutputKeyClass(TopBean.class);
        job2.setMapOutputValueClass(NullWritable.class);
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(IntWritable.class);
        // A single reducer so the sort order is global, not per-partition.
        job2.setNumReduceTasks(1);
        // Wrap job2 in a controller.
        ControlledJob contrjob2 = new ControlledJob(conf);
        contrjob2.setJob(job2);
        // Declare the dependency: job2 may only start after job1 completes.
        contrjob2.addDependingJob(contrjob1);
        FileInputFormat.addInputPath(job2, new Path(args[1]));
        FileOutputFormat.setOutputPath(job2, new Path(args[2]));

        // Master controller owning both controlled jobs.
        JobControl jobC = new JobControl("myCon");
        jobC.addJob(contrjob1);
        jobC.addJob(contrjob2);
        // JobControl implements Runnable; it must run on its own thread.
        Thread thread = new Thread(jobC);
        thread.start();
        // Poll until both jobs finish, sleeping to avoid a busy-wait.
        while (!jobC.allFinished()) {
            Thread.sleep(500);
        }
        System.out.println(jobC.getSuccessfulJobList());
        jobC.stop();
    }
}
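A usage note: the driver takes three path arguments, input, intermediate, and final output (args[0..2]), so something like hadoop jar topn.jar TowJobModel /logs/in /logs/mid /logs/out, where the jar name and paths are placeholders. Neither output directory may exist beforehand, or the corresponding job fails at submission.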