1. Requirement
Process the output of requirement 2.3 and emit the user information for the top 10 users ranked by total traffic usage.
(1) Input data
13470253144 180 180 360
13509468723 7335 110349 117684
13560439638 918 4938 5856
13568436656 3597 25635 29232
13590439668 1116 954 2070
13630577991 6960 690 7650
13682846555 1938 2910 4848
13729199489 240 0 240
13736230513 2481 24681 27162
13768778790 120 120 240
13846544121 264 0 264
13956435636 132 1512 1644
13966251146 240 0 240
13975057813 11058 48243 59301
13992314666 3008 3720 6728
15043685818 3659 3538 7197
15910133277 3156 2936 6092
15959002129 1938 180 2118
18271575951 1527 2106 3633
18390173782 9531 2412 11943
84188413 4116 1432 5548
(2) Output data
13509468723 7335 110349 117684
13975057813 11058 48243 59301
13568436656 3597 25635 29232
13736230513 2481 24681 27162
18390173782 9531 2412 11943
13630577991 6960 690 7650
15043685818 3659 3538 7197
13992314666 3008 3720 6728
15910133277 3156 2936 6092
13560439638 918 4938 5856
Custom comparator
The map output key is the total flow (LongWritable), whose natural order is ascending; the comparator below reverses that so the shuffle delivers keys in descending order.
package com.demo.mr.topN;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.WritableComparator;

// A custom comparator that sorts LongWritable keys in descending order
public class MySumFlowComparator extends WritableComparator {

    public MySumFlowComparator() {
        super(LongWritable.class);
    }

    // Compare the serialized bytes directly, without deserializing the keys
    @Override
    public int compare(byte[] b1, int s1, int l1,
                       byte[] b2, int s2, int l2) {
        long thisValue = readLong(b1, s1);
        long thatValue = readLong(b2, s2);
        // Reverse the natural ordering: larger values come first
        return (thisValue < thatValue ? 1 : (thisValue == thatValue ? 0 : -1));
    }
}
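In the Driver below the comparator is wired in through the configuration property mapreduce.job.output.key.comparator.class. A type-checked alternative, assuming the same Job object created in the Driver, would be to register it directly on the job (a minimal sketch, not what the Driver below actually does):

// Alternative to setting mapreduce.job.output.key.comparator.class by name:
// register the descending comparator directly on the Job object.
job.setSortComparatorClass(MySumFlowComparator.class);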
Mapper class
The Mapper swaps the fields: the total flow becomes the map output key, so the framework's shuffle sort applies to it, and the phone number becomes the value.
package com.demo.mr.topN;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Input record layout (tab-separated): 13470253144 180 180 360  -> phone, upFlow, downFlow, sumFlow
public class SortFlowBeanMapper extends Mapper<LongWritable, Text, LongWritable, Text> {

    private LongWritable keyOut = new LongWritable();
    private Text valueOut = new Text();

    @Override
    protected void map(LongWritable key, Text value,
            Mapper<LongWritable, Text, LongWritable, Text>.Context context)
            throws IOException, InterruptedException {
        String[] words = value.toString().split("\t");
        // Use the total flow as the map output key so the shuffle sorts on it
        long sumFlow = Long.parseLong(words[3]);
        keyOut.set(sumFlow);
        // Use the phone number as the map output value
        valueOut.set(words[0]);
        context.write(keyOut, valueOut);
    }
}
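For a quick sanity check outside Hadoop, the same split-and-parse logic can be exercised on the sample record; a minimal standalone sketch (the ParseCheck class is hypothetical and not part of the job):

// Hypothetical standalone check of the parsing done in map(),
// assuming tab-separated records like the sample line above.
public class ParseCheck {
    public static void main(String[] args) {
        String line = "13470253144\t180\t180\t360";
        String[] words = line.split("\t");
        long sumFlow = Long.parseLong(words[3]); // 360 -> map output key
        String phone = words[0];                 // "13470253144" -> map output value
        System.out.println(phone + " -> " + sumFlow);
    }
}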
Reducer class
Keys arrive in descending order of total flow, and all phone numbers sharing a total flow arrive in one reduce() call, so the Reducer simply stops once 10 records have been written (emitting any users tied with the 10th place as well).
package com.demo.mr.topN;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class SortFlowBeanReducer extends Reducer<LongWritable, Text, Text, LongWritable> {

    // Number of records written so far; kept across reduce() calls of this task
    private int count = 0;

    @Override
    protected void reduce(LongWritable sumFlow, Iterable<Text> phoneNums,
            Reducer<LongWritable, Text, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Variant that emits exactly 10 records:
        /*for (Text text : phoneNums) {
            if (count == 10) {
                break;
            }
            context.write(text, sumFlow);
            count++;
        }*/
        // Variant used here: if the 10th place is tied, the tied users are emitted as well
        if (count < 10) {
            for (Text text : phoneNums) {
                context.write(text, sumFlow);
                count++;
            }
        }
    }
}
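Note that the counter is an instance field of the Reducer, so the top-10 cut-off is only global when a single reduce task sees all keys. MapReduce defaults to one reduce task, but to make the assumption explicit it can be pinned in the Driver (a one-line sketch, assuming the job variable from the Driver below):

// Ensure all keys reach one reducer so the top-10 cut-off is global
job.setNumReduceTasks(1);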
Driver class
Besides the usual job wiring, the Driver points the sort phase at the custom comparator via the mapreduce.job.output.key.comparator.class property.
package com.demo.mr.topN;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SortFlowBeanDriver {

    public static void main(String[] args) throws Exception {
        Path outPutPath = new Path("e:/mroutput/sortflowbean");

        Configuration conf = new Configuration();
        // Make the sort phase compare map output keys with the custom descending comparator
        conf.set("mapreduce.job.output.key.comparator.class",
                "com.demo.mr.topN.MySumFlowComparator");

        // Delete the output directory if it already exists
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(outPutPath)) {
            fs.delete(outPutPath, true);
        }

        Job job = Job.getInstance(conf);
        job.setJarByClass(SortFlowBeanDriver.class);
        job.setJobName("Sortflowbean");

        job.setMapperClass(SortFlowBeanMapper.class);
        job.setReducerClass(SortFlowBeanReducer.class);

        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        FileOutputFormat.setOutputPath(job, outPutPath);
        FileInputFormat.setInputPaths(job, new Path("E:\\mrinput\\tonN"));

        job.waitForCompletion(true);
    }
}
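After the job finishes, part-r-00000 under e:/mroutput/sortflowbean contains one line per selected user in the form phone<TAB>totalFlow, in descending order of total flow; based on the sample input above the first lines would be:

13509468723	117684
13975057813	59301

Because this Mapper forwards only the phone number as the map output value, the result has two columns; reproducing the four-column sample output shown above would require carrying the whole input record through as the map output value instead.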