Goal: output the 10 users with the highest traffic usage.
Data source (columns: phone number, upstream traffic, downstream traffic, total traffic):
13470253144 180 180 360
13509468723 7335 110349 117684
13560439638 918 4938 5856
13568436656 3597 25635 29232
13590439668 1116 954 2070
13630577991 6960 690 7650
13682846555 1938 2910 4848
13729199489 240 0 240
13736230513 2481 24681 27162
13768778790 120 120 240
13846544121 264 0 264
13956435636 132 1512 1644
13966251146 240 0 240
13975057813 11058 48243 59301
13992314666 3008 3720 6728
15043685818 3659 3538 7197
15910133277 3156 2936 6092
15959002129 1938 180 2118
18271575951 1527 2106 3633
18390173782 9531 2412 11943
84188413 4116 1432 5548
Code implementation. The approach: each mapper keeps a local top 10 in a TreeMap keyed by a custom FlowBean (sorted descending by total traffic); a single reducer then merges the mappers' candidates into the global top 10:
package com.isea.topN;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Custom key type holding one user's upstream, downstream, and total traffic.
 * Implementing WritableComparable lets Hadoop both serialize the bean and
 * sort map output keys by it (descending total traffic, see compareTo).
 */
public class FlowBean implements WritableComparable<FlowBean> {
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    public FlowBean(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    // A no-arg constructor is required so Hadoop can create the bean via reflection.
    public FlowBean() {
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    public void set(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    // Sort descending by total traffic, so the largest totals come first.
    @Override
    public int compareTo(FlowBean o) {
        return Long.compare(o.getSumFlow(), this.sumFlow);
    }

    // Serialization: field order must match readFields below.
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // Deserialization: read fields in the same order they were written.
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }
}
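The Writable contract is easy to sanity-check locally before touching a cluster. A minimal round-trip sketch using the first data record (plain Java; FlowBeanRoundTrip is a hypothetical test class, not part of the job):

package com.isea.topN;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean(7335, 110349);
        // Serialize with write() ...
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));
        // ... and deserialize with readFields(); field order must match.
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy); // expected: 7335	110349	117684
    }
}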
package com.isea.topN;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

/**
 * Each mapper keeps its own local top 10 in a TreeMap sorted by
 * FlowBean.compareTo (descending total traffic) and emits it in cleanup().
 */
public class TopNMapper extends Mapper<LongWritable, Text, FlowBean, Text> {
    private TreeMap<FlowBean, Text> kBeans = new TreeMap<>();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Fresh objects for every record: they are cached in the TreeMap,
        // so the usual "reuse one object per mapper" trick must not be used here.
        FlowBean flowBean = new FlowBean();
        Text v = new Text();

        // Input line: phone \t upFlow \t downFlow \t sumFlow
        String[] fields = value.toString().split("\t");
        flowBean.set(Long.parseLong(fields[1]), Long.parseLong(fields[2]));
        v.set(fields[0]);

        kBeans.put(flowBean, v);
        // Keep only the 10 largest totals: lastKey() is the current smallest.
        if (kBeans.size() > 10) {
            kBeans.remove(kBeans.lastKey());
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Emit this mapper's local top 10; the reducer merges them globally.
        for (Map.Entry<FlowBean, Text> entry : kBeans.entrySet()) {
            context.write(entry.getKey(), entry.getValue());
        }
    }
}
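One caveat with this pattern: TreeMap treats two keys as duplicates whenever compareTo returns 0, so two users with the same total traffic overwrite each other (in the sample data, three users share a total of 240). If that matters, a tie-breaking compareTo in FlowBean keeps such users distinct; a sketch, assuming ties may be broken on the individual traffic fields:

@Override
public int compareTo(FlowBean o) {
    // Descending by total traffic; break ties on upstream traffic so
    // distinct (up, down) pairs are not treated as duplicate keys.
    int cmp = Long.compare(o.getSumFlow(), this.sumFlow);
    if (cmp == 0) {
        cmp = Long.compare(o.getUpFlow(), this.upFlow);
    }
    return cmp;
}

Even then, two users with identical upstream and downstream traffic would still collide; a fully general fix would carry the phone number in the key itself.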
package com.isea.topN;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

/**
 * Merges every mapper's local top 10 into the global top 10, again using a
 * TreeMap bounded to 10 entries.
 */
public class TopNReducer extends Reducer<FlowBean, Text, Text, FlowBean> {
    private TreeMap<FlowBean, Text> flowBeanTextTreeMap = new TreeMap<>();

    @Override
    protected void reduce(FlowBean key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        for (Text value : values) {
            // Hadoop reuses the key/value objects while iterating, so copy
            // them before caching. Storing the reused references would leave
            // every TreeMap entry pointing at the last record read, and all
            // output rows would carry the same phone number.
            FlowBean flowBean = new FlowBean(key.getUpFlow(), key.getDownFlow());
            flowBeanTextTreeMap.put(flowBean, new Text(value));
            // Trim inside the loop: one reduce call may add several entries.
            if (flowBeanTextTreeMap.size() > 10) {
                flowBeanTextTreeMap.remove(flowBeanTextTreeMap.lastKey());
            }
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Swap key and value on output: phone number first, then the traffic.
        for (Map.Entry<FlowBean, Text> entry : flowBeanTextTreeMap.entrySet()) {
            context.write(entry.getValue(), entry.getKey());
        }
    }
}
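Since FlowBean is the map output key, the shuffle already delivers records to the single reducer sorted descending by total traffic, so the reducer's TreeMap is not strictly necessary. A simpler variant that just emits the first ten records (a sketch under the one-reducer assumption; TopNReducerSimple is a hypothetical name):

package com.isea.topN;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class TopNReducerSimple extends Reducer<FlowBean, Text, Text, FlowBean> {
    private int emitted = 0;

    @Override
    protected void reduce(FlowBean key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // Keys arrive in descending total-traffic order; emit the first ten
        // records and ignore the rest. context.write serializes immediately,
        // so writing the reused key/value objects directly is safe here.
        for (Text value : values) {
            if (emitted < 10) {
                context.write(value, key);
                emitted++;
            }
        }
    }
}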
package com.isea.topN;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class TopNDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hard-coded paths for local debugging; the output directory must not exist yet.
        args = new String[]{"g:/input/topN", "g:/output3"};

        // 1 Get the configuration and a Job instance
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // 2 Tell Hadoop which jar (by local class) contains this program
        job.setJarByClass(TopNDriver.class);

        // 3 Set the Mapper and Reducer classes for this job
        job.setMapperClass(TopNMapper.class);
        job.setReducerClass(TopNReducer.class);

        // 4 Set the mapper output key/value types
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);

        // 5 Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7 Submit the job configuration and jar to the framework and wait
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
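To run outside the IDE, the hard-coded args line would be dropped and the job submitted from the command line, for example (jar name and paths are placeholders):

hadoop jar topN.jar com.isea.topN.TopNDriver /input/topN /output/topN

Note that FileOutputFormat fails the job if the output directory already exists, so the chosen output path must not exist beforehand.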
Output:
13509468723 7335 110349 117684
13975057813 11058 48243 59301
13568436656 3597 25635 29232
13736230513 2481 24681 27162
18390173782 9531 2412 11943
13630577991 6960 690 7650
15043685818 3659 3538 7197
13992314666 3008 3720 6728
15910133277 3156 2936 6092
13560439638 918 4938 5856