1. Requirement
Post-process the output of the flow-count case and emit the information of the 10 users with the highest total traffic usage.
2. Requirement Analysis
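The flow-count case already produces one record per phone number (upstream, downstream, and total traffic). To extract the top 10 users, the records have to be ordered by total traffic in descending order, which is what the compareTo method of FlowBean below provides, and the output then has to be limited to the first ten entries.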
3. Writing the Code
(1) FlowBean
Building on the version from the flow-count case, FlowBean now implements WritableComparable&lt;FlowBean&gt; and overrides compareTo so that beans sort by total traffic in descending order:
package com.wolf.mr.topn;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements WritableComparable<FlowBean> {

    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    // No-arg constructor, required so the framework can instantiate the bean via reflection
    public FlowBean() {
        super();
    }

    public FlowBean(long upFlow, long downFlow) {
        super();
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        sumFlow = upFlow + downFlow;
    }

    // Serialization method
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(downFlow);
        dataOutput.writeLong(sumFlow);
    }

    // Deserialization method; fields must be read in the same order they were written
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        upFlow = dataInput.readLong();
        downFlow = dataInput.readLong();
        sumFlow = dataInput.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    public void set(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        sumFlow = upFlow + downFlow;
    }

    // Descending order by total traffic: a larger sumFlow sorts first
    @Override
    public int compareTo(FlowBean bean) {
        int result;
        if (this.sumFlow > bean.getSumFlow()) {
            result = -1;
        } else if (this.sumFlow < bean.getSumFlow()) {
            result = 1;
        } else {
            result = 0;
        }
        return result;
    }
}
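To see the effect of compareTo in isolation, here is a minimal local check (not part of the job; CompareToDemo is a hypothetical class placed in the same package as FlowBean): sorting a list of beans puts the largest total first.

package com.wolf.mr.topn;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class CompareToDemo {
    public static void main(String[] args) {
        List<FlowBean> beans = new ArrayList<>();
        beans.add(new FlowBean(100, 200));  // sumFlow = 300
        beans.add(new FlowBean(500, 1000)); // sumFlow = 1500
        beans.add(new FlowBean(10, 20));    // sumFlow = 30
        Collections.sort(beans);            // uses FlowBean.compareTo
        for (FlowBean b : beans) {
            System.out.println(b);          // prints totals 1500, 300, 30 - descending
        }
    }
}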
(2) FlowCountMapper
package com.wolf.mr.topn;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    Text k = new Text();
    FlowBean v = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Sample record:
        // 1	13736230513	192.196.100.1	www.atguigu.com	2481	24681	200

        // 1. Read one line
        String line = value.toString();

        // 2. Split on tabs
        String[] fields = line.split("\t");

        // 3. Populate the output key and value
        k.set(fields[1]); // phone number

        // Note the indexing here: counting from the end of the array,
        // so records with a missing column still yield the flow fields
        long upFlow = Long.parseLong(fields[fields.length - 3]);
        long downFlow = Long.parseLong(fields[fields.length - 2]);
        v.setUpFlow(upFlow);
        v.setDownFlow(downFlow);
        // v.set(upFlow, downFlow); // alternative that also fills in sumFlow

        // 4. Write out
        context.write(k, v);
    }
}
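Why index from the end of the array? Some raw records omit the URL column, so absolute indices would shift, while counting from the end always lands on the flow fields. A small standalone check (the first record is the sample from the mapper comment; the second is a hypothetical record without a URL):

public class FieldIndexDemo {
    public static void main(String[] args) {
        String withUrl    = "1\t13736230513\t192.196.100.1\twww.atguigu.com\t2481\t24681\t200";
        String withoutUrl = "2\t13846544121\t192.196.100.2\t264\t0\t200"; // hypothetical line, no URL
        for (String line : new String[]{withUrl, withoutUrl}) {
            String[] f = line.split("\t");
            // Both records resolve to the correct flow columns
            System.out.println(f[1] + " up=" + f[f.length - 3] + " down=" + f[f.length - 2]);
        }
    }
}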
(3) FlowCountReducer
package com.wolf.mr.topn;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    FlowBean v = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        // Input: phone number -> <upFlow, downFlow, sumFlow (not yet computed)>

        // 1. Sum the upstream and downstream traffic
        long sum_upFlow = 0;
        long sum_downFlow = 0;
        for (FlowBean flowBean : values) {
            sum_upFlow += flowBean.getUpFlow();
            sum_downFlow += flowBean.getDownFlow();
        }
        v.set(sum_upFlow, sum_downFlow); // also computes sumFlow

        // 2. Write out
        context.write(key, v);
    }
}
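As written, this reducer emits totals for every phone number, so the top-10 cut still has to be applied. A common pattern is to keep a bounded TreeMap and flush it in cleanup(). The sketch below is one way to do that under the assumptions of this case (TopNReducer is a hypothetical name; note that compareTo treats equal totals as duplicate TreeMap keys, so ties would need an extra tie-breaker in practice):

package com.wolf.mr.topn;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

public class TopNReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    // Sorted by FlowBean.compareTo, i.e. descending sumFlow, so lastKey() is the smallest total
    private final TreeMap<FlowBean, Text> top = new TreeMap<>();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) {
        long upFlow = 0;
        long downFlow = 0;
        for (FlowBean bean : values) {
            upFlow += bean.getUpFlow();
            downFlow += bean.getDownFlow();
        }
        // Fresh objects on every call: the TreeMap holds references, so a reused buffer would be overwritten
        top.put(new FlowBean(upFlow, downFlow), new Text(key));
        if (top.size() > 10) {
            top.remove(top.lastKey()); // evict the smallest total once more than 10 are held
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Emit the surviving 10 records, largest total first
        for (Map.Entry<FlowBean, Text> entry : top.entrySet()) {
            context.write(entry.getValue(), entry.getKey());
        }
    }
}

With a single reduce task this yields a global top 10; with several reduce tasks, each would only produce its own local top 10.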
(4) FlowCountDriver
package com.wolf.mr.topn;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FlowCountDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Get the job object
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Set the jar storage path
        job.setJarByClass(FlowCountDriver.class);

        // 3. Link the mapper and reducer
        job.setMapperClass(FlowCountMapper.class);
        job.setReducerClass(FlowCountReducer.class);

        // 4. Set the mapper's output key and value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // 5. Set the final output key and value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // Leftovers from the partition case, not needed here:
        // job.setPartitionerClass(ProvincePartitioner.class);
        // job.setNumReduceTasks(5);

        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7. Submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
Arguments
/home/wolf/phonesort.txt /home/wolf/output/topn_out
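These two values reach main as args[0] (input file) and args[1] (output directory), e.g. as program arguments in the IDE's run configuration. Note that FileOutputFormat requires the output directory not to exist yet; the job fails if /home/wolf/output/topn_out is already present.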
Run the program
Run result
The TopN case has been implemented successfully with MapReduce.