I. Prerequisites
(1) Sample data
File location: /root/info/data/8/flow.txt
18329153881 2481 24681
18329153882 1116 954
18329153883 2481 24681
18329153884 264 0
18329153881 132 1512
18329153883 240 0
18329153884 1527 2106
18329153882 1543 1684
(2) Field descriptions
Field meaning | phone number | upstream traffic | downstream traffic
Field name    | phone        | upflow           | downflow
Data type     | string       | long             | long
(3) Project requirement 1
Requirement: for each user (phone number), compute the total upstream traffic, the total downstream traffic, and the total traffic (total traffic = total upstream + total downstream).
(4) Requirement analysis
This is similar to WordCount: sum the values grouped by key.
Here the key is the phone number, and the value is the record's <upstream traffic, downstream traffic, upstream + downstream>.
① The map output is therefore <phone number, {upstream traffic, downstream traffic, upstream + downstream}>. This output value is no longer a basic type, so a custom object that implements Hadoop's serialization is required; its member variables are the upstream traffic, the downstream traffic, and the total traffic.
② These pairs are passed to the reduce side, which aggregates the values by key, i.e. by phone number; a worked example follows below.
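For instance, the sample data contains two records for 18329153881, (2481, 24681) and (132, 1512). Aggregating them gives a total upstream of 2481 + 132 = 2613, a total downstream of 24681 + 1512 = 26193, and a grand total of 2613 + 26193 = 28806.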
II. The code
1. Implement a custom bean to encapsulate the traffic information
Because this bean is shuttled between the map and reduce tasks, it must implement Hadoop's Writable interface so that it can be serialized.
package org.flowsum;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements Writable {
    //the custom object has the following three member variables
    private long upFlow;   //① upstream traffic
    private long downFlow; //② downstream traffic
    private long sumFlow;  //③ total traffic

    //deserialization instantiates the bean reflectively, so a no-arg constructor is mandatory
    public FlowBean() {
        super();
    }

    //a parameterized constructor for convenient initialization
    public FlowBean(long upFlow, long downFlow) {
        super();
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    //serialization: write the fields in a fixed order
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    //deserialization: read the fields back in exactly the same order
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    //toString() determines how the bean is rendered in the output file
    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
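Before wiring the bean into a job, one can sanity-check the Writable round trip locally. The sketch below is not part of the original project; the class name FlowBeanRoundTripTest is hypothetical, and it only assumes the FlowBean above is on the classpath.
package org.flowsum;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTripTest {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean(100L, 200L);
        //serialize via the Writable contract
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));
        //deserialize into a fresh instance created through the no-arg constructor
        FlowBean restored = new FlowBean();
        restored.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(restored); //expected output: 100	200	300
    }
}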
2. Developing the MapReduce traffic-summation program
2.1. Writing the map-side program
The skeleton is as follows:
public class FlowSumMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
}
- KEYIN: the byte offset at which the line starts; VALUEIN: the content of the line
- KEYOUT: the 11-digit phone number; VALUEOUT: the flow-information bean
The map() method:
splits each line of text into 3 columns: phone number, upstream traffic, downstream traffic,
wraps {upstream traffic, downstream traffic} in a FlowBean,
and emits one key-value pair per phone number it encounters.
package org.flowsum;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class FlowSumMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        //convert the line of text handed to us by the MapTask into a String
        String line = value.toString();
        //split the line on tabs into its fields
        String[] splits = line.split("\t");
        //extract the fields the business logic needs
        String telephone = splits[0]; //phone number
        String upFlow = splits[1];    //upstream traffic
        String downFlow = splits[2];  //downstream traffic
        //wrap the upstream and downstream traffic in a FlowBean
        FlowBean fb = new FlowBean(Long.parseLong(upFlow), Long.parseLong(downFlow));
        //emit the phone number as the key and the flow bean as the value
        context.write(new Text(telephone), fb);
    }
}
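A common refinement, not required for correctness, is to allocate the output key and value once and reuse them across map() calls, since context.write() serializes the objects immediately. A minimal sketch of that variant of the map() body, assuming the same FlowSumMapper class as above:
    private final Text outKey = new Text();
    private final FlowBean outValue = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] splits = value.toString().split("\t");
        long up = Long.parseLong(splits[1]);
        long down = Long.parseLong(splits[2]);
        outKey.set(splits[0]);           //reuse the same Text for every record
        outValue.setUpFlow(up);
        outValue.setDownFlow(down);
        outValue.setSumFlow(up + down);  //keep the derived field consistent
        context.write(outKey, outValue); //write() serializes the current state immediately
    }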
2.2. Writing the reduce-side program
The skeleton is as follows:
public class FlowSumReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
}
- KEYIN: the 11-digit phone number; VALUEIN: the flow-information bean
- KEYOUT: the 11-digit phone number; VALUEOUT: the flow-information bean holding the accumulated sums
The reduce() method:
receives the MapTask output and
aggregates the values (upstream and downstream traffic) grouped by key (phone number).
package org.flowsum;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

public class FlowSumReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        //initialize the two accumulators
        long sumUpFlow = 0L;   //total upstream traffic
        long sumDownFlow = 0L; //total downstream traffic
        for (FlowBean fb : values) {
            sumUpFlow += fb.getUpFlow();
            sumDownFlow += fb.getDownFlow();
        }
        //wrap the total upstream and downstream traffic in a FlowBean
        FlowBean resultsum = new FlowBean(sumUpFlow, sumDownFlow);
        //emit the phone number as the key and the aggregated bean as the value
        context.write(key, resultsum);
    }
}
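One Hadoop subtlety worth noting: the framework reuses a single FlowBean instance for every element of values, so accumulating into primitive longs, as above, is the safe pattern. Collecting the iterated objects themselves is a classic pitfall, sketched below as a hypothetical fragment inside reduce() (with java.util.List and java.util.ArrayList imported):
        //anti-pattern: every element of `cached` ends up referencing the SAME
        //FlowBean instance, which finally holds only the last record's data
        List<FlowBean> cached = new ArrayList<>();
        for (FlowBean fb : values) {
            cached.add(fb); //wrong: stores the reused object again and again
            //safe alternative: copy the values into a fresh bean
            //cached.add(new FlowBean(fb.getUpFlow(), fb.getDownFlow()));
        }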
2.3. Writing the driver program
package org.flowsum;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class FlowSum {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.230.13:9000");
        Job job = Job.getInstance(conf);
        job.setJarByClass(FlowSum.class);
        job.setMapperClass(FlowSumMapper.class);
        job.setReducerClass(FlowSumReducer.class);
        //map output and reduce output share the same types here, so setting
        //the final output types covers the map output types as well
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        Path inPath = new Path("/flow/input");
        Path outPath = new Path("/flow/output_sum");
        //delete the output directory if it already exists, otherwise the job fails
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(outPath)) {
            fs.delete(outPath, true);
        }
        FileInputFormat.addInputPath(job, inPath);
        FileOutputFormat.setOutputPath(job, outPath);
        boolean waitForCompletion = job.waitForCompletion(true);
        System.exit(waitForCompletion ? 0 : 1);
    }
}
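Packaged into a jar, the job can then be launched from the cluster in the usual way; the jar name flowsum.jar below is hypothetical:
hadoop jar flowsum.jar org.flowsum.FlowSum
hdfs dfs -cat /flow/output_sum/part-r-00000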
Result:
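The job writes one line per phone number in the form phone \t total upstream \t total downstream \t total. Assuming the sample data above is the job input, the output file would contain:
18329153881	2613	26193	28806
18329153882	2659	2638	5297
18329153883	2721	24681	27402
18329153884	1791	2106	3897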