MapReduce 2: Serialization
1. Hadoop Serialization
1.1 Comparison with Java Serialization
Java's built-in serialization framework (Serializable) attaches a lot of extra information to every serialized object, such as checksum data, a header, and the class inheritance hierarchy, which makes the serialized form heavyweight to transfer over the network.
Hadoop's own serialization mechanism, Writable, was designed to be:
- Compact: efficient use of storage and bandwidth
- Fast: low overhead for serialization and deserialization
- Interoperable: data can be exchanged with programs written in other languages
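To make "compact" concrete, the minimal sketch below (the SizeDemo class is made up for illustration) serializes a single long value both through Java's ObjectOutputStream and through the raw DataOutput calls that Writable implementations use. The Java-serialized form carries a stream header, class descriptor, and other metadata; the raw form is exactly the 8 bytes of the long.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;

public class SizeDemo {
    public static void main(String[] args) throws IOException {
        // Java serialization: wraps the value with a stream header,
        // class descriptor, and other metadata (typically tens of bytes)
        ByteArrayOutputStream javaBytes = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(javaBytes)) {
            oos.writeObject(Long.valueOf(1024L));
        }
        // Writable-style serialization: writes only the raw 8 bytes of the long
        ByteArrayOutputStream rawBytes = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(rawBytes)) {
            dos.writeLong(1024L);
        }
        System.out.println("Java serialization: " + javaBytes.size() + " bytes");
        System.out.println("Raw DataOutput:     " + rawBytes.size() + " bytes"); // 8
    }
}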
1.2 Implementing the Serialization Interface on a Bean
- Implement the Writable interface.
- Provide a no-argument constructor.
  - Deserialization creates the bean reflectively through the no-arg constructor, so it must exist.
- Override the serialization method write().
- Override the deserialization method readFields().
  - The deserialization order must match the serialization order exactly.
- To make the results readable in the output file, override toString(); separating the fields with \t makes them easy to use later.
- If the custom bean is to be transported as a key, it must also implement Comparable, because the Shuffle phase of the MapReduce framework requires that keys be sortable (see the sketch after this list).
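For the key case, Hadoop provides WritableComparable, which combines Writable and Comparable. A minimal sketch, assuming a made-up SortableBean with a single amount field (the class and field names are illustrative, not part of this project):

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class SortableBean implements WritableComparable<SortableBean> {
    private long amount;

    public SortableBean() {
    } // no-arg constructor, required for reflective instantiation

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(amount);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        amount = in.readLong(); // same order as write()
    }

    @Override
    public int compareTo(SortableBean o) {
        // sort keys by amount in descending order during Shuffle
        return Long.compare(o.amount, this.amount);
    }
}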
2. Upstream/Downstream Traffic Statistics Case
2.1 Goal
Split the records of a txt file and compute, for every phone number, its upstream traffic, downstream traffic, and total traffic.
2.2 Approach
Wrap the upstream traffic, downstream traffic, and total traffic into a FlowBean object, keyed by phone number.
The Map phase splits each input line.
The Reduce phase accumulates the traffic per phone number.
Point to note:
Because FlowBean objects travel from the Map phase to the Reduce phase, FlowBean must be made serializable.
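For illustration, a line in the input file might look like the following (this layout is assumed here; only the phone number and the two traffic fields are actually used):

7	13560436666	120.196.100.99	www.example.com	1116	954	200
(id, phone number, ip, visited url, upstream traffic, downstream traffic, status code)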
2.3 Implementation
2.3.1 FlowBean
Implements the hadoop.io.Writable interface.
Overrides write() and readFields(); the order in which fields are written and read must match.
Keeps a no-argument constructor.
Overrides toString() so that a follow-up Map phase can easily split the output.
package edu.tyut.mapreduce.writbale;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * @author Ricardo Jia
 * @description
 * @since 2021-08-17
 */
public class FlowBean implements Writable {
    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    public FlowBean() {
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // read the fields in exactly the order they were written
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    public void setSumFlow() {
        this.sumFlow = this.upFlow + this.downFlow;
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
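A quick round trip through a byte array can verify that write() and readFields() are symmetric before the bean is wired into a job. A minimal sketch (the FlowBeanRoundTrip class is made up for illustration):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean();
        original.setUpFlow(100L);
        original.setDownFlow(200L);
        original.setSumFlow();

        // serialize into an in-memory buffer
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buf));

        // deserialize into a fresh bean and print it
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(copy); // prints 100, 200, 300 separated by tabs
    }
}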
2.3.2 FlowMapper
Extends the hadoop.mapreduce.Mapper class.
Overrides the map() method.
package edu.tyut.mapreduce.writbale;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @author Ricardo Jia
 * @description
 * @since 2021-08-17
 */
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    private final Text outK = new Text();
    private final FlowBean outV = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // read one line and convert it to a String
        String line = value.toString();
        // split the record on tabs
        String regex = "\t";
        String[] split = line.split(regex);
        // grab the fields we need: phone number, upstream traffic, downstream traffic
        String phone = split[1];
        String up = split[split.length - 3];
        String down = split[split.length - 2];
        // populate outK and outV
        outK.set(phone);
        outV.setUpFlow(Long.parseLong(up));
        outV.setDownFlow(Long.parseLong(down));
        outV.setSumFlow();
        // emit the (outK, outV) pair
        context.write(outK, outV);
    }
}
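Note the indexing: the phone number is taken from the front (split[1]), but the traffic fields are counted from the end (split.length - 3 and split.length - 2). Presumably this is because some records may omit optional middle fields such as the visited URL, so positions counted from the tail stay stable while absolute positions do not.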
2.3.3 FlowReducer
Extends the hadoop.mapreduce.Reducer class.
Overrides the reduce() method.
package edu.tyut.mapreduce.writbale;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * @author Ricardo Jia
 * @description
 * @since 2021-08-17
 */
public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    private final FlowBean outV = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        long totalUp = 0;
        long totalDown = 0;
        // iterate over values, accumulating upstream and downstream traffic
        for (FlowBean value : values) {
            totalUp += value.getUpFlow();
            totalDown += value.getDownFlow();
        }
        // populate the output value
        outV.setUpFlow(totalUp);
        outV.setDownFlow(totalDown);
        outV.setSumFlow();
        // emit the (key, outV) pair
        context.write(key, outV);
    }
}
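One detail worth knowing: Hadoop reuses the object handed out by the values iterator, so the same FlowBean instance is refilled on every iteration. That is harmless here because the loop only copies the long fields out of it, but if you ever need to keep the beans themselves (for example, collect them into a list), you must copy each one first.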
2.3.4 FlowDriver
- Get the Job object and set the Configuration.
- Set the main class of the runnable jar.
- Wire up the Mapper and Reducer.
- Set the Map-side output key/value types.
- Set the final output key/value types.
- Set the program's input and output paths.
- Submit the job.
package edu.tyut.mapreduce.writbale;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * @author Ricardo Jia
 * @description
 * @since 2021-08-17
 */
public class FlowDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // get the Job instance
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // set the driver class so the jar can locate it
        job.setJarByClass(FlowDriver.class);
        // wire up the Mapper and Reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        // set the Map-side output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        // set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("D:\\dev\\hadoop\\phone_data.txt"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\dev\\hadoop\\FlowOutput"));
        // submit the job and wait for completion
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
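One practical pitfall: FileOutputFormat refuses to start the job if the output directory already exists, so a second run fails until D:\dev\hadoop\FlowOutput is deleted. A minimal sketch of a guard that could go inside main() before setting the output path (it assumes the conf and job variables above and additionally requires importing org.apache.hadoop.fs.FileSystem):

// delete the output directory if it is left over from a previous run
FileSystem fs = FileSystem.get(conf);
Path output = new Path("D:\\dev\\hadoop\\FlowOutput");
if (fs.exists(output)) {
    fs.delete(output, true); // true = delete recursively
}
FileOutputFormat.setOutputPath(job, output);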