1) What is serialization
Serialization converts in-memory objects into a byte sequence (or another data transfer protocol) so that they can be stored on disk (persisted) or transmitted over the network.
Deserialization converts a received byte sequence (or other data transfer protocol), or persisted data read from disk, back into in-memory objects.
2) Why serialize
Generally speaking, "live" objects exist only in memory and are gone once the machine powers off. They can also only be used by the local process and cannot be sent to another computer over the network. Serialization makes it possible to store "live" objects and send them to remote machines.
3) Why not use Java's serialization
Java's built-in serialization (Serializable) is a heavyweight framework: a serialized object carries a lot of extra information (checksums, headers, the inheritance hierarchy, and so on), which makes it inefficient to transmit over the network. For this reason, Hadoop developed its own serialization mechanism, Writable.
4) Characteristics of Hadoop serialization:
(1) Compact: uses storage space efficiently.
(2) Fast: little extra overhead when reading and writing data.
(3) Interoperable: supports interaction across multiple languages.
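In code, making a custom type serializable in Hadoop simply means implementing the org.apache.hadoop.io.Writable interface, whose contract is just two methods (shown here for reference):

public interface Writable {
    // serialize the fields of this object to the output
    void write(DataOutput out) throws IOException;
    // deserialize the fields of this object from the input, in the same order they were written
    void readFields(DataInput in) throws IOException;
}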
Let's use phone traffic statistics as a worked example. Each input line is tab-separated and contains: a record id, a phone number, an IP address, an optional visited domain, upstream traffic, downstream traffic, and a status code. The goal is to sum the upstream, downstream, and total traffic per phone number.
1 13736230513 192.196.100.1 www.atguigu.com 2481 24681 200
2 13846544121 192.196.100.2 264 0 200
3 13956435636 192.196.100.3 132 1512 200
4 13966251146 192.168.100.1 240 0 404
5 18271575951 192.168.100.2 www.atguigu.com 1527 2106 200
6 84188413 192.168.100.3 www.atguigu.com 4116 1432 200
7 13590439668 192.168.100.4 1116 954 200
8 15910133277 192.168.100.5 www.hao123.com 3156 2936 200
9 13729199489 192.168.100.6 240 0 200
10 13630577991 192.168.100.7 www.shouhu.com 6960 690 200
11 15043685818 192.168.100.8 www.baidu.com 3659 3538 200
12 15959002129 192.168.100.9 www.atguigu.com 1938 180 500
13 13560439638 192.168.100.10 918 4938 200
14 13470253144 192.168.100.11 180 180 200
15 13682846555 192.168.100.12 www.qq.com 1938 2910 200
16 13992314666 192.168.100.13 www.gaga.com 3008 3720 200
17 13509468723 192.168.100.14 www.qinghua.com 7335 110349 404
18 18390173782 192.168.100.15 www.sogou.com 9531 2412 200
19 13975057813 192.168.100.16 www.baidu.com 11058 48243 200
20 13768778790 192.168.100.17 120 120 200
21 13568436656 192.168.100.18 www.alibaba.com 2481 24681 200
22 13568436656 192.168.100.19 1116 954 200
First, create a Maven project and set up the Maven configuration.
Then add the required dependencies to pom.xml:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-log4j12</artifactId>
        <version>1.7.30</version>
    </dependency>
</dependencies>
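Since slf4j-log4j12 is on the classpath, you will usually also want a log4j.properties file under src/main/resources so that the Hadoop client logs show up on the console. A minimal sketch (the log level and pattern are just illustrative choices, not part of the original walkthrough):

# src/main/resources/log4j.properties
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n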
Next, create the bean object (FlowBean), which carries the traffic fields between the Mapper and Reducer and implements Writable:
package com.gk.mapreduce;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Created with IntelliJ IDEA
 *
 * @Project :hadoop-writable
 * @Package :com.gk.mapreduce
 * @ClassName :FlowBean
 * @CreateTime :2022/3/9 19:38
 * @Version :1.0
 * @Author :锦林
 * @Email :836658031@qq.com
 **/
public class FlowBean implements Writable {

    private Long upFlow;   // upstream traffic
    private Long downFlow; // downstream traffic
    private Long sumFlow;  // total traffic

    // Provide a no-argument constructor so the framework can instantiate the bean via reflection
    public FlowBean() {
    }

    public Long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(Long upFlow) {
        this.upFlow = upFlow;
    }

    public Long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(Long downFlow) {
        this.downFlow = downFlow;
    }

    public Long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(Long sumFlow) {
        this.sumFlow = sumFlow;
    }

    public void setSumFlow() {
        this.sumFlow = this.upFlow + this.downFlow;
    }

    // Implement serialization and deserialization; note that the field order must be exactly the same in both methods
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(downFlow);
        dataOutput.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
        this.upFlow = dataInput.readLong();
        this.downFlow = dataInput.readLong();
        this.sumFlow = dataInput.readLong();
    }

    // Override toString so the output file is tab-separated
    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
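Because the field order in write() must exactly match readFields(), it can be worth sanity-checking the round trip. Below is a minimal sketch of such a check using the junit dependency declared earlier; the test class name and the sample values are just illustrative, not part of the original walkthrough:

package com.gk.mapreduce;

import org.junit.Assert;
import org.junit.Test;

import java.io.*;

public class FlowBeanTest {

    @Test
    public void writeThenReadFieldsRestoresTheSameValues() throws IOException {
        FlowBean original = new FlowBean();
        original.setUpFlow(2481L);
        original.setDownFlow(24681L);
        original.setSumFlow();

        // Serialize the bean into an in-memory byte array
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Deserialize from those bytes into a fresh bean
        FlowBean restored = new FlowBean();
        restored.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        Assert.assertEquals(original.getUpFlow(), restored.getUpFlow());
        Assert.assertEquals(original.getDownFlow(), restored.getDownFlow());
        Assert.assertEquals(original.getSumFlow(), restored.getSumFlow());
    }
}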
Create the Mapper:
package com.gk.mapreduce;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Created with IntelliJ IDEA
 *
 * @Project :hadoop-writable
 * @Package :com.gk.mapreduce
 * @ClassName :FlowMapper
 * @CreateTime :2022/3/9 19:44
 * @Version :1.0
 * @Author :锦林
 * @Email :836658031@qq.com
 **/
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    private Text outK = new Text();
    private FlowBean outV = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Get one line of input
        String line = value.toString();

        // 2. Split the line on tabs
        String[] split = line.split("\t");

        // 3. Grab the fields we need: phone number, upstream traffic, downstream traffic
        //    (the traffic fields are counted from the end because the domain column is optional)
        String phone = split[1];
        String upFlow = split[split.length - 3];
        String downFlow = split[split.length - 2];

        // 4. Fill in outK and outV
        outK.set(phone);
        outV.setUpFlow(Long.valueOf(upFlow));
        outV.setDownFlow(Long.valueOf(downFlow));
        outV.setSumFlow();

        // 5. Write out
        context.write(outK, outV);
    }
}
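One thing to watch: a line with fewer fields than expected (or a non-numeric traffic field) would make this map() throw and fail the task. If you would rather have the job skip bad records, a small guard can be added right after the line is split; this is an optional hardening step under that assumption, not part of the original code:

// Optional guard inside map(), immediately after split (assumption: silently skip malformed records)
// The shortest valid line has 6 tab-separated fields: id, phone, ip, upFlow, downFlow, status
if (split.length < 6) {
    return;
}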
Write the Reducer:
package com.gk.mapreduce;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Created with IntelliJ IDEA
 *
 * @Project :hadoop-writable
 * @Package :com.gk.mapreduce
 * @ClassName :FlowReducer
 * @CreateTime :2022/3/9 19:51
 * @Version :1.0
 * @Author :锦林
 * @Email :836658031@qq.com
 **/
public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    private FlowBean outV = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        long upFlow = 0;
        long downFlow = 0;

        // 1. Iterate over the values and accumulate the traffic for this phone number
        for (FlowBean value : values) {
            upFlow += value.getUpFlow();
            downFlow += value.getDownFlow();
        }

        // 2. Fill in outV
        outV.setUpFlow(upFlow);
        outV.setDownFlow(downFlow);
        outV.setSumFlow();

        // 3. Write out
        context.write(key, outV);
    }
}
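As an optional optimization not shown in the original code: because the reduce logic is a plain sum and FlowReducer's input and output types are both <Text, FlowBean>, the same class can also be registered as a Combiner, so partial sums are already computed on the map side. If you want that, one extra line in the Driver (after setReducerClass) is enough:

// Optional: reuse the Reducer as a Combiner (add in FlowDriver after job.setReducerClass)
job.setCombinerClass(FlowReducer.class);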
Write the Driver:
package com.gk.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Created with IntelliJ IDEA
 *
 * @Project :hadoop-writable
 * @Package :com.gk.mapreduce
 * @ClassName :FlowDriver
 * @CreateTime :2022/3/9 19:56
 * @Version :1.0
 * @Author :锦林
 * @Email :836658031@qq.com
 **/
public class FlowDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Get the job
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // 2. Set the driver class so the framework can locate the jar
        job.setJarByClass(FlowDriver.class);

        // 3. Wire up the Mapper and Reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);

        // 4. Mapper output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // 5. Final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6. Input and output paths
        FileInputFormat.setInputPaths(job, new Path("E:\\SGG-Hadoop\\wcInput"));
        FileOutputFormat.setOutputPath(job, new Path("E:\\SGG-Hadoop\\wcOutput"));

        // 7. Submit the job
        boolean flag = job.waitForCompletion(true);
        System.exit(flag ? 0 : 1);
    }
}
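Two practical notes. FileOutputFormat requires that the output directory does not already exist, so delete E:\SGG-Hadoop\wcOutput (or point to a fresh path) before re-running the job. And if you would rather not hardcode the paths, a common variant is to take them from the program arguments; a sketch, assuming args[0] is the input path and args[1] the output path:

// Sketch: read the paths from the program arguments instead of hardcoding them
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));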
Now let's run the main method.
If you compare the output with the input data, you can see that the upstream and downstream traffic has been summed per phone number.
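For example, 13568436656 appears twice in the input (2481/24681 and 1116/954), so its line in the output file should read:
13568436656	3597	25635	29232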