Requirement: write the statistics to different files (partitions) according to some condition. For example: write the statistics to different files (partitions) according to the province each phone number belongs to.
Default Partitioning
By default, a record's partition is the key's hashCode modulo the number of ReduceTasks; the user cannot control which key is stored in which partition.
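For reference, this is essentially what Hadoop's built-in HashPartitioner does (a minimal sketch; the bitmask strips the sign bit so the partition index is never negative):

import org.apache.hadoop.mapreduce.Partitioner;

public class HashPartitioner<K, V> extends Partitioner<K, V> {
    @Override
    public int getPartition(K key, V value, int numReduceTasks) {
        // mask off the sign bit so the result is non-negative, then take it modulo the ReduceTask count
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}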
To control the number of partitions, add the following to the driver class:
job.setNumReduceTasks(5); // set the number of ReduceTasks (one per partition)
Custom Partitioner
Steps:
1. Create a class that extends Partitioner and override the getPartition() method
public class ProvincePartitioner extends Partitioner<FlowBean, Text> {
    @Override
    public int getPartition(FlowBean key, Text value, int numPartitions) {
        int partition = 0;
        // ... compute the partition number from the key/value here ...
        return partition;
    }
}
2. Register the custom Partitioner on the job in the driver
job.setPartitionerClass(ProvincePartitioner.class); // register the custom partitioner
3. After defining a custom Partitioner, set the number of ReduceTasks to match the partitioner's logic
job.setNumReduceTasks(5); // one ReduceTask per partition
Partitioning summary:
1. If the number of ReduceTasks > the number of partitions returned by getPartition, a few extra empty output files are produced.
2. If 1 < the number of ReduceTasks < the number of partitions returned by getPartition, some partitioned data has no ReduceTask to go to, and the job throws an Exception.
3. If the number of ReduceTasks = 1, then no matter how many partition files the MapTask side produces, they are all handled by that single ReduceTask, and a single result file is produced.
4. Partition numbers must start at 0 and increase one by one, with no gaps.
Case analysis
For example, suppose the custom partitioner produces 5 partitions. Then:
- job.setNumReduceTasks(1); runs normally, but produces only one output file
- job.setNumReduceTasks(2); fails with an error
- job.setNumReduceTasks(6); more than 5, so the program runs normally but produces an empty output file
Hands-on example
1. Requirement
Write the statistics to different files (partitions) according to the province each phone number belongs to.
Expected output
Phone numbers starting with 136, 137, 138, and 139 each go into their own file (four files in total), and numbers with any other prefix go into one additional file.
Define the custom bean
package com.hadwinling.mapreduce.sort;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * @author :HadwinLing
 * @version 1.0
 * @description: Flow bean, sorted by total traffic in descending order
 * @date 2020/11/12 10:55 AM
 */
public class FlowBean implements WritableComparable<FlowBean> {
    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    // empty constructor, required so the framework can instantiate the bean by reflection
    public FlowBean() {
        super();
    }

    public FlowBean(long upFlow, long downFlow) {
        super();
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        sumFlow = upFlow + downFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    // comparison: sort by total traffic in descending order
    @Override
    public int compareTo(FlowBean bean) {
        int result;
        // core comparison on total traffic
        if (sumFlow > bean.getSumFlow()) {
            result = -1;
        } else if (sumFlow < bean.getSumFlow()) {
            result = 1;
        } else {
            result = 0;
        }
        return result;
    }

    // serialization
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeLong(downFlow);
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(sumFlow);
    }

    // deserialization (must read the fields in exactly the order write() wrote them)
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        downFlow = dataInput.readLong();
        upFlow = dataInput.readLong();
        sumFlow = dataInput.readLong();
    }

    // text form used when the bean is written by TextOutputFormat;
    // without this, the output file would contain object references
    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
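As a quick sanity check of the write()/readFields() pair, the bean can be round-tripped through an in-memory byte stream. This is a minimal sketch (FlowBeanRoundTrip is a hypothetical helper class placed in the same package as FlowBean, using only plain java.io):

import java.io.*;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean in = new FlowBean(2481, 24681); // upFlow, downFlow; sumFlow = 27162
        // serialize into an in-memory buffer
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        in.write(new DataOutputStream(buffer));
        // deserialize from the same bytes
        FlowBean out = new FlowBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(out); // with the toString() above: 2481	24681	27162
    }
}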
Write the partitioner class
package com.hadwinling.mapreduce.sort;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
/**
 * @author :HadwinLing
 * @version 1.0
 * @description: Custom partitioner: partition by phone-number prefix
 * @date 2020/11/12 11:09 AM
 */
public class ProvincePartitioner extends Partitioner<FlowBean, Text> {
    @Override
    public int getPartition(FlowBean key, Text value, int numPartitions) {
        // partition by the first three digits of the phone number
        String prePhoneNum = value.toString().substring(0, 3);
        int partition = 4; // default partition for all other prefixes
        if ("136".equals(prePhoneNum)) {
            partition = 0;
        } else if ("137".equals(prePhoneNum)) {
            partition = 1;
        } else if ("138".equals(prePhoneNum)) {
            partition = 2;
        } else if ("139".equals(prePhoneNum)) {
            partition = 3;
        }
        return partition;
    }
}
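The mapping can be verified locally by calling getPartition() directly, without running a job. The main method below is a hypothetical check, not part of the job, and the phone numbers are made-up samples:

import org.apache.hadoop.io.Text;

public class ProvincePartitionerCheck {
    public static void main(String[] args) {
        ProvincePartitioner p = new ProvincePartitioner();
        // 5 matches job.setNumReduceTasks(5) in the driver
        for (String phone : new String[]{"13612345678", "13799999999", "15012340000"}) {
            System.out.println(phone + " -> partition " + p.getPartition(new FlowBean(), new Text(phone), 5));
        }
        // expected: 136... -> 0, 137... -> 1, 150... -> 4
    }
}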
Write the mapper
package com.hadwinling.mapreduce.sort;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/**
 * @author :HadwinLing
 * @version 1.0
 * @description: Mapper: emits (FlowBean, phone number) pairs so that records are sorted by traffic
 * @date 2020/11/12 11:00 AM
 */
public class FlowCountSortMapper extends Mapper<LongWritable, Text, FlowBean, Text> {
    FlowBean k = new FlowBean(); // key/value objects are reused across map() calls
    Text v = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. read one line
        String line = value.toString();
        // 2. split on tabs
        String[] fields = line.split("\t");
        // 3. populate the key/value objects
        //    (column layout assumed here: phone, downFlow, upFlow, sumFlow)
        String phoneNum = fields[0];
        long downFlow = Long.parseLong(fields[1]);
        long upFlow = Long.parseLong(fields[2]);
        long sumFlow = Long.parseLong(fields[3]);
        k.setDownFlow(downFlow);
        k.setUpFlow(upFlow);
        k.setSumFlow(sumFlow);
        v.set(phoneNum);
        // 4. emit
        context.write(k, v);
    }
}
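To see what one record becomes, take the sample line quoted in the reducer below; assuming the column layout the mapper parses (phone, then three flow columns), map() turns it into a (FlowBean, Text) pair:

// one tab-separated input line
String line = "13736230513\t2481\t24681\t27162";
String[] fields = line.split("\t");
// fields[0] = "13736230513" -> v (the Text value)
// fields[1] = "2481"        -> k.downFlow
// fields[2] = "24681"       -> k.upFlow
// fields[3] = "27162"       -> k.sumFlow
// context.write(k, v) then emits (FlowBean{sumFlow=27162}, "13736230513")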
Write the reducer
package com.hadwinling.mapreduce.sort;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
/**
 * @author :HadwinLing
 * @version 1.0
 * @description: Reducer: swaps key and value so each output line is (phone number, flow bean)
 * @date 2020/11/12 11:07 AM
 */
public class FlowCountSortReducer extends Reducer<FlowBean, Text, Text, FlowBean> {
    @Override
    protected void reduce(FlowBean key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // sample record: 13736230513 2481 24681 27162
        // several phone numbers can share the same total traffic, so iterate over all values
        for (Text value : values) {
            context.write(value, key); // swap: the phone number becomes the key, the bean the value
        }
    }
}
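Because the map output key is the FlowBean, the shuffle phase sorts records within each partition with FlowBean.compareTo(), i.e. by descending total traffic, before they reach this reducer. After the key/value swap, each output line is the phone number followed by the bean's toString() form, roughly:

13736230513	<upFlow>	<downFlow>	<sumFlow>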
Write the driver class
package com.hadwinling.mapreduce.sort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
 * @author :HadwinLing
 * @version 1.0
 * @description: Driver: wires the sort job together with the custom partitioner
 * @date 2020/11/12 11:09 AM
 */
public class FlowCountSortDriver {
    public static void main(String[] args) throws IllegalArgumentException, IOException, ClassNotFoundException, InterruptedException {
        // Set the input/output paths to match your own machine;
        // note that the output path must not exist before the job runs
        args = new String[]{"/home/hadoop/MyTmp/mapreduceTest.txt", "/home/hadoop/workplace/Result/mapreduceTestReduce.txt"};

        // 1. get the configuration and the job instance
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // 2. specify the local path of this program's jar
        job.setJarByClass(FlowCountSortDriver.class);

        // 3. specify the mapper/reducer classes this job uses
        job.setMapperClass(FlowCountSortMapper.class);
        job.setReducerClass(FlowCountSortReducer.class);

        // 4. specify the kv types of the mapper output
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);

        // 5. specify the kv types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6. register the custom partitioner and a matching number of ReduceTasks
        job.setPartitionerClass(ProvincePartitioner.class); // register the custom partitioner
        job.setNumReduceTasks(5); // must match the 5 partitions ProvincePartitioner produces

        // 7. specify the input files and the output directory of the job
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 8. submit the job (configuration plus the jar containing the job's classes) and wait for completion
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
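With 5 ReduceTasks, a successful run leaves one result file per partition in the output directory (the part-r-xxxxx naming is standard MapReduce behavior, not specific to this job), roughly:

/home/hadoop/workplace/Result/mapreduceTestReduce.txt/
├── _SUCCESS
├── part-r-00000   (136... numbers)
├── part-r-00001   (137... numbers)
├── part-r-00002   (138... numbers)
├── part-r-00003   (139... numbers)
└── part-r-00004   (all other prefixes)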