Partition分区
需求:按照条件输出到不同文件中。
案例:按照手机归属地输出到不同文件中。
1、默认partitioner分区
默认分区根据key的hashCode对ReduceTasks个数取模得到。
// Hadoop's built-in default partitioner, quoted here for reference.
public class HashPartitioner<K, V> extends Partitioner<K, V> {
// Masking with Integer.MAX_VALUE clears the sign bit, so the modulo
// result is always in [0, numReduceTasks) even for negative hashCodes.
public int getPartition(K key, V value,int numReduceTasks) {
return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
}
}
2、自定义Partitioner步骤
- 自定义类继承Partitioner类,重写getPartition()方法
- 在Job驱动中,设置自定义Partitioner;
- 自定义partition后,根据自定义Partitioner的逻辑设置相应数量的ReduceTask。
3、注意事项
- 如果ReduceTask的数量 > getPartition的结果数,则会多产生几个空的输出文件part-r-000xx ;
- 如果 1 < ReduceTask的数量 < getPartition的结果数,则有一部分分区号没有对应的ReduceTask,该分区的数据无处安放,MapTask端会抛出异常(IOException: Illegal partition);
- 如果ReduceTask的数量=1,则不管MapTask端输出多少个分区文件,最终结果都交给这一个ReduceTask,最终也就只会产生一个结果文件part-r-00000 ;
- 分区号必须从零开始,逐一累加。
4、案例
FlowBean
package com.hpu.hadoop.partitioner;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * Value type for the flow-statistics job: holds up-stream, down-stream and
 * total traffic for one phone number.
 *
 * Implements Hadoop's {@link Writable} so instances can be shuffled between
 * map and reduce tasks. The field order in {@link #write(DataOutput)} and
 * {@link #readFields(DataInput)} must match exactly.
 */
public class FlowBean implements Writable {
    private Integer upFlow;   // up-stream traffic
    private Integer downFlow; // down-stream traffic
    private Integer sumFlow;  // total traffic = upFlow + downFlow

    /** No-arg constructor required by the Writable deserialization contract. */
    public FlowBean() {}

    public Integer getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(Integer upFlow) {
        this.upFlow = upFlow;
    }

    public Integer getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(Integer downFlow) {
        this.downFlow = downFlow;
    }

    public Integer getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(Integer sumFlow) {
        this.sumFlow = sumFlow;
    }

    /** Derives the total from the two parts; call only after both are set. */
    public void setSumFlow() {
        this.sumFlow = this.upFlow + this.downFlow;
    }

    /**
     * Serializes all three fields. The original version omitted sumFlow,
     * leaving it null after deserialization (a latent NPE if toString() or
     * write() runs before setSumFlow() is re-invoked); keep write/readFields
     * symmetric instead.
     */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(this.upFlow);
        out.writeInt(this.downFlow);
        out.writeInt(this.sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readInt();
        this.downFlow = in.readInt();
        this.sumFlow = in.readInt();
    }

    /** Tab-separated "up down sum" — one line of the final output file. */
    @Override
    public String toString() {
        return this.upFlow + "\t" + this.downFlow + "\t" + this.sumFlow;
    }
}
Mapper:
package com.hpu.hadoop.partitioner;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/**
 * Mapper: parses one tab-separated log line and emits
 * (phone number, FlowBean with up/down/total traffic).
 *
 * The up/down flow columns are addressed from the END of the line
 * (length-3, length-2) so lines with a variable number of middle
 * columns still parse; the phone number is the second column.
 */
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    // Output objects are created once in setup() and reused for every
    // record; Hadoop serializes them on context.write(), so reuse is safe.
    private Text phone;
    private FlowBean flowBean;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        phone = new Text();
        flowBean = new FlowBean();
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] fields = value.toString().split("\t");
        int n = fields.length;
        flowBean.setUpFlow(Integer.parseInt(fields[n - 3]));
        flowBean.setDownFlow(Integer.parseInt(fields[n - 2]));
        flowBean.setSumFlow();
        phone.set(fields[1]);
        context.write(phone, flowBean);
    }
}
自定义Partitioner:
package com.hpu.hadoop.partitioner;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
/**
 * Custom partitioner: routes each record by the first three digits of the
 * phone-number key. Prefixes 136-139 map to partitions 0-3; every other
 * prefix falls through to partition 4. Partition numbers start at 0 and
 * are consecutive, as Hadoop requires.
 */
public class CustomPartitioner extends Partitioner<Text, FlowBean> {
    @Override
    public int getPartition(Text text, FlowBean flowBean, int numPartitions) {
        String prefix = text.toString().substring(0, 3);
        switch (prefix) {
            case "136":
                return 0;
            case "137":
                return 1;
            case "138":
                return 2;
            case "139":
                return 3;
            default:
                return 4;
        }
    }
}
Reducer:
package com.hpu.hadoop.partitioner;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
/**
 * Reducer: sums the up-stream and down-stream traffic of all records for
 * one phone number and emits a single aggregated FlowBean per key.
 */
public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    // Single output bean reused across reduce() calls; Hadoop serializes
    // it on each context.write(), so reuse does not corrupt earlier output.
    private FlowBean flowBean;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        flowBean = new FlowBean();
    }

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        // Accumulate per-key totals in locals (reset implicitly each call).
        int totalUp = 0;
        int totalDown = 0;
        for (FlowBean bean : values) {
            totalUp += bean.getUpFlow();
            totalDown += bean.getDownFlow();
        }
        flowBean.setUpFlow(totalUp);
        flowBean.setDownFlow(totalDown);
        flowBean.setSumFlow();
        context.write(key, flowBean);
    }
}
Driver:
package com.hpu.hadoop.partitioner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
 * Job driver: wires the mapper, reducer and custom partitioner together,
 * sets the I/O paths, submits the job and propagates its success or
 * failure as the process exit code.
 */
public class FlowDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Job configuration
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Jar containing the driver class
        job.setJarByClass(FlowDriver.class);
        // 3. Mapper
        job.setMapperClass(FlowMapper.class);
        // 4. Reducer
        job.setReducerClass(FlowReducer.class);
        // 5. Map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        // 6. Final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // Custom partitioner produces partition numbers 0-4, so the
        // ReduceTask count must be set to exactly 5 to match.
        job.setPartitionerClass(CustomPartitioner.class);
        job.setNumReduceTasks(5);
        // 7. Input and output paths (output directory must not exist yet)
        FileInputFormat.setInputPaths(job, new Path("F:\\input\\inputflow\\phone_data.txt"));
        FileOutputFormat.setOutputPath(job, new Path("E:\\Test\\f4"));
        // 8. Submit and wait. The original discarded the boolean result, so
        // a failed job still exited with status 0 — propagate it instead.
        boolean success = job.waitForCompletion(true);
        System.exit(success ? 0 : 1);
    }
}