Hadoop MapReduce custom partitioning implementation
Task: run traffic statistics over the flow log and write the results for users from different provinces to separate output files.
Two mechanisms need to be customized (a minimal sketch of both follows this list):
1. Override the partitioning logic with a custom Partitioner class
2. Set the number of concurrent reduce tasks
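For reference before the full listing, here is a minimal sketch of both customizations. The AreaPartitioner name, the phone-prefix-to-partition table, and the reduce-task count of 4 are illustrative assumptions, not the author's actual values:

public static class AreaPartitioner<KEY, VALUE> extends org.apache.hadoop.mapreduce.Partitioner<KEY, VALUE> {
    // Illustrative prefix -> partition table; a real job would load the full province mapping
    private static final java.util.HashMap<String, Integer> areaMap = new java.util.HashMap<String, Integer>();
    static {
        areaMap.put("135", 0);
        areaMap.put("136", 1);
        areaMap.put("137", 2);
    }
    @Override
    public int getPartition(KEY key, VALUE value, int numReduceTasks) {
        // Keys are phone numbers; unknown prefixes fall into the last partition
        Integer area = areaMap.get(key.toString().substring(0, 3));
        return area == null ? numReduceTasks - 1 : area;
    }
}

// In the driver, register the partitioner and set the reduce parallelism
// so that each partition gets its own output file:
job.setPartitionerClass(AreaPartitioner.class);
job.setNumReduceTasks(4);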
The full Java code:
Mapper, Reducer, and driver (main) class:
package com.cjp.areaPartitioner;
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import com.cjp.sumflow.FlowBean;
public class FlowSum {
public static class Maps extends Mapper<LongWritable, Text, Text, FlowBean> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Each log line is tab-separated; field 1 holds the phone number,
        // fields 5 and 6 hold the upstream and downstream byte counts
        String line = value.toString();
        String[] split = StringUtils.split(line, "\t");
        String phoneNum = split[1];
        long up_flow_sum = Long.parseLong(split[5]);
        long d_flow_sum = Long.parseLong(split[6]);
        // Emit <phone number, FlowBean> so all records for the same user meet in one reduce call
        context.write(new Text(phoneNum), new FlowBean(phoneNum, up_flow_sum, d_flow_sum));
    }
}
public static class Reduces extends Reducer<Text, FlowBean, Text, FlowBean> {
protected void reduce(Text arg0, Iterable<FlowBean> value, Context co