Data:
Building on the statistics requirement of the previous example, add a new requirement: aggregate by province, so that the phone numbers of different provinces are written to different files.
For example, if the prefix 137 belongs to Hebei and 138 to Henan, their results end up in separate output files.
(2) Implementation approach
The map and reduce logic is the same as in the previous example; two extra steps are needed:
Define a custom Partitioner.
It decides the partition from the phone number. There are as many reduce tasks as there are partitions, and each reduce task writes one output file, so the data of different partitions ends up in different result files (for contrast, a sketch of Hadoop's default partitioner follows below).
In the main program, register our custom Partitioner on the Job.
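For contrast, Hadoop's built-in HashPartitioner spreads keys purely by hash code, so the records of one province would be scattered across files; its logic is essentially the following (simplified from the Hadoop source):
import org.apache.hadoop.mapreduce.Partitioner;
// Default behavior: partition by key hash, which ignores provinces entirely.
public class HashPartitioner<K, V> extends Partitioner<K, V> {
    @Override
    public int getPartition(K key, V value, int numReduceTasks) {
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}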
The POM file is as follows (the project is named comtoo):
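A minimal sketch of the dependency section, assuming the hadoop-client artifact (the version number is an assumption):
<dependencies>
    <!-- Hadoop client libraries; the version shown is an assumption -->
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.3.6</version>
    </dependency>
</dependencies>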
First, define the partition class MyPartitioner.
This code is the heart of the example. It defines a HashMap that stands in for a database, mapping phone-number prefixes to partition numbers.
getPartition takes the first three digits of the phone number and looks the prefix up in this "database"; if the prefix is not found, the record is assigned to the "other" partition (represented by 4).
package comtoo;
import java.util.HashMap;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
public class MyPartitioner extends Partitioner<Text, FlowBean> {
    // Simulated "database" mapping phone-number prefixes to partition numbers
    private static final HashMap<String, Integer> areaMap = new HashMap<>();
    static {
        areaMap.put("133", 0);
        areaMap.put("135", 1);
        areaMap.put("137", 2);
        areaMap.put("138", 3);
    }
    @Override
    public int getPartition(Text key, FlowBean value, int numPartitions) {
        // The key is the phone number; its first three digits are the prefix
        String prefix = key.toString().substring(0, 3);
        // Look the prefix up; unknown prefixes go to the "other" partition (4)
        Integer partition = areaMap.get(prefix);
        return partition == null ? 4 : partition;
    }
}
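A quick local sanity check (a hypothetical snippet, not part of the job) shows the lookup in action:
package comtoo;
import org.apache.hadoop.io.Text;
public class PartitionerCheck {
    public static void main(String[] args) {
        MyPartitioner p = new MyPartitioner();
        // "137" is in the map, so this prints 2
        System.out.println(p.getPartition(new Text("13712345678"), null, 5));
        // "139" is not mapped, so it falls to the "other" partition and prints 4
        System.out.println(p.getPartition(new Text("13912345678"), null, 5));
    }
}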
Next, define the class used for serialization, FlowBean:
package comtoo;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
public class FlowBean implements Writable {
    int upFlow;     // upstream traffic
    int downFlow;   // downstream traffic
    double toFlow;  // total traffic
    // Writable requires a public no-argument constructor for deserialization
    public FlowBean() {
    }
    public FlowBean(int upFlow, int downFlow, double toFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.toFlow = toFlow;
    }
    public int getUpFlow() {
        return upFlow;
    }
    public void setUpFlow(int upFlow) {
        this.upFlow = upFlow;
    }
    public int getDownFlow() {
        return downFlow;
    }
    public void setDownFlow(int downFlow) {
        this.downFlow = downFlow;
    }
    public double getToFlow() {
        return toFlow;
    }
    public void setToFlow(double toFlow) {
        this.toFlow = toFlow;
    }
    // Deserialization must read the fields in the same order they were written
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readInt();
        downFlow = in.readInt();
        toFlow = in.readDouble();
    }
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(upFlow);
        out.writeInt(downFlow);
        out.writeDouble(toFlow);
    }
    @Override
    public String toString() {
        return upFlow + ":" + downFlow + ":" + toFlow;
    }
}
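write and readFields must stay in sync; a minimal round-trip check (a hypothetical local test, not part of the job) confirms the serialization works:
package comtoo;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
public class FlowBeanCheck {
    public static void main(String[] args) throws IOException {
        FlowBean in = new FlowBean(100, 200, 300.0);
        // Serialize the bean to a byte array
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        in.write(new DataOutputStream(buf));
        // Deserialize it back and print the result: 100:200:300.0
        FlowBean out = new FlowBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(out);
    }
}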
Finally, define the main class FlowWritable, which contains the Mapper and Reducer implementation classes and the main method:
package comtoo;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class FlowWritable {
    public static void main(String[] args) throws Exception {
        // Get the configuration
        Configuration conf = new Configuration();
        // Create the job
        Job job = Job.getInstance(conf);
        // Get an HDFS handle for file operations
        FileSystem fs = FileSystem.get(conf);
        // Input path
        Path inputpath = new Path(args[0]);
        // Output path
        Path outputpath = new Path(args[1]);
        // Delete the output path if it already exists
        if (fs.exists(outputpath)) {
            fs.delete(outputpath, true);
        }
        // Main class of the job
        job.setJarByClass(FlowWritable.class);
        // Job name
        job.setJobName("Flow");
        // Mapper class
        job.setMapperClass(Map.class);
        // Reducer class
        job.setReducerClass(Red.class);
        // Input and output paths
        FileInputFormat.setInputPaths(job, inputpath);
        FileOutputFormat.setOutputPath(job, outputpath);
        // Map output types: <Text, FlowBean>
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        // Final (reducer) output types: <Text, Text>
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // Use our custom partitioner
        job.setPartitionerClass(MyPartitioner.class);
        // One reduce task per partition: 0-3 for the mapped prefixes, 4 for "other"
        job.setNumReduceTasks(5);
        // Run the job and wait for it to finish
        job.waitForCompletion(true);
    }
    public static class Map extends Mapper<LongWritable, Text, Text, FlowBean> {
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Assumed input format (as in the previous example):
            // phone upFlow downFlow totalFlow, separated by single spaces
            String[] line = value.toString().split(" ");
            FlowBean fl = new FlowBean(Integer.parseInt(line[1]),
                    Integer.parseInt(line[2]), Double.parseDouble(line[3]));
            // The phone number is the key, so the partitioner can read its prefix
            context.write(new Text(line[0]), fl);
        }
    }
    public static class Red extends Reducer<Text, FlowBean, Text, Text> {
        @Override
        public void reduce(Text key, Iterable<FlowBean> value, Context context)
                throws IOException, InterruptedException {
            // Keep the record with the largest total flow for each phone number
            double maxTotal = 0;
            int maxDown = 0;
            int maxUp = 0;
            for (FlowBean fl : value) {
                if (fl.getToFlow() > maxTotal) {
                    maxTotal = fl.getToFlow();
                    maxDown = fl.getDownFlow();
                    maxUp = fl.getUpFlow();
                }
            }
            context.write(key, new Text(maxUp + ":" + maxDown + ":" + maxTotal));
        }
    }
}
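To run the job, package the project and submit it with the hadoop command, for example (the jar name and HDFS paths are placeholders): hadoop jar comtoo.jar comtoo.FlowWritable /input/flow /output/flow. With five reduce tasks, the output directory contains five files, part-r-00000 through part-r-00004: the first four hold the provinces mapped in MyPartitioner, and part-r-00004 holds all remaining prefixes.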