Requirements Analysis
The input data must be written out to three result files according to each phone number's carrier (China Mobile, China Unicom, or China Telecom).
Problem Analysis
Since the results must be split by carrier into three files, a custom partitioner is the natural fit: each ReduceTask produces exactly one result file, and each partition is processed by exactly one ReduceTask.
So if we divide the data into three partitions, one per carrier, we get one result file per carrier.
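For contrast, Hadoop's default HashPartitioner spreads keys across ReduceTasks purely by hash code, so it gives no control over which file a given carrier ends up in. A paraphrased sketch of its logic (the real class lives in org.apache.hadoop.mapreduce.lib.partition):
import org.apache.hadoop.mapreduce.Partitioner;
/**
 * Paraphrase of Hadoop's built-in HashPartitioner: equal keys always reach
 * the same ReduceTask, but which partition a key gets is effectively arbitrary.
 */
public class HashPartitionerSketch<K, V> extends Partitioner<K, V> {
@Override
public int getPartition(K key, V value, int numReduceTasks) {
//mask the sign bit so the partition index is never negative
return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
}
}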
Data Preparation
First, write a small program that automatically generates a phone-number data file.
package PhoneCount;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Random;
/**
 * Generates a test file of random phone numbers for the three carriers.
 */
public class PhoneNum {
//China Mobile number prefixes (first three digits)
public static final String[] YD={
"134","135","136","137","138","139",
"150","151","152","157","158","159",
"188","187","182","183","184","178",
"172","147","198"
};
//China Unicom number prefixes
public static final String[] LT={
"130","131","132","145","155","156",
"166","171","175","176","185","186"
};
//China Telecom number prefixes
public static final String[] DX={
"133","149","153","173","177","180",
"181","189","199"
};
public static void main(String[] args) {
try {
writer();
} catch (IOException e) {
e.printStackTrace();
}
}
public static void writer() throws IOException {
//open the output file
FileOutputStream fos = new FileOutputStream(new File("D:\\BigdataTest\\PhoneData\\GeneratePhone.txt"));
//line counter
long count=0;
while(count<500000){
//write one number from each of the three carriers, tab-separated
fos.write(scrand(YD).toString().getBytes());
fos.write("\t".getBytes());
fos.write(scrand(LT).toString().getBytes());
fos.write("\t".getBytes());
fos.write(scrand(DX).toString().getBytes());
//one line per iteration: three numbers per line
fos.write("\n".getBytes());
count++;
}
//flush and release the file handle
fos.close();
}
public static StringBuilder scrand(String[] company){
Random random = new Random();
//pick a random prefix for the given carrier
int x=random.nextInt(company.length);
StringBuilder phonenum = new StringBuilder(company[x]);
//append the remaining 8 random digits
for(int i=0;i<8;i++){
x=random.nextInt(10);
phonenum.append(x);
}
return phonenum;
}
}
Sample output: each line holds three tab-separated 11-digit numbers, one from each carrier, for example 13845129036	13024587931	17736452980.
Writing the Code
Writing PartitionMapper
The mapper needs no extra logic: it just splits each input line into individual phone numbers and emits each one as a key.
package Partition;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/**
 * Mapper: splits each input line on tabs and emits
 * every phone number as a key.
 */
public class PartitionMapper extends Mapper<LongWritable, Text,Text, NullWritable> {
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//get the input line
String line = value.toString();
//split the line on tabs
String[] splits = line.split("\t");
//emit each phone number as a key, with NullWritable as a placeholder value
for(String split:splits){
context.write(new Text(split),NullWritable.get());
}
}
}
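A small side note: allocating a new Text per record works, but the more common Hadoop idiom reuses a single instance, since context.write serializes the key immediately. A hypothetical variant (PartitionMapperReuse, not part of the original project):
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class PartitionMapperReuse extends Mapper<LongWritable, Text, Text, NullWritable> {
//one reusable key object instead of one allocation per phone number
private final Text outKey = new Text();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
for (String split : value.toString().split("\t")) {
outKey.set(split);
context.write(outKey, NullWritable.get());
}
}
}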
Writing PartitionReduce
The reducer passes each key straight through. Because the shuffle groups identical keys, any duplicate numbers produced by the generator collapse to a single output line.
package Partition;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class PartitionReduce extends Reducer<Text, NullWritable,Text,NullWritable> {
@Override
protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
//identical numbers arrive grouped under one key, so each unique number is written once
context.write(key,NullWritable.get());
}
}
Writing MyPartition
package Partition;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
import java.util.Arrays;
/**
 * Custom partitioner.
 * Each partition is handled by exactly one ReduceTask, so the partition
 * number returned here decides which output file a record lands in.
 *
 * Routes each phone number to one of three output files by carrier.
 */
public class MyPartition extends Partitioner<Text, NullWritable> {
//China Mobile number prefixes
public static final String[] YD={
"134","135","136","137","138","139",
"150","151","152","157","158","159",
"188","187","182","183","184","178",
"172","147","198"
};
//China Unicom number prefixes
public static final String[] LT={
"130","131","132","145","155","156",
"166","171","175","176","185","186"
};
//China Telecom number prefixes
public static final String[] DX={
"133","149","153","173","177","180",
"181","189","199"
};
@Override
public int getPartition(Text text, NullWritable nullWritable, int numPartitions) {
//get the phone number
String phone = text.toString();
//the first three digits identify the carrier
String prefix = phone.substring(0,3);
//China Mobile -> partition 0, China Unicom -> 1, China Telecom -> 2
if(Arrays.asList(YD).contains(prefix)) return 0;
else if(Arrays.asList(LT).contains(prefix)) return 1;
else return 2;
}
}
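One performance note: Arrays.asList(YD).contains(s) allocates a wrapper list and scans it linearly for every single record. A hypothetical variant (MyPartitionFast, not in the original project) that builds static HashSets once gives the same partitioning with O(1) lookups:
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
public class MyPartitionFast extends Partitioner<Text, NullWritable> {
//lookup sets are built once, when the class is loaded
private static final Set<String> YD = new HashSet<>(Arrays.asList(
"134","135","136","137","138","139","150","151","152","157","158","159",
"188","187","182","183","184","178","172","147","198"));
private static final Set<String> LT = new HashSet<>(Arrays.asList(
"130","131","132","145","155","156","166","171","175","176","185","186"));
@Override
public int getPartition(Text text, NullWritable nullWritable, int numPartitions) {
String prefix = text.toString().substring(0, 3);
if (YD.contains(prefix)) return 0; //China Mobile
if (LT.contains(prefix)) return 1; //China Unicom
return 2;                          //China Telecom (everything else)
}
}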
Writing PartitionDriver
package Partition;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class PartitionDriver {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
//Instantiate the configuration
Configuration conf = new Configuration();
//Define a job
Job job = Job.getInstance(conf);
//Configure the job
job.setJarByClass(PartitionDriver.class);
//Set the custom mapper and the mapper's output key/value types
job.setMapperClass(PartitionMapper.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(NullWritable.class);
//Set the custom reducer and the job's final output key/value types
job.setReducerClass(PartitionReduce.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(NullWritable.class);
//Register the custom partitioner
job.setPartitionerClass(MyPartition.class);
job.setNumReduceTasks(3);//must match the number of custom partitions
//Input path (this example runs locally)
FileInputFormat.setInputPaths(job,new Path("D:\\BigdataTest\\PhoneData\\Partition\\GeneratePhone.txt"));
//Output path
FileOutputFormat.setOutputPath(job,new Path("D:\\BigdataTest\\PhoneData\\Partition\\PartitionResult"));
/*//Input path when the jar is packaged and run on the cluster
FileInputFormat.setInputPaths(job,new Path(args[0]));
//Output path
FileOutputFormat.setOutputPath(job,new Path(args[1]));*/
//Submit the job and wait for completion
job.waitForCompletion(true);
}
}
Note that, unlike the earlier projects, you must set the number of ReduceTasks to match the number of partitions your partitioner can return. With a single ReduceTask the partitioner is bypassed and everything lands in one file; with more ReduceTasks than partitions the extra part files are simply empty; with fewer (but more than one), any record routed to a partition that has no ReduceTask fails the job with an Illegal partition error.
Results
As expected, the results are split across three output files: part-r-00000 (China Mobile), part-r-00001 (China Unicom), and part-r-00002 (China Telecom).
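To spot-check the part files without opening them by hand, a small stand-alone helper (hypothetical, assuming the local output path used above) can print each file's line count and first entry:
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
public class PeekResults {
public static void main(String[] args) throws IOException {
String dir = "D:\\BigdataTest\\PhoneData\\Partition\\PartitionResult";
for (String name : new String[]{"part-r-00000", "part-r-00001", "part-r-00002"}) {
//read one reducer's output file and summarize it
List<String> lines = Files.readAllLines(Paths.get(dir, name));
System.out.println(name + ": " + lines.size() + " numbers, first = "
+ (lines.isEmpty() ? "(empty)" : lines.get(0)));
}
}
}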