MapReduce之分箱
模式描述
分箱模式与前面的模式很类似,都是在不考虑记录顺序的情况下对记录进行分类。
目的
将数据集中每条记录归档到一个或多个类别。
性能分析
分箱模式是在map端对数据进行拆分,这和其他只有map任务的作业一样,该模式具有相同的可扩展性和性能特性。该模式不需要执行排序、混排以及reduce,并且大部分处理都是在本地数据上执行。
问题描述
在数据集中按照标签(hadoop,hive,pig,hbase,mongoDB)将帖子分别放到5个箱子中。
样例输入
创建数据集的代码如下:
import java.io.*;
import java.util.Random;
public class create {
    /**
     * Generates a random string of lowercase ASCII letters ('a'..'z').
     *
     * @param length number of characters to generate; 0 yields an empty string
     * @return a random lowercase string of exactly {@code length} characters
     */
    public static String getRandomChar(int length) {
        Random random = new Random();
        // StringBuilder: no synchronization needed for a method-local buffer.
        StringBuilder buffer = new StringBuilder(length);
        for (int i = 0; i < length; i++) {
            buffer.append((char) ('a' + random.nextInt(26)));
        }
        return buffer.toString();
    }

    /**
     * Writes 5000 sample records of the form
     * "id = NNNNN comments = xxxxxxxxxxxxxxx tags = TAG" to input/file.txt.
     * The file is opened in append mode, so rerunning adds more records.
     */
    public static void main(String[] args) throws Exception {
        String path = "input/file.txt";
        File file = new File(path);
        // mkdirs is a no-op when the directories already exist.
        file.getParentFile().mkdirs();
        file.createNewFile();
        String[] tags = {"hadoop", "hive", "pig", "hbase", "mongoDB"};
        // try-with-resources guarantees the writer is flushed and closed;
        // the original never closed it, losing any still-buffered lines.
        try (BufferedWriter bw = new BufferedWriter(new FileWriter(file, true))) {
            for (int i = 0; i < 5000; i++) {
                // Random id in [10000, 20000).
                int id = (int) (Math.random() * 10000 + 10000);
                bw.write("id = " + id + " comments = " + getRandomChar(15)
                        + " tags = " + tags[(int) (Math.random() * 5)] + '\n');
            }
        }
    }
}
运行结果如下
该数据包含ID,评论及标签
样例输出
输出为几个txt文档,每个文档下包含相对应的标签
mapper阶段任务
在setup阶段使用输入的上下文创建MultipleOutputs实例,mapper由多个if-else语句组成,目的是检查帖子中每个标签,并将这个帖子写入到该标签所对应的箱子中,最后在cleanup阶段将MultipleOutputs实例关闭。
mapper阶段编码如下
public static class BinningMapper extends Mapper<Object,Text,Text,NullWritable>{
private MultipleOutputs<Text,NullWritable> mos=null;
protected void setup(Context context){
mos=new MultipleOutputs(context);
}
public void map(Object key,Text value,Context context) throws IOException,InterruptedException{
String line=value.toString();
String rawtags=line.substring(line.indexOf("tags")+7);
// "hadoop","hive","pig","hbase","mongoDB"
if(rawtags.matches("hadoop")){
mos.write("bins",value,NullWritable.get(),"hadoop-tag");
}else if(rawtags.matches("hive")){
mos.write("bins",value,NullWritable.get(),"hive-tag");
}else if(rawtags.matches("pig")){
mos.write("bins",value,NullWritable.get(),"pig-tag");
}else if(rawtags.matches("hbase")){
mos.write("bins",value,NullWritable.get(),"hbase-tag");
}else if(rawtags.matches("mongoDB")){
mos.write("bins",value,NullWritable.get(),"mongoDB-tag");
}
}
public void cleanup(Context context) throws IOException,InterruptedException{
mos.close();
}
}
完整代码如下
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import java.io.IOException;
import java.text.ParseException;
public class Binning {
public static class BinningMapper extends Mapper<Object,Text,Text,NullWritable>{
private MultipleOutputs<Text,NullWritable> mos=null;
protected void setup(Context context){
mos=new MultipleOutputs(context);
}
public void map(Object key,Text value,Context context) throws IOException,InterruptedException{
String line=value.toString();
String rawtags=line.substring(line.indexOf("tags")+7);
// "hadoop","hive","pig","hbase","mongoDB"
if(rawtags.matches("hadoop")){
mos.write("bins",value,NullWritable.get(),"hadoop-tag");
}else if(rawtags.matches("hive")){
mos.write("bins",value,NullWritable.get(),"hive-tag");
}else if(rawtags.matches("pig")){
mos.write("bins",value,NullWritable.get(),"pig-tag");
}else if(rawtags.matches("hbase")){
mos.write("bins",value,NullWritable.get(),"hbase-tag");
}else if(rawtags.matches("mongoDB")){
mos.write("bins",value,NullWritable.get(),"mongoDB-tag");
}
}
public void cleanup(Context context) throws IOException,InterruptedException{
mos.close();
}
}
public static void main(String[] args) throws Exception{
FileUtil.deleteDir("output");
Configuration configuration=new Configuration();
String[] otherArgs=new String[]{"input/file.txt","output"};
if(otherArgs.length!=2){
System.err.println("参数错误");
System.exit(2);
}
Job job=new Job(configuration,"Binning");
job.setMapperClass(BinningMapper.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(NullWritable.class);
MultipleOutputs.addNamedOutput(job,"bins", TextOutputFormat.class,Text.class,NullWritable.class);
MultipleOutputs.setCountersEnabled(job,true);
job.setNumReduceTasks(0);
FileInputFormat.addInputPath(job,new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job,new Path(otherArgs[1]));
System.exit(job.waitForCompletion(true)?0:1);
}
}