MultipleInputs: feeding a job from multiple input sources
---------------------------------------------------------
That is, several different input sources are registered ahead of the InputFormat stage; the method for this is MultipleInputs.addInputPath(...). A Mapper can also be bound to each source in the same call, so every input path gets its own map logic.
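For reference, the overload used below comes from org.apache.hadoop.mapreduce.lib.input.MultipleInputs (Hadoop 2.x API) and binds one path to one InputFormat and one Mapper:

    // binds a path to its own InputFormat and Mapper
    public static void addInputPath(Job job, Path path,
            Class<? extends InputFormat> inputFormatClass,
            Class<? extends Mapper> mapperClass)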
App class:
package com.mao.hdfs.multiInput;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WCApp {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "file:///");           // run against the local file system

        Job job = Job.getInstance(conf);

        // job properties
        job.setJobName("WCAppMulti");                   // job name
        job.setJarByClass(WCApp.class);                 // class used to locate the containing jar

        // multiple inputs: each path gets its own InputFormat and Mapper
        MultipleInputs.addInputPath(job, new Path("file:///d:/mr/txt"),
                TextInputFormat.class, WCTextMapper.class);
        MultipleInputs.addInputPath(job, new Path("file:///d:/mr/seq"),
                SequenceFileInputFormat.class, WCSeqMapper.class);

        // output
        FileOutputFormat.setOutputPath(job, new Path(args[0]));

        job.setReducerClass(WCReducer.class);           // reducer class
        job.setNumReduceTasks(3);                       // number of reducers

        job.setMapOutputKeyClass(Text.class);           // map output key
        job.setMapOutputValueClass(IntWritable.class);  // map output value
        job.setOutputKeyClass(Text.class);              // final output key
        job.setOutputValueClass(IntWritable.class);     // final output value

        job.waitForCompletion(true);
    }
}
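WCSeqMapper below expects the sequence file under file:///d:/mr/seq to store IntWritable keys and Text values; if the key class does not match, the job fails with a cast error at runtime. Here is a minimal sketch of how such a file could be produced (the SeqFileWriter class name, output file name, and sample records are assumptions for illustration):

package com.mao.hdfs.multiInput;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SeqFileWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "file:///");   // local file system, as in WCApp
        // key/value classes must match WCSeqMapper's generic parameters
        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
                SequenceFile.Writer.file(new Path("file:///d:/mr/seq/wc.seq")),
                SequenceFile.Writer.keyClass(IntWritable.class),
                SequenceFile.Writer.valueClass(Text.class))) {
            writer.append(new IntWritable(1), new Text("hello world"));
            writer.append(new IntWritable(2), new Text("hello hadoop"));
        }
    }
}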
Mapper classes:
package com.mao.hdfs.multiInput;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * WCTextMapper — splits each plain-text line on spaces and emits (word, 1).
 */
public class WCTextMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        Text keyOut = new Text();
        IntWritable valueOut = new IntWritable();
        String[] arr = value.toString().split(" ");
        for (String s : arr) {
            keyOut.set(s);
            valueOut.set(1);
            context.write(keyOut, valueOut);
        }
    }
}
package com.mao.hdfs.multiInput;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * WCSeqMapper — reads (IntWritable, Text) records from a SequenceFile,
 * splits each value on spaces, and emits (word, 1).
 */
public class WCSeqMapper extends Mapper<IntWritable, Text, Text, IntWritable> {
    @Override
    protected void map(IntWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        Text keyOut = new Text();
        IntWritable valueOut = new IntWritable();
        String[] arr = value.toString().split(" ");
        for (String s : arr) {
            keyOut.set(s);
            valueOut.set(1);
            context.write(keyOut, valueOut);
        }
    }
}
Reducer class:
package com.mao.hdfs.multiInput;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * WCReducer — sums the 1s emitted by both mappers for each word.
 */
public class WCReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int count = 0;
        for (IntWritable iw : values) {
            count += iw.get();
        }
        context.write(key, new IntWritable(count));
    }
}
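Since the per-word sum is associative and commutative, WCReducer could also be registered as a combiner to pre-aggregate map output and shrink the shuffle. This line is not in the original driver, just a common word-count optimization:

    job.setCombinerClass(WCReducer.class);  // optional: local pre-aggregation before the shuffle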