Without further ado, here's the code. The job counts words across two HDFS directories whose files use different delimiters: MultipleInputs lets each input path register its own InputFormat and Mapper, and both map outputs feed one shared Reducer.
package Compression;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class Multipule_worldCount {
    // Mapper for comma-separated input: emits (word, 1) for every token
    public static class dhMaper extends Mapper<LongWritable, Text, Text, LongWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String[] values = value.toString().split(",");
            for (String val : values) {
                if (val.isEmpty()) continue; // skip empty tokens from consecutive delimiters
                context.write(new Text(val), new LongWritable(1));
            }
        }
    }
    // Mapper for whitespace-separated input: same output, different delimiter
    public static class kgMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // \s+ matches runs of whitespace; note that a line with leading
            // whitespace still yields an empty first token, skipped below
            String[] values = value.toString().split("\\s+");
            for (String val : values) {
                if (val.isEmpty()) continue;
                context.write(new Text(val), new LongWritable(1));
            }
        }
    }
    // Reducer: sums the 1s emitted for each word across both input sources
    public static class myReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
            long sum = 0;
            for (LongWritable val : values) sum += val.get();
            context.write(key, new LongWritable(sum));
        }
    }
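    // Added note (not in the original code): because the reduce step is a pure
    // associative and commutative sum, myReducer could also be registered as a
    // combiner in main via job.setCombinerClass(myReducer.class); to cut
    // shuffle traffic.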
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        System.setProperty("HADOOP_USER_NAME", "root");
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://bg00:9000");
        conf.set("dfs.client.use.datanode.hostname", "true");
        Job job = Job.getInstance(conf);
        job.setJarByClass(Multipule_worldCount.class);
        // MultipleInputs binds each input path to its own InputFormat and Mapper:
        // /s1 (whitespace-separated) -> kgMapper, /s2 (comma-separated) -> dhMaper
        MultipleInputs.addInputPath(job, new Path("/s1/"), TextInputFormat.class, kgMapper.class);
        MultipleInputs.addInputPath(job, new Path("/s2/"), TextInputFormat.class, dhMaper.class);
        FileOutputFormat.setOutputPath(job, new Path("/tmp/mul2"));
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.setReducerClass(myReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
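Two practical notes on running this. First, FileOutputFormat fails the job with a FileAlreadyExistsException if /tmp/mul2 already exists, so reruns need the directory cleared. Here is a minimal sketch of doing that inside main, before the job is submitted (this is an addition, not part of the original code, and needs one extra import):

import org.apache.hadoop.fs.FileSystem;

// Delete the output directory if present so the job can be rerun;
// "conf" is the same Configuration passed to Job.getInstance above.
FileSystem fs = FileSystem.get(conf);
Path out = new Path("/tmp/mul2");
if (fs.exists(out)) {
    fs.delete(out, true); // true = recursive delete
}

Second, to submit the job, package it into a jar and run it with the hadoop CLI, e.g. hadoop jar wordcount.jar Compression.Multipule_worldCount (the jar name here is just a placeholder). Assuming /s1 holds a line like "hello world" and /s2 holds "hello,hadoop", the part-r-* output under /tmp/mul2 would read: hadoop 1, hello 2, world 1.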