MapReduce Mini-Case 4: Building an Inverted Index

Requirement: we have a large number of text documents, such as the following:

a.txt

hello tom

hello jim

hello kitty

hello rose

 

b.txt

hello jerry

hello jim

hello kitty

hello jack

 

c.txt

hello jerry

hello java

hello c++

hello c++

 

We need to produce a result like the following:

hello  a.txt-->4  b.txt-->4  c.txt-->4

java   c.txt-->1

jerry  b.txt-->1  c.txt-->1

....

Approach:

1. First, write an MR program that counts the total number of occurrences of each word in each file:

hello-a.txt 4

hello-b.txt 4

hello-c.txt 4

java-c.txt 1

jerry-b.txt 1

jerry-c.txt 1

 

Key point 1: in the map method, how do we get the name of the file that the current line of data belongs to?

When a worker calls the map method, it passes in a context; the context carries the information about the input split this worker is reading, and the split information in turn includes the file that the split belongs to.

So, inside map we can do:

FileSplit split = (FileSplit) context.getInputSplit();

String fileName = split.getPath().getName();

 

Key point 2: the setup method
Before a worker starts processing its data, it first calls the setup method once; this hook is therefore commonly used for one-time initialization, as in the sketch below.
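
A minimal sketch of that idea (the class name CachedNameMapper and the cached field are my own, not from the original code): since one map task processes one split, and a split belongs to one file, the file name can be resolved once in setup rather than on every map call.

public static class CachedNameMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

	// cached once per map task
	private String fileName;

	@Override
	protected void setup(Context context) throws IOException, InterruptedException {
		// called once before any map() call: resolve this split's file name
		FileSplit split = (FileSplit) context.getInputSplit();
		fileName = split.getPath().getName();
	}

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		for (String w : value.toString().split(" ")) {
			context.write(new Text(w + "-" + fileName), new IntWritable(1));
		}
	}
}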

 


2. Then write a second MR program that reads the result data produced above:

map: split each line on "-", using the word as the key and the rest of the line as the value

reduce: concatenate all the segments in values, then output the word as the key and the concatenated string as the value


The implementation code is as follows:

package cn.edu360.mr.index;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IndexStepOne {
	
	public static class IndexStepOneMapper extends Mapper<LongWritable, Text, Text, IntWritable>{
		
		// emit <word-fileName, 1>
		@Override
		protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			
			// from the input split info, get the file that this line of data belongs to
			FileSplit inputSplit = (FileSplit)context.getInputSplit();
			String fileName = inputSplit.getPath().getName();
			
			String[] words = value.toString().split(" ");
			for (String w : words) {
				
				// emit "word-fileName" as the key and 1 as the value
				context.write(new Text(w + "-" + fileName), new IntWritable(1));
				
			}
		}
		
	}
	
	public static class IndexStepOneReducer extends Reducer<Text, IntWritable, Text, IntWritable>{
		@Override
		protected void reduce(Text key, Iterable<IntWritable> values,
				Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {

			int count = 0;
			for (IntWritable value : values) {
				count += value.get();
			}
			context.write(key, new IntWritable(count));
		}
	}
	
	
	
	public static void main(String[] args) throws Exception {
		
		Configuration conf = new Configuration();
		
		Job job = Job.getInstance(conf);
		
		job.setJarByClass(IndexStepOne.class);
		
		job.setMapperClass(IndexStepOneMapper.class);
		job.setReducerClass(IndexStepOneReducer.class);
		
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		
		FileInputFormat.addInputPath(job, new Path("F:\\mrdata\\index\\input"));
		FileOutputFormat.setOutputPath(job, new Path("F:\\mrdata\\index\\out1"));
		
		// use 3 reduce tasks, so the result of step one is spread over 3 part files
		job.setNumReduceTasks(3);
		job.waitForCompletion(true);
	}
}
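
Because step one runs with 3 reduce tasks, out1 will contain three result files, part-r-00000 through part-r-00002; step two simply takes the whole out1 directory as its input, so the parts never need to be merged by hand.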


package cn.edu360.mr.index;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IndexStepTwo {
	
	public static class IndexStepTwoMapper extends Mapper<LongWritable, Text, Text, Text>{
		
		// input lines look like "hello-a.txt\t4"; emit <hello, a.txt-->4>
		@Override
		protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context)
				throws IOException, InterruptedException {
			

			// a line from step one looks like "hello-a.txt\t4": split off the word
			String[] split = value.toString().split("-");

			// turn "a.txt\t4" into "a.txt-->4" and emit <word, file-->count>
			context.write(new Text(split[0]), new Text(split[1].replaceAll("\t", "-->")));
		}
		
	}
	
	public static class IndexStepTwoReducer extends Reducer<Text, Text, Text, Text>{
		
		// one key group: <hello, a.txt-->4> <hello, b.txt-->4> <hello, c.txt-->4>
		@Override
		protected void reduce(Text key, Iterable<Text> values,
				Reducer<Text, Text, Text, Text>.Context context) throws IOException, InterruptedException {
			// StringBuffer is thread-safe and StringBuilder is not; when thread safety is not a concern, StringBuilder is faster
			StringBuilder sb = new StringBuilder();
			
			for (Text value : values) {
				sb.append(value.toString()).append("\t");
			}
			context.write(key, new Text(sb.toString()));
		}
	}
	
	
	
	public static void main(String[] args) throws Exception {
		
		Configuration conf = new Configuration();
		
		Job job = Job.getInstance(conf);
		
		job.setJarByClass(IndexStepTwo.class);
		
		job.setMapperClass(IndexStepTwoMapper.class);
		job.setReducerClass(IndexStepTwoReducer.class);
		
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(Text.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);
		
		FileInputFormat.addInputPath(job, new Path("F:\\mrdata\\index\\out1"));
		FileOutputFormat.setOutputPath(job, new Path("F:\\mrdata\\index\\out2"));
		
		job.waitForCompletion(true);
	}
}
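
To launch both steps with a single command, the two jobs can also be chained in one driver that starts step two only after step one has succeeded. The sketch below is my own addition, not part of the original code; IndexDriver is a made-up name, and the paths are the same local test paths used above:

package cn.edu360.mr.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IndexDriver {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();

		// step one: count occurrences of each word per file
		Job job1 = Job.getInstance(conf);
		job1.setJarByClass(IndexDriver.class);
		job1.setMapperClass(IndexStepOne.IndexStepOneMapper.class);
		job1.setReducerClass(IndexStepOne.IndexStepOneReducer.class);
		job1.setOutputKeyClass(Text.class);
		job1.setOutputValueClass(IntWritable.class);
		FileInputFormat.addInputPath(job1, new Path("F:\\mrdata\\index\\input"));
		FileOutputFormat.setOutputPath(job1, new Path("F:\\mrdata\\index\\out1"));
		job1.setNumReduceTasks(3);

		// run step two only if step one succeeded
		if (!job1.waitForCompletion(true)) {
			System.exit(1);
		}

		// step two: merge the per-file counts into one inverted-index line per word
		Job job2 = Job.getInstance(conf);
		job2.setJarByClass(IndexDriver.class);
		job2.setMapperClass(IndexStepTwo.IndexStepTwoMapper.class);
		job2.setReducerClass(IndexStepTwo.IndexStepTwoReducer.class);
		job2.setOutputKeyClass(Text.class);
		job2.setOutputValueClass(Text.class);
		FileInputFormat.addInputPath(job2, new Path("F:\\mrdata\\index\\out1"));
		FileOutputFormat.setOutputPath(job2, new Path("F:\\mrdata\\index\\out2"));

		System.exit(job2.waitForCompletion(true) ? 0 : 1);
	}
}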
