Inverted Index Example

1 Requirement: given a large collection of text (documents, web pages), build a search index.
Sample data:

a.txt
guigui pingping
guigui ss
guigui ss

b.txt
guigui pingping
guigui pingping
pingping ss

c.txt
guigui ss
guigui pingping

1) Expected output of the first pass

guigui--a.txt   3
guigui--b.txt   2
guigui--c.txt   2
pingping--a.txt 1
pingping--b.txt 3
pingping--c.txt 1
ss--a.txt   2
ss--b.txt   1
ss--c.txt   1

2) Expected output of the second pass

guigui  c.txt-->2   b.txt-->2   a.txt-->3   
pingping    c.txt-->1   b.txt-->3   a.txt-->1   
ss  c.txt-->1   b.txt-->1   a.txt-->2   
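
The overall approach is two chained MapReduce jobs: the first counts occurrences of each word per file, keying on word--filename, and the second regroups those per-file counts by word. Note that the order of the filename-->count entries on each output line reflects the order in which values reach the reducer, which Hadoop does not guarantee. The standalone sketch below (a hypothetical class, not part of the job code) traces how one record from the first pass is reshaped into a fragment of the final line:

public class RecordTransformDemo {
    public static void main(String[] args) {
        // One line as the first job writes it: key, tab, count
        String firstPassLine = "guigui--a.txt\t3";

        // TwoIndexMapper's split: the word vs. "filename\tcount"
        String[] fields = firstPassLine.split("--");
        String word = fields[0];         // "guigui"
        String fileAndCount = fields[1]; // "a.txt\t3"

        // TwoIndexReducer's replace: the tab becomes "-->"
        String fragment = fileAndCount.replace("\t", "-->");
        System.out.println(word + "\t" + fragment); // prints: guigui  a.txt-->3
    }
}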

2 First pass
1) First pass: write OneIndexMapper

package com.da.index;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class OneIndexMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private String pathName;
    private Text k = new Text();
    private IntWritable v = new IntWritable(1);

    @Override
    protected void setup(Mapper<LongWritable, Text, Text, IntWritable>.Context context)
            throws IOException, InterruptedException {
        // Get the name of the file this split comes from
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        pathName = inputSplit.getPath().getName();
    }

    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
            throws IOException, InterruptedException {
        // Read one line
        String line = value.toString();

        // Split the line into words
        String[] fields = line.split(" ");

        for (String word : fields) {
            // Build the composite key: word--filename
            k.set(word + "--" + pathName);
            // Emit (word--filename, 1)
            context.write(k, v);
        }
    }
}
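
For the first line of a.txt, this mapper emits the pairs:

guigui--a.txt   1
pingping--a.txt 1

The framework then groups all pairs with the same key before they reach the reducer.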

2) First pass: write OneIndexReducer

package com.da.index;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class OneIndexReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values,
            Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {
        // Sum the counts for this word--filename key
        int total = 0;
        for (IntWritable value : values) {
            total += value.get();
        }
        v.set(total);
        // Emit (word--filename, total)
        context.write(key, v);
    }
}
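
Because this reducer only sums integers, an associative and commutative operation, it can also safely serve as a combiner to cut down shuffle traffic. This is an optional optimization, not part of the original job; it takes one extra line in the driver:

// In OneIndexDriver, after setReducerClass:
job.setCombinerClass(OneIndexReducer.class);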

3) First pass: write OneIndexDriver

package com.da.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class OneIndexDriver {
    public static void main(String[] args) throws Exception {
        // Local test paths; these override any command-line arguments
        args = new String[] { "e:/mrinput", "e:/mrout" };
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(OneIndexDriver.class);
        job.setMapperClass(OneIndexMapper.class);
        job.setReducerClass(OneIndexReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
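
Hadoop aborts a job if its output directory already exists, so when re-running locally it helps to delete the directory first. A minimal sketch of that check, assuming the default FileSystem configured in conf (place it before FileOutputFormat.setOutputPath):

import org.apache.hadoop.fs.FileSystem;

FileSystem fs = FileSystem.get(conf);
Path outPath = new Path(args[1]);
if (fs.exists(outPath)) {
    fs.delete(outPath, true); // true = delete recursively
}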

3 Second pass
1) Second pass: write TwoIndexMapper

package com.da.index;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class TwoIndexMapper extends Mapper<LongWritable, Text, Text, Text> {
    private Text k = new Text();
    private Text v = new Text();

    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context)
            throws IOException, InterruptedException {
        // Read one line of the first job's output, e.g. "guigui--a.txt\t3"
        String line = value.toString();
        // Split on "--": fields[0] is the word, fields[1] is "filename\tcount"
        String[] fields = line.split("--");

        k.set(fields[0]);
        v.set(fields[1]);
        // Emit (word, "filename\tcount")
        context.write(k, v);
    }
}
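
Since the first job writes key\tvalue lines, an alternative is to let Hadoop split on the tab by setting KeyValueTextInputFormat on the second job. A sketch of that variant (a hypothetical TwoIndexKvMapper; the code above does not use it):

package com.da.index;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Requires in the driver: job.setInputFormatClass(KeyValueTextInputFormat.class);
public class TwoIndexKvMapper extends Mapper<Text, Text, Text, Text> {
    private Text k = new Text();
    private Text v = new Text();

    @Override
    protected void map(Text key, Text value, Context context)
            throws IOException, InterruptedException {
        // KeyValueTextInputFormat already split the line at the first tab:
        // key is "guigui--a.txt", value is "3"
        String[] fields = key.toString().split("--");
        k.set(fields[0]);
        // Re-join as "filename\tcount" so TwoIndexReducer works unchanged
        v.set(fields[1] + "\t" + value.toString());
        context.write(k, v);
    }
}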

2) Second pass: write TwoIndexReducer

package com.da.index;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TwoIndexReducer extends Reducer<Text, Text, Text, Text> {
    private Text v = new Text();

    @Override
    protected void reduce(Text key, Iterable<Text> values, Reducer<Text, Text, Text, Text>.Context context)
            throws IOException, InterruptedException {
        // Concatenate all "filename-->count" entries for this word
        StringBuilder sb = new StringBuilder();
        for (Text value : values) {
            // Turn "a.txt\t3" into "a.txt-->3"
            String entry = value.toString().replace("\t", "-->");
            sb.append(entry).append("\t");
        }
        v.set(sb.toString());
        // Emit (word, tab-separated filename-->count list)
        context.write(key, v);
    }
}

3) Second pass: write TwoIndexDriver

package com.da.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TwoIndexDriver {
    public static void main(String[] args) throws Exception {
        // Local test paths; the input here must be the first job's output
        // directory, and the output must be a directory that does not exist yet
        args = new String[] { "e:/mrout", "e:/mrout2" };
        Configuration config = new Configuration();
        Job job = Job.getInstance(config);
        job.setJarByClass(TwoIndexDriver.class);
        job.setMapperClass(TwoIndexMapper.class);
        job.setReducerClass(TwoIndexReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
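
For completeness, the two jobs can also be chained in a single driver so the second job automatically consumes the first job's output. A minimal sketch (a hypothetical IndexChainDriver, using the same illustrative local paths as above):

package com.da.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IndexChainDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path input = new Path("e:/mrinput");
        Path firstOut = new Path("e:/mrout");
        Path finalOut = new Path("e:/mrout2");

        // First pass: count occurrences of each word per file
        Job one = Job.getInstance(conf, "one-index");
        one.setJarByClass(IndexChainDriver.class);
        one.setMapperClass(OneIndexMapper.class);
        one.setReducerClass(OneIndexReducer.class);
        one.setMapOutputKeyClass(Text.class);
        one.setMapOutputValueClass(IntWritable.class);
        one.setOutputKeyClass(Text.class);
        one.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(one, input);
        FileOutputFormat.setOutputPath(one, firstOut);
        if (!one.waitForCompletion(true)) {
            System.exit(1); // stop if the first job fails
        }

        // Second pass: regroup the per-file counts by word
        Job two = Job.getInstance(conf, "two-index");
        two.setJarByClass(IndexChainDriver.class);
        two.setMapperClass(TwoIndexMapper.class);
        two.setReducerClass(TwoIndexReducer.class);
        two.setMapOutputKeyClass(Text.class);
        two.setMapOutputValueClass(Text.class);
        two.setOutputKeyClass(Text.class);
        two.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(two, firstOut);
        FileOutputFormat.setOutputPath(two, finalOut);
        System.exit(two.waitForCompletion(true) ? 0 : 1);
    }
}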