MapReduce的mapJoin算法

下面是一个完整的mapJoin源码,可以仔细品品哦!!
所需的数据:

orders.txt
	1001,20150710,p0001,2
	1002,20150710,p0002,3
	1002,20150710,p0003,3
product.txt
	p0001,小米5,1000,2000
	p0002,锤子T1,1000,3000

mapJoin只需要两个类,一个是map,一个驱动类。
map类

import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;

public class mapJoin extends Mapper<LongWritable, Text,Text,Text> {
    // In-memory lookup table built from the small (product) side of the join:
    // product id -> "name,price,amount" (the remaining product.txt columns).
    HashMap<String, String> mapJoin_map = new HashMap<>();

    /**
     * Loads the small product table from the distributed cache into memory,
     * once per map task. Each product.txt line is "id,name,price,amount".
     *
     * @throws IOException if no cache file was registered or reading fails
     */
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // context.getCacheFiles() is the replacement for the deprecated
        // DistributedCache.getCacheFiles(conf); both read the same job config.
        URI[] cacheFiles = context.getCacheFiles();
        if (cacheFiles == null || cacheFiles.length == 0) {
            throw new IOException("No cache file registered; add product.txt via job.addCacheFile()");
        }
        FileSystem fileSystem = FileSystem.get(cacheFiles[0], context.getConfiguration());
        // try-with-resources closes the reader (and underlying stream) even if
        // readLine() throws — the original leaked both on error.
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(fileSystem.open(new Path(cacheFiles[0])), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                String[] split = line.split(",");
                // Skip blank or malformed lines instead of throwing
                // ArrayIndexOutOfBoundsException on split[3].
                if (split.length < 4) {
                    continue;
                }
                mapJoin_map.put(split[0], split[1] + "," + split[2] + "," + split[3]);
            }
        }
        // Deliberately NOT closing fileSystem: FileSystem.get() returns a
        // cached, shared instance; closing it can break other users in the JVM.
    }

    /**
     * Joins one order line ("orderId,date,productId,amount") with the cached
     * product record keyed by productId (split[2]).
     *
     * Emits (productId, "productCols,orderId,date,amount"). Orders with no
     * matching product are dropped (inner-join semantics) — the original
     * emitted "null,..." rows for unmatched ids such as p0003.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] split = value.toString().split(",");
        if (split.length < 4) {
            return; // malformed order line — nothing to join
        }
        String datas = mapJoin_map.get(split[2]);
        if (datas == null) {
            return; // no matching product in the cached table
        }
        String JoinDatas = datas + "," + split[0] + "," + split[1] + "," + split[3];
        context.write(new Text(split[2]), new Text(JoinDatas));
    }
}

驱动类

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.net.URI;
public class JobMain {
    /**
     * Driver for the map-side join job.
     *
     * Usage: JobMain &lt;orders input path&gt; &lt;output path&gt;
     * The small product table must already exist on HDFS at
     * hdfs://node01:8020/product.txt (it is shipped to mappers via the
     * distributed cache).
     */
    public static void main(String[] args) throws Exception {
        if (args.length < 2) {
            System.err.println("Usage: JobMain <input path> <output path>");
            System.exit(2);
        }
        Configuration conf = new Configuration();
        // Job name fixed: the original "Partiton.Partitions" was a copy-paste
        // leftover from a partitioner example.
        Job job = Job.getInstance(conf, "mapJoin");
        job.setJarByClass(JobMain.class);
        // job.addCacheFile() replaces the deprecated DistributedCache API.
        // 注意要将product.txt文件放在集群哦!!!!!
        job.addCacheFile(new URI("hdfs://node01:8020/product.txt"));
        job.setMapperClass(mapJoin.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        // A map-side join needs no shuffle/reduce phase; with zero reducers the
        // map output is written directly (part-m-* files).
        job.setNumReduceTasks(0);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值