Understanding the MapReduce Framework Through WordCount

1. Introduction to the MapReduce Framework

MapReduce is Hadoop's distributed computing model. A job runs in two user-defined phases: the map phase reads input splits and turns each record into intermediate key-value pairs, and the reduce phase receives those pairs grouped and sorted by key and aggregates them into the final output. Between the two, the framework shuffles the intermediate data so that all values for the same key arrive at the same reduce task. WordCount is the canonical example, and the rest of this post walks through a complete WordCount implementation piece by piece.
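To make the data flow concrete, here is what a tiny comma-separated input would look like at each stage (the sample line is made up for illustration; the splitting rule matches the mapper shown later):

    input line:     hadoop,spark,hadoop
    map output:     (hadoop, 1), (spark, 1), (hadoop, 1)
    after shuffle:  (hadoop, [1, 1]), (spark, [1])
    reduce output:  (hadoop, 2), (spark, 1)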

2. The WordCount Code in MapReduce
The WordCount program in this post consists of the following parts:
  • a custom key class (WCBean)
  • Mapper
  • Partitioner
  • Combiner
  • Reducer
  • OutputFormat (with a custom RecordWriter)
  • Driver
  1. Custom key class
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
// Custom key type: implements WritableComparable so Hadoop can serialize it
// and sort/group the map output by word.
public class WCBean implements WritableComparable<WCBean> {
    private String word = "";

    public String getWord() {
        return this.word;
    }

    public void setWord(String word) {
        this.word = word;
    }

    @Override
    public int compareTo(WCBean other) {
        // Keys are sorted (and grouped in the reducer) by the word itself
        return this.word.compareTo(other.getWord());
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException {
        // Serialization order must match readFields
        dataOutput.writeUTF(this.word);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
        this.word = dataInput.readUTF();
    }

    @Override
    public String toString() {
        // toString is what the custom RecordWriter writes out, so return the bare word
        return this.word;
    }
}
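One thing worth noting: WCBean does not override hashCode or equals. That is fine here because the job installs its own WCPartitioner (shown below), but if the default HashPartitioner were used instead, equal words held by different WCBean instances could hash to different reduce tasks and the counts for a word would be split across outputs. A minimal sketch of what would need to be added inside WCBean in that case:

    // Only needed if the default HashPartitioner is used: partition by the word itself
    @Override
    public int hashCode() {
        return this.word.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (!(obj instanceof WCBean)) return false;
        return this.word.equals(((WCBean) obj).word);
    }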
  2. Mapper
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WCMapper extends Mapper<LongWritable, Text,WCBean, IntWritable> {

    IntWritable val = new IntWritable();
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Each input line is a comma-separated list of words; emit (word, 1) for every token
        String line = value.toString();
        String[] fields = line.split(",");

        for(String field:fields){
            WCBean wcBean = new WCBean();
            wcBean.setWord(field);
            val.set(1);
            context.write(wcBean,val);
        }
    }
}
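A small design note: because the emitted count is always 1, val could be set once outside the loop (or replaced by a shared constant IntWritable). context.write serializes the key and value at the time of the call, so reusing the same writable instances across records is safe.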
  3. Partitioner
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Partitioner;

public class WCPartitioner extends Partitioner<WCBean, IntWritable> {
    @Override
    public int getPartition(WCBean wcBean, IntWritable intWritable, int numPartitions) {
        // Words starting with "h" or "s" go to partition 0, everything else to partition 1;
        // startsWith also avoids an index error on empty tokens (e.g. from consecutive commas)
        String word = wcBean.getWord();
        int partitionNum = 1;
        if (word.startsWith("h") || word.startsWith("s")) {
            partitionNum = 0;
        }
        return partitionNum;
    }
}
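Note that a custom partitioner only takes effect when the job runs with more than one reduce task. With the default single reducer (as configured in the driver below, where setNumReduceTasks(2) is commented out), Hadoop sends everything to partition 0 and never calls this class.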
  4. Combiner
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WCCombiner extends Reducer<WCBean, IntWritable,WCBean, IntWritable> {
    IntWritable valSum = new IntWritable();
    @Override
    protected void reduce(WCBean key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Map-side pre-aggregation: partial sum of the counts for each word before the shuffle
        int cnt = 0;
        for(IntWritable val:values){
            cnt = cnt + val.get();
        }
        valSum.set(cnt);
        context.write(key,valSum);
    }
}
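The combiner reuses the Reducer API and runs on each mapper's output before it is shuffled, so it can substantially shrink the data sent over the network. Because the framework may apply a combiner zero, one, or several times, its operation has to be associative and commutative; summing counts qualifies, which is why the logic here can be identical to WCReducer below.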
  5. Reducer
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WCReducer extends Reducer<WCBean, IntWritable,WCBean,IntWritable> {
    IntWritable valSum = new IntWritable();
    @Override
    protected void reduce(WCBean key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Final aggregation: sum the (possibly pre-combined) counts for each word
        int cnt = 0;
        for(IntWritable val:values){
            cnt = cnt + val.get();
        }
        valSum.set(cnt);
        context.write(key,valSum);
    }
}
  6. OutputFormat
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class WCOutputFormat extends FileOutputFormat<WCBean, IntWritable> {
    @Override
    public RecordWriter<WCBean, IntWritable> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
        return new WCRecordWriter(job);
    }
}
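WCOutputFormat itself only hands back a RecordWriter; the actual writing logic lives in the custom WCRecordWriter: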
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

import java.io.IOException;

public class WCRecordWriter extends RecordWriter<WCBean, IntWritable> {
    private static String firstHDFSPath = "hdfs:///data/result/1";
    private static String secondHDFSPath = "hdfs:///data/result/2";
    private FSDataOutputStream firstOutputStream;
    private FSDataOutputStream secondOutputStream;
    public WCRecordWriter(TaskAttemptContext job) throws IOException {
        // Open two fixed HDFS output files; records are routed between them in write()
        FileSystem fileSystem = FileSystem.get(job.getConfiguration());
        this.firstOutputStream = fileSystem.create(new Path(firstHDFSPath));
        this.secondOutputStream = fileSystem.create(new Path(secondHDFSPath));
    }

    @Override
    public void write(WCBean wcBean, IntWritable val) throws IOException, InterruptedException {
        // Route by word length: words longer than 4 characters go to the first file
        String word = wcBean.getWord();
        if(word.length() > 4){
            firstOutputStream.write((wcBean.toString() + ",").getBytes());
            firstOutputStream.write((String.valueOf(val.get())).getBytes());
            firstOutputStream.write("\n".getBytes());
        } else{
            secondOutputStream.write((wcBean.toString() + ",").getBytes());
            secondOutputStream.write(String.valueOf(val.get()).getBytes());
            secondOutputStream.write("\n".getBytes());
        }
    }

    @Override
    public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        IOUtils.closeStream(firstOutputStream);
        IOUtils.closeStream(secondOutputStream);
    }
}
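One caveat with the hard-coded paths above: every reduce task gets its own RecordWriter, so running the job with more than one reducer would have all of them create (and overwrite) the same two files. A minimal sketch of how the constructor could make the paths task-specific instead (assuming a per-task suffix is acceptable for the output layout):

    public WCRecordWriter(TaskAttemptContext job) throws IOException {
        // Suffix each output file with the reduce task id so concurrent
        // reducers never write to the same path
        int taskId = job.getTaskAttemptID().getTaskID().getId();
        FileSystem fileSystem = FileSystem.get(job.getConfiguration());
        this.firstOutputStream = fileSystem.create(new Path(firstHDFSPath + "-" + taskId));
        this.secondOutputStream = fileSystem.create(new Path(secondHDFSPath + "-" + taskId));
    }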
  7. Driver
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.log4j.BasicConfigurator;

import java.io.IOException;

public class WCDriver {
    public static String HDFS_PATH = "hdfs://dong:9000";
    public static String INPUT_PATH = "hdfs:///data/test";
    public static String OUTPUT_PATH = "hdfs:///data/result";

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        BasicConfigurator.configure();
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS",HDFS_PATH);
        Job job = Job.getInstance(conf);

        job.setJarByClass(WCDriver.class);
        job.setMapperClass(WCMapper.class);
        job.setPartitionerClass(WCPartitioner.class);
        job.setCombinerClass(WCCombiner.class);
        job.setReducerClass(WCReducer.class);
        job.setOutputFormatClass(WCOutputFormat.class);

        job.setMapOutputKeyClass(WCBean.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(WCBean.class);
        job.setOutputValueClass(IntWritable.class);

        // The custom partitioner only matters with more than one reduce task; before
        // enabling this, make the output paths in WCRecordWriter task-specific (see above)
        //job.setNumReduceTasks(2);

        Path inputPath = new Path(INPUT_PATH);
        Path outputPath = new Path(OUTPUT_PATH);
        FileInputFormat.setInputPaths(job,inputPath);
        FileOutputFormat.setOutputPath(job,outputPath);

        // Delete any previous output directory so the job does not fail on an existing path
        FileSystem fileSystem = FileSystem.get(conf);
        if(fileSystem.exists(outputPath)){
            fileSystem.delete(outputPath,true);
        }

        boolean result = job.waitForCompletion(true);
        System.exit(result?0:1);
    }
}
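To run the job, the usual route is to package these classes into a jar and submit it with the hadoop command (the jar name here is just an example):

    hadoop jar wordcount.jar WCDriver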
