Hadoop CombineFileInputFormat

Hadoop launches a map task on each node that stores the data to be processed. If a data block is large and the configured split size is smaller than the block size, the block is divided into several splits, some of which may end up being processed by map tasks on other nodes. Smaller splits help performance, especially for computation-heavy jobs. Conversely, if you want a single map task to handle a larger piece of data, you can raise the mapred.min.split.size parameter so that one task processes a whole block. These are two common requirements, and both have straightforward solutions.

The trickier case is a job that has to process a large number of small files, for example tens of thousands of files of only a few KB each. Here you want a small number of map tasks to cover all of them, which reduces the number of map tasks that must be scheduled, lowers resource consumption, and speeds up the job. Hadoop provides CombineFileInputFormat for exactly this purpose, but it is abstract and comes with no default implementation, which makes it awkward to use. Fortunately, the Hadoop source tree (and the examples jar shipped with the release) contains a sample implementation. I tested it and it ran successfully, so I am posting it here in the hope that it helps others.
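
For reference, here is a minimal sketch (not part of the original example) of how these split-size knobs can be set from a driver. The property names are an assumption for Hadoop 1.x; newer releases renamed them to mapreduce.input.fileinputformat.split.minsize and mapreduce.input.fileinputformat.split.maxsize, so check them against your version.

import org.apache.hadoop.conf.Configuration;

public class SplitSizeSketch {

    // Returns a Configuration with the split-size properties discussed above.
    // Property names assume Hadoop 1.x and are only illustrative.
    public static Configuration configure() {
        Configuration conf = new Configuration();
        // Raise the minimum split size so that one map task covers a whole 256 MB block.
        conf.setLong("mapred.min.split.size", 256L * 1024 * 1024);
        // Cap each combined split produced by CombineFileInputFormat at 64 MB,
        // so that many small files are grouped without any single map task getting too much data.
        conf.setLong("mapred.max.split.size", 64L * 1024 * 1024);
        return conf;
    }
}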


PS: there are quite a few differences between Hadoop versions, so all of the imports are included in the code below.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

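// Key type for the mapper: identifies a record by the name of the file it came
// from and the byte offset of the line within that file.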
@SuppressWarnings("rawtypes")
public class MultiFileInputWritableComparable implements WritableComparable {

    private long offset;       //offset of this file block
    private String fileName;   //filename of this block

    public long getOffset() {
      return offset;
    }

    public void setOffset(long offset) {
      this.offset = offset;
    }

    public String getFileName() {
      return fileName;
    }

    public void setFileName(String fileName) {
      this.fileName = fileName;
    }

    public void readFields(DataInput in) throws IOException {
      this.offset = in.readLong();
      this.fileName = Text.readString(in);
    }

    public void write(DataOutput out) throws IOException {
      out.writeLong(offset);
      Text.writeString(out, fileName);
    }

    public int compareTo(Object object) {
      MultiFileInputWritableComparable that = (MultiFileInputWritableComparable)object;

      int compare = this.fileName.compareTo(that.fileName);
      if(compare == 0) {
        return (int)Math.signum((double)(this.offset - that.offset));
      }
      return compare;
    }
    @Override
    public boolean equals(Object object) {
      if(object instanceof MultiFileInputWritableComparable)
        return this.compareTo(object) == 0;
      return false;
    }
    @Override
    public int hashCode() {
      assert false : "hashCode not designed";
      return 42; //an arbitrary constant
    }
}

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.util.LineReader;

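// Record reader for a single file chunk inside a CombineFileSplit; CombineFileRecordReader
// creates one instance per file (via the constructor's index argument) and reads it line by line.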
@SuppressWarnings("deprecation")
public class CombineFileLineRecordReader extends RecordReader<MultiFileInputWritableComparable, Text> {

	private long startOffset; // offset of the chunk;
	private long end; // end of the chunk;
	private long pos; // current pos
	private FileSystem fs;
	private Path path; // path of hdfs
	private MultiFileInputWritableComparable key;
	private Text value; // value should be string(hadoop Text)

	private FSDataInputStream fileIn;
	private LineReader reader;

	public CombineFileLineRecordReader(CombineFileSplit split, TaskAttemptContext context, Integer index) throws IOException 
	{
		fs = FileSystem.get(context.getConfiguration());
		this.path = split.getPath(index);
		this.startOffset = split.getOffset(index);
		this.end = startOffset + split.getLength(index);
		boolean skipFirstLine = false;

		fileIn = fs.open(path); // open the file
		if (startOffset != 0) {
			skipFirstLine = true;
			--startOffset;
			fileIn.seek(startOffset);
		}
		reader = new LineReader(fileIn);
		if (skipFirstLine) // skip first line and re-establish "startOffset".
		{
			int readNum = reader.readLine(new Text(),0,(int) Math.min((long) Integer.MAX_VALUE, end - startOffset));
			startOffset += readNum;
		}
		this.pos = startOffset;
	}

	public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException 
	{}

	public void close() throws IOException 
	{
		reader.close();
	}

	public float getProgress() throws IOException 
	{
		if (startOffset == end) {
			return 0.0f;
		} else {
			return Math.min(1.0f, (pos - startOffset) / (float) (end - startOffset));
		}
	}

	public boolean nextKeyValue() throws IOException 
	{
		if (key == null) {
			key = new MultiFileInputWritableComparable();
			key.setFileName(path.getName());
		}
		key.setOffset(pos);
		if (value == null) {
			value = new Text();
		}
		int newSize = 0;
		if (pos < end) {
			newSize = reader.readLine(value);
			pos += newSize;
		}
		if (newSize == 0) {
			key = null;
			value = null;
			return false;
		} else {
			return true;
		}
	}

	public MultiFileInputWritableComparable getCurrentKey() throws IOException, InterruptedException 
	{
		return key;
	}

	public Text getCurrentValue() throws IOException, InterruptedException 
	{
		return value;
	}
}

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

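// Input format that packs many small files into CombineFileSplits and delegates the
// actual reading of each file to CombineFileLineRecordReader.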
public class MyMultiFileInputFormat extends CombineFileInputFormat<MultiFileInputWritableComparable, Text>  
{
    public RecordReader<MultiFileInputWritableComparable,Text> createRecordReader(InputSplit split,TaskAttemptContext context) throws IOException 
    {
      return new CombineFileRecordReader<MultiFileInputWritableComparable, Text>
      ((CombineFileSplit)split, context, CombineFileLineRecordReader.class);
    }
    
}

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

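// Driver: a word count that consumes the combined splits produced by MyMultiFileInputFormat.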
@SuppressWarnings("deprecation")
public class MultiFileWordCount extends Configured implements Tool {


  public static class MapClass extends 
      Mapper<MultiFileInputWritableComparable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();
    
    public void map(MultiFileInputWritableComparable key, Text value, Context context)
        throws IOException, InterruptedException {
      
      String line = value.toString();
      StringTokenizer itr = new StringTokenizer(line);
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }
  
  private void printUsage() {
    System.out.println("Usage : multifilewc <input_dir> <input_dir> <output>" );
  }

  public int run(String[] args) throws Exception {

    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 3) { // expect 3 arguments: two input paths followed by one output directory
      printUsage();
      return 2;
    }

    Job job = new Job(getConf());
    job.setJobName("MultiFileWordCount");
    job.setJarByClass(MultiFileWordCount.class);

    //set the InputFormat of the job to our InputFormat
    job.setInputFormatClass(MyMultiFileInputFormat.class);
    
    // the keys are words (strings)
    job.setOutputKeyClass(Text.class);
    // the values are counts (ints)
    job.setOutputValueClass(IntWritable.class);

    //use the defined mapper
    job.setMapperClass(MapClass.class);
    //use the WordCount Reducer
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);

    FileInputFormat.addInputPaths(job, otherArgs[0]); // first two arguments are input paths
    FileInputFormat.addInputPaths(job, otherArgs[1]);
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[2])); // last argument is the output directory

    return job.waitForCompletion(true) ? 0 : 1;
  }

  public static void main(String[] args) throws Exception {
    int ret = ToolRunner.run(new MultiFileWordCount(), args);
    System.exit(ret);
  }

}

Run the job: hadoop jar WordCount.jar /user/lmc/201208150930/part-00000 /user/lmc/201208150930/part-00001 /user/lmc/mutiwordcount

The job completed successfully.
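
Since the driver runs through ToolRunner/GenericOptionsParser, the split-size properties can also be passed on the command line with -D instead of being hard-coded. A hypothetical variant of the command above (the property name again assumes Hadoop 1.x; newer releases use mapreduce.input.fileinputformat.split.maxsize):

hadoop jar WordCount.jar -D mapred.max.split.size=67108864 /user/lmc/201208150930/part-00000 /user/lmc/201208150930/part-00001 /user/lmc/mutiwordcount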
