Using Hadoop's CombineFileInputFormat

CombineFileInputFormat is the input format Hadoop provides for dealing with small files: it packs many small files into a single InputSplit, avoiding the cost of launching one map task per file. The merge behaviour is tuned with mapred.max.split.size, mapred.min.split.size.per.node and mapred.min.split.size.per.rack, which keep the resulting splits reasonably balanced. In Hive you can simply switch the input format to CombineHiveInputFormat; in MapReduce you have to implement the supporting classes yourself.

What CombineFileInputFormat does: it packs multiple small files into one InputSplit handled by a single map task, so a job over a large number of small files does not spawn an equally large number of tasks.

CombineFileInputFormat is a newer InputFormat that merges several files into one split and, in addition, takes the storage location of the data into account. The older MultiFileInputFormat split strictly at file boundaries, which could make splits very uneven: a single large file would be processed by one map task on its own and become a severe straggler.

CombineFileInputFormat is an abstract class, so you have to subclass it yourself.

1. In Hive you can simply set:

set mapred.max.split.size=256000000;  -- target size of each combined split, i.e. roughly how much data one map task handles

set mapred.min.split.size.per.node=256000000;  -- minimum split size on a node: after the files on a node are packed by mapred.max.split.size, a leftover larger than this becomes its own split, otherwise it is held back for the rack level

set mapred.min.split.size.per.rack=256000000;  -- minimum split size within a rack: the per-node leftovers in a rack are merged and again cut by mapred.max.split.size; a remainder larger than this becomes its own split

Finally, the leftovers from all racks are merged and cut by mapred.max.split.size once more; whatever remains, regardless of size, becomes one last split.

set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
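As a concrete illustration of the merge rules (an assumed example using the 256 MB settings above): a node holding 700 MB of small files first yields two 256 MB splits; the remaining 188 MB is below mapred.min.split.size.per.node, so it is not emitted as a split on that node but is pooled at the rack level with the other nodes' leftovers and cut again by mapred.max.split.size, and only a remainder above mapred.min.split.size.per.rack becomes a rack-local split; anything still left over joins the final cross-rack merge described above.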

 

2. Using it in MapReduce

Define a custom input format class, MyMultiFileInputFormat (code adapted from other blog posts):

 

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

public class MyMultiFileInputFormat extends CombineFileInputFormat<MultiFileInputWritableComparable, Text>  
{
    @Override
    public RecordReader<MultiFileInputWritableComparable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException 
    {
        // A CombineFileSplit can cover several paths. The framework class CombineFileRecordReader
        // uses reflection to construct one CombineFileLineRecordReader (our custom class below)
        // per path and chains them, so each file's key/value pairs are read in turn.
        // See the source of org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader for details.
        return new CombineFileRecordReader<MultiFileInputWritableComparable, Text>(
                (CombineFileSplit) split, context, CombineFileLineRecordReader.class);
    }
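
    // Optionally, the subclass can set the combine thresholds itself instead of relying on
    // mapred.max.split.size etc. in the job configuration. This constructor is only a sketch;
    // the 128 MB figure is an illustrative assumption, tune it for your cluster.
    public MyMultiFileInputFormat() {
        setMaxSplitSize(128 * 1024 * 1024L); // plays the same role as mapred.max.split.size
    }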
    
}

The custom CombineFileLineRecordReader class:

 

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.util.LineReader;

@SuppressWarnings("deprecation")
public class CombineFileLineRecordReader extends RecordReader<MultiFileInputWritableComparable, Text> {

	private long startOffset; // offset of the chunk;
	private long end; // end of the chunk;
	private long pos; // current pos
	private FileSystem fs;
	private Path path; // path of hdfs
	private MultiFileInputWritableComparable key;
	private Text value; // value should be string(hadoop Text)

	private FSDataInputStream fileIn;
	private LineReader reader;

	public CombineFileLineRecordReader(CombineFileSplit split, TaskAttemptContext context, Integer index) throws IOException 
	{
		fs = FileSystem.get(context.getConfiguration());
		this.path = split.getPath(index);
		this.startOffset = split.getOffset(index);
		this.end = startOffset + split.getLength(index);
		boolean skipFirstLine = false;

		fileIn = fs.open(path); // open the file
		if (startOffset != 0) {
			skipFirstLine = true;
			--startOffset;
			fileIn.seek(startOffset);
		}
		reader = new LineReader(fileIn);
		if (skipFirstLine) // skip first line and re-establish "startOffset".
		{
			int readNum = reader.readLine(new Text(),0,(int) Math.min((long) Integer.MAX_VALUE, end - startOffset));
			startOffset += readNum;
		}
		this.pos = startOffset;
	}

	public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException 
	{}

	public void close() throws IOException 
	{
		reader.close();
	}

	public float getProgress() throws IOException 
	{
		if (startOffset == end) {
			return 0.0f;
		} else {
			return Math.min(1.0f, (pos - startOffset) / (float) (end - startOffset));
		}
	}

	public boolean nextKeyValue() throws IOException 
	{
		if (key == null) {
			key = new MultiFileInputWritableComparable();
			key.setFileName(path.getName());
		}
		key.setOffset(pos);
		if (value == null) {
			value = new Text();
		}
		int newSize = 0;
		if (pos < end) {
			newSize = reader.readLine(value);
			pos += newSize;
		}
		if (newSize == 0) {
			key = null;
			value = null;
			return false;
		} else {
			return true;
		}
	}

	public MultiFileInputWritableComparable getCurrentKey() throws IOException, InterruptedException 
	{
		return key;
	}

	public Text getCurrentValue() throws IOException, InterruptedException 
	{
		return value;
	}
}
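
Note on the offset handling in the constructor: when a chunk does not start at offset 0 the reader cannot know whether it begins in the middle of a line, so it backs up one byte, discards everything up to the next newline, and re-establishes startOffset from the bytes actually consumed. Because nextKeyValue keeps reading as long as pos < end, the reader responsible for the preceding chunk reads the line that straddles the boundary, so every line is consumed exactly once. This is the same convention LineRecordReader uses for ordinary text splits.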


The MultiFileInputWritableComparable key class:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

@SuppressWarnings("rawtypes")
public class MultiFileInputWritableComparable implements WritableComparable {

    private long offset;       //offset of this file block
    private String fileName;   //filename of this block

    public long getOffset() {
		return offset;
	}

	public void setOffset(long offset) {
		this.offset = offset;
	}

	public String getFileName() {
		return fileName;
	}

	public void setFileName(String fileName) {
		this.fileName = fileName;
	}

	public void readFields(DataInput in) throws IOException {
      this.offset = in.readLong();
      this.fileName = Text.readString(in);
    }

    public void write(DataOutput out) throws IOException {
      out.writeLong(offset);
      Text.writeString(out, fileName);
    }

    public int compareTo(Object object) {
      MultiFileInputWritableComparable that = (MultiFileInputWritableComparable)object;

      int compare = this.fileName.compareTo(that.fileName);
      if(compare == 0) {
        return (int)Math.signum((double)(this.offset - that.offset));
      }
      return compare;
    }
    @Override
    public boolean equals(Object object) {
      if(object instanceof MultiFileInputWritableComparable)
        return this.compareTo(object) == 0;
      return false;
    }
    @Override
    public int hashCode() {
      assert false : "hashCode not designed";
      return 42; //an arbitrary constant
    }
}
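
The key pairs each line with the name of the file it came from and its byte offset within that file, so the mapper can tell which of the combined small files a record belongs to. compareTo orders first by file name and then by offset; note that hashCode is deliberately left as a constant placeholder, so this class is only suitable as an input key, not as a map output key that gets hash-partitioned between reducers.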


Test: a WordCount job that reads its input through MyMultiFileInputFormat. The original listing breaks off inside the map method; the remainder below is a sketch that completes it along the standard WordCount pattern (tokenize, emit (word, 1), sum with IntSumReducer).

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

@SuppressWarnings("deprecation")
public class MultiFileWordCount extends Configured implements Tool {


  public static class MapClass extends 
      Mapper<MultiFileInputWritableComparable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();
    
    public void map(MultiFileInputWritableComparable key, Text value, Context context)
        throws IOException, InterruptedException {
      
      String line = value.toString();
      // Standard WordCount map: split the line into tokens and emit (word, 1) for each.
      StringTokenizer itr = new StringTokenizer(line);
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  // Driver: wire MyMultiFileInputFormat into an otherwise ordinary WordCount job.
  public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

    Job job = new Job(conf, "MultiFileWordCount");
    job.setJarByClass(MultiFileWordCount.class);
    job.setInputFormatClass(MyMultiFileInputFormat.class); // combine small files into few splits
    job.setMapperClass(MapClass.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    return job.waitForCompletion(true) ? 0 : 1;
  }

  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new MultiFileWordCount(), args));
  }
}
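
To see the effect, run the job on a directory containing many small files, for example hadoop jar <your-jar> MultiFileWordCount <input-dir> <output-dir> (jar name and paths are placeholders), and compare the number of map tasks launched against a plain TextInputFormat WordCount over the same input: with the combined splits the task count should drop to roughly the total input size divided by the configured maximum split size.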