Small File Handling Case Study

1 Requirements
Small files hurt efficiency in both HDFS and MapReduce, yet in practice it is hard to avoid scenarios with large numbers of small files, so a corresponding solution is needed. The approach here is to merge many small files into a single SequenceFile: the SequenceFile stores multiple files, using each file's path + name as the key and the file's content as the value.
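
A minimal client-side sketch of that storage layout is shown below, written directly against the SequenceFile.Writer API rather than through MapReduce: every small file under an input directory is appended with its path + name as the key and its raw bytes as the value. The class name SmallFileMerger and the argument handling are illustrative assumptions; the MapReduce-based implementation used in this case study follows in section 3.

package com.da.inputformat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SmallFileMerger {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path inputDir = new Path(args[0]); // directory containing the small files
        Path outFile = new Path(args[1]);  // merged SequenceFile to create
        FileSystem fs = inputDir.getFileSystem(conf);

        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
                SequenceFile.Writer.file(outFile),
                SequenceFile.Writer.keyClass(Text.class),
                SequenceFile.Writer.valueClass(BytesWritable.class))) {
            for (FileStatus status : fs.listStatus(inputDir)) {
                if (!status.isFile()) {
                    continue;
                }
                // Read the whole small file into memory
                byte[] buf = new byte[(int) status.getLen()];
                FSDataInputStream in = fs.open(status.getPath());
                try {
                    IOUtils.readFully(in, buf, 0, buf.length);
                } finally {
                    IOUtils.closeStream(in);
                }
                // key = file path + name, value = raw file content
                writer.append(new Text(status.getPath().toString()), new BytesWritable(buf));
            }
        }
    }
}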

2 Analysis
Small-file optimization generally comes down to the following approaches:
1) At data-collection time, merge the small files (or small batches of data) into larger files before uploading them to HDFS
2) Before business processing, run a MapReduce program on HDFS to merge the small files
3) During MapReduce processing, use CombineTextInputFormat to pack many small files into fewer input splits and improve efficiency (see the sketch after this list)
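
As a rough illustration of approach 3), the sketch below configures a job with CombineTextInputFormat so that many small text files are packed into a few combined input splits. The class name CombineDriver, the identity map-only setup, and the 4 MB maximum split size are illustrative choices, not part of this case study.

package com.da.inputformat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class CombineDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(CombineDriver.class);

        // Pack many small files into a few combined splits
        job.setInputFormatClass(CombineTextInputFormat.class);
        // Upper bound for one combined split, here 4 MB (tune for the cluster)
        CombineTextInputFormat.setMaxInputSplitSize(job, 4 * 1024 * 1024);

        // Identity mapper, map-only job: records pass straight through
        job.setMapperClass(Mapper.class);
        job.setNumReduceTasks(0);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}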

3 Implementation
This section uses a custom InputFormat to handle the small input files:
1) Define a class that extends FileInputFormat
2) Override the RecordReader so that it reads one complete file per call and wraps it as a key-value pair
3) On the output side, use SequenceFileOutputFormat to write the merged file

4) Program implementation:
1) Custom InputFormat

package com.da.inputformat;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class WholeFileInputformat extends FileInputFormat<NullWritable, BytesWritable> {

    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        // Each file must be read as a whole, so do not split it
        return false;
    }

    @Override
    public RecordReader<NullWritable, BytesWritable> createRecordReader(InputSplit split, TaskAttemptContext context)
            throws IOException, InterruptedException {
        // Create the record reader
        WholeRecordReader recordReader = new WholeRecordReader();
        // Initialize it with the current split and context
        recordReader.initialize(split, context);
        return recordReader;
    }

}

2) Custom RecordReader

package com.da.inputformat;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class WholeRecordReader extends RecordReader<NullWritable, BytesWritable> {
    private BytesWritable value = new BytesWritable();
    private boolean isProgress = false;
    private FileSplit split;
    private Configuration configuration;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        // Cache the split and configuration for use in nextKeyValue()
        this.split = (FileSplit) split;
        this.configuration = context.getConfiguration();
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        // Read one whole file per call
        if (!isProgress) {
            // 1. Buffer sized to hold the entire file
            byte buf[] = new byte[(int) split.getLength()];
            FileSystem fs = null;
            FSDataInputStream fis = null;

            try {

                // 2. Get the file system for the split's path
                Path path = split.getPath();
                fs = path.getFileSystem(configuration);

                // 3. Open an input stream for the file
                fis = fs.open(path);

                // 4. Read the whole file into the buffer
                IOUtils.readFully(fis, buf, 0, buf.length);

                // 5. Store the file content as the current value
                value.set(buf, 0, buf.length);
            } finally {
                // Close only the stream; the FileSystem instance is cached and shared,
                // and any IOException should propagate instead of being swallowed
                IOUtils.closeStream(fis);
            }

            isProgress = true;
            return true;
        }

        return false;
    }

    @Override
    public NullWritable getCurrentKey() throws IOException, InterruptedException {
        return NullWritable.get();
    }

    @Override
    public BytesWritable getCurrentValue() throws IOException, InterruptedException {
        return value;
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        // Report 1.0 once the single file has been read, 0 before
        return isProgress ? 1 : 0;
    }

    @Override
    public void close() throws IOException {
        // Nothing to release here; the input stream is closed in nextKeyValue()
    }

}

3) SequenceFileMapper processing flow

package com.da.inputformat;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class SequenceFileMapper extends Mapper<NullWritable, BytesWritable, Text, BytesWritable> {
    private Text k = new Text();

    @Override
    protected void setup(Mapper<NullWritable, BytesWritable, Text, BytesWritable>.Context context)
            throws IOException, InterruptedException {
        // Use the input file's full path (including its name) as the output key
        FileSplit split = (FileSplit) context.getInputSplit();
        Path path = split.getPath();
        k.set(path.toString());
    }

    @Override
    protected void map(NullWritable key, BytesWritable value, Context context)
            throws IOException, InterruptedException {
        context.write(k, value);
    }

}

4) SequenceFileReducer processing flow

package com.da.inputformat;

import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class SequenceFileReducer extends Reducer<Text, BytesWritable, Text, BytesWritable> {
    @Override
    protected void reduce(Text key, Iterable<BytesWritable> values, Context context)
            throws IOException, InterruptedException {
        for (BytesWritable bytesWritable : values) {
            context.write(key, bytesWritable);
        }
    }
}

5) SequenceFileDriver processing flow

package com.da.inputformat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class SequenceFileDriver {
    public static void main(String[] args) throws Exception {
        // Local test paths; remove this line to take the paths from the command line
        args = new String[] { "E:/mrinput/", "E:/mrout" };
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(SequenceFileDriver.class);

        job.setMapperClass(SequenceFileMapper.class);
        job.setReducerClass(SequenceFileReducer.class);

        // Use the custom whole-file InputFormat for the input
        job.setInputFormatClass(WholeFileInputformat.class);
        // Write the merged result as a SequenceFile
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(BytesWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(BytesWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
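
After the job finishes, the merged output can be inspected with a small reader to confirm the layout described in section 1. The sketch below is an assumed helper (the class name SequenceFileVerifier and the part-file argument are illustrative): it prints each stored key, which should be an original file path, together with the length of the stored bytes.

package com.da.inputformat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileVerifier {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Path to one output part file produced by the job above
        Path path = new Path(args[0]);
        try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
                SequenceFile.Reader.file(path))) {
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            while (reader.next(key, value)) {
                // key is the original file path, value holds that file's raw bytes
                System.out.println(key + " -> " + value.getLength() + " bytes");
            }
        }
    }
}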