Custom InputFormat: Merging Small Files

Requirement:

In MapReduce, we sometimes need to merge a number of small files: HDFS and MapReduce handle many small files inefficiently, since each file occupies NameNode metadata and, with the default input formats, gets its own map task.

Here there are 3 small files that need to be merged into a single file. The approach: a custom InputFormat reads each file whole, and the job writes the results out as one SequenceFile keyed by file path.
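To have something to run against, the three small input files can be created locally first. Below is a minimal sketch, assuming the input directory used by the driver in step 5; the class name and file names are illustrative, not part of the original code.

package inputFormat;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class MakeTestFiles {
    public static void main(String[] args) throws IOException {
        // Hypothetical directory; must match args[0] in SequenceFileDriver
        String dir = "E:\\bigdata_code\\inputformat";
        Files.createDirectories(Paths.get(dir));
        for (int i = 1; i <= 3; i++) {
            // A few bytes per file -- exactly the "small file" scenario
            Files.write(Paths.get(dir, "small" + i + ".txt"),
                    ("contents of small file " + i).getBytes(StandardCharsets.UTF_8));
        }
    }
}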

 

1: Define the SequenceFileMap class

The mapper receives one whole-file record per input file and keys it by the file's source path.

package inputFormat;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import java.io.IOException;

public class SequenceFileMap extends Mapper<NullWritable, BytesWritable, Text, BytesWritable> {
    Text k = new Text();
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // 1. Get the input split this map task is processing
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        // 2. Get the full path of the file behind the split
        String pathName = inputSplit.getPath().toString();
        // 3. Use that path as the output key for every record from this file
        k.set(pathName);
    }
    @Override
    protected void map(NullWritable key, BytesWritable value, Context context) 
                                    throws IOException, InterruptedException {
        // One record per input file: emit (file path, file bytes)
        context.write(k, value);
    }
}

2: Define the SequenceFileReducer class

The reducer passes each (path, bytes) pair straight through to the output.

package inputFormat;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

public class SequenceFileReducer extends Reducer<Text, BytesWritable, Text, BytesWritable> {
    @Override
    protected void reduce(Text key, Iterable<BytesWritable> values, Context context) 
                                          throws IOException, InterruptedException {
        // Keys are unique file paths, so each group holds exactly one value
        context.write(key, values.iterator().next());
    }
}
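Writing only the first value is safe here because each key (a file path) appears exactly once. If duplicate keys were possible, a safer variant of the reduce() body (a sketch, not part of the original) would iterate over the whole group:

    @Override
    protected void reduce(Text key, Iterable<BytesWritable> values, Context context)
                                          throws IOException, InterruptedException {
        // General-case variant: emit every value in the group, not just the first
        for (BytesWritable v : values) {
            context.write(key, v);
        }
    }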

3: Define the AllRecordReader class

The RecordReader does the actual work: it reads an entire file in one shot and exposes it as a single (NullWritable, BytesWritable) record.

package inputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import java.io.IOException;

public class AllRecordReader extends RecordReader<NullWritable, BytesWritable> {
    // Job configuration, the split being read, a done flag, and the record value
    private Configuration configuration;
    private FileSplit split;
    private boolean processed = false;
    private BytesWritable value = new BytesWritable();

    public AllRecordReader() {
        super();
    }
    @Override
    public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) 
                                                throws IOException, InterruptedException {
        split = (FileSplit) inputSplit;
        configuration = taskAttemptContext.getConfiguration();
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (!processed) {
            // Buffer sized to the whole file: the file becomes one record,
            // so it must fit in memory (fine for genuinely small files)
            byte[] contents = new byte[(int) split.getLength()];
            // Open the file through its FileSystem
            Path path = split.getPath();
            FileSystem fs = path.getFileSystem(configuration);
            FSDataInputStream fis = fs.open(path);
            try {
                // Read the complete file and expose it as the value
                IOUtils.readFully(fis, contents, 0, contents.length);
                value.set(contents, 0, contents.length);
            } finally {
                IOUtils.closeStream(fis);
            }
            processed = true;
            return true;
        }
        return false;
    }
    @Override
    public NullWritable getCurrentKey() throws IOException, InterruptedException {
        return NullWritable.get();
    }
    @Override
    public BytesWritable getCurrentValue() throws IOException, InterruptedException {
        return value;
    }
    @Override
    public float getProgress() throws IOException, InterruptedException {
        // Progress is all-or-nothing: the single record is either read or not
        return processed ? 1 : 0;
    }
    @Override
    public void close() throws IOException {
        // Nothing to release: the input stream is closed in nextKeyValue()
    }
}

4: Define the AllFileInputFormat class

The InputFormat marks every file as non-splittable and plugs in the AllRecordReader.

package inputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;

public class AllFileInputFormat extends FileInputFormat<NullWritable, BytesWritable> {

    // Never split an input file: each small file becomes exactly one split,
    // and hence one whole-file record
    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        return false;
    }

    @Override
    public RecordReader<NullWritable, BytesWritable> createRecordReader(
            InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
            throws IOException, InterruptedException {
        AllRecordReader reader = new AllRecordReader();
        // The framework also calls initialize(), so this call is defensive
        reader.initialize(inputSplit, taskAttemptContext);
        return reader;
    }
}

5: Set up the SequenceFileDriver class

The driver wires everything together and writes the merged records as a SequenceFile.

package inputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import java.io.IOException;

public class SequenceFileDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hard-coded local paths for testing; the output directory
        // must not already exist when the job starts
        args = new String[]{"E:\\bigdata_code\\inputformat", "E:\\bigdata_code\\inputformat\\out"};
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(SequenceFileDriver.class);
        job.setMapperClass(SequenceFileMap.class);
        job.setReducerClass(SequenceFileReducer.class);

        // Plug in the custom whole-file InputFormat
        job.setInputFormatClass(AllFileInputFormat.class);
        // Emit the merged records as a SequenceFile
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(BytesWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(BytesWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}

Output:

Running the driver produces a SequenceFile under E:\bigdata_code\inputformat\out (by default named part-r-00000). It contains one record per input file: the key is the file's full path and the value is the file's raw bytes, so the three small files are now merged into a single file.
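To verify the result, the output SequenceFile can be read back with SequenceFile.Reader. Below is a minimal sketch, assuming the job wrote the usual part-r-00000 file; the class name and path are illustrative.

package inputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileVerifier {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed output file name; check the actual contents of the out directory
        Path seqPath = new Path("E:\\bigdata_code\\inputformat\\out\\part-r-00000");
        try (SequenceFile.Reader reader = new SequenceFile.Reader(
                conf, SequenceFile.Reader.file(seqPath))) {
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            while (reader.next(key, value)) {
                // Key: original file path; value: that file's raw bytes
                System.out.println(key + " -> " + value.getLength() + " bytes");
            }
        }
    }
}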
