MapReduce in Practice: Handling Small Files (Custom InputFormat)

1) Requirement

Both HDFS and MapReduce lose efficiency on small files, yet in practice it is often unavoidable to process large numbers of them, so a concrete solution is needed. The approach here is to merge many small files into a single SequenceFile that stores one record per file, with the file path plus name as the key and the file contents as the value.
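To make the target layout concrete, the sketch below writes a file of exactly this shape directly with Hadoop's SequenceFile.Writer, independent of the MapReduce job built in this section; the class name and the input and output paths are hypothetical placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Minimal sketch: merge small files into one SequenceFile without MapReduce
public class MergeSmallFilesSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path[] inputs = { new Path("e:/input/one.txt") }; // hypothetical input files
        Path out = new Path("e:/output/merged.seq");      // hypothetical output file

        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
                SequenceFile.Writer.file(out),
                SequenceFile.Writer.keyClass(Text.class),
                SequenceFile.Writer.valueClass(BytesWritable.class))) {
            for (Path p : inputs) {
                // Read the whole small file into memory
                byte[] buf = new byte[(int) fs.getFileStatus(p).getLen()];
                try (FSDataInputStream in = fs.open(p)) {
                    IOUtils.readFully(in, buf, 0, buf.length);
                }
                // Key = file path + name, value = raw file contents
                writer.append(new Text(p.toString()), new BytesWritable(buf));
            }
        }
    }
}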

2) Input data

one.txt:

yongpeng weidong weinan
sanfeng luozong xiaoming

two.txt:

longlong fanfan
mazong kailun yuhang yixin
longlong fanfan
mazong kailun yuhang yixin

three.txt:

shuaige changmo zhenqiang 
dongli lingu xuanxuan

Expected format of the final merged file (raw bytes of the SequenceFile):

SEQorg.apache.hadoop.io.Text"org.apache.hadoop.io.BytesWritable      ã;ŠCW

uÊÚX@ù½ü˜í   W   "!file:/e:/inputinputformat/one.txt   1yongpeng weidong weinan
sanfeng luozong xiaoming   Y   $#file:/e:/inputinputformat/three.txt   1shuaige changmo zhenqiang 
dongli lingu xuanxuan   €   "!file:/e:/inputinputformat/two.txt   Zlonglong fanfan
mazong kailun yuhang yixin
longlong fanfan
mazong kailun yuhang yixin
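The SEQ magic bytes and the class names org.apache.hadoop.io.Text and org.apache.hadoop.io.BytesWritable in the header confirm that the merged file is a SequenceFile keyed by Text with BytesWritable values; the remaining bytes are the binary-encoded records, one per input file.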

3) Analysis

Small-file optimization generally comes down to the following approaches:

(1) At data-collection time, merge small files or small batches of data into larger files before uploading them to HDFS.

(2) Before business processing, run a MapReduce job on HDFS to merge the small files.

(3) During MapReduce processing, use CombineTextInputFormat to improve efficiency (see the sketch after this list).
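As a hedged illustration of approach (3), the fragment below shows how CombineTextInputFormat is typically wired into a job driver (for example, one shaped like the SequenceFileDriver in section 5). The 4 MB maximum split size is an arbitrary example value, not a recommendation:

// Fragment for a driver's main(), after Job job = Job.getInstance(conf);
// requires: import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
job.setInputFormatClass(CombineTextInputFormat.class);
CombineTextInputFormat.setMaxInputSplitSize(job, 4194304); // pack small files into splits of at most ~4 MB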

4) Implementation steps

This section solves the small-file input problem with a custom InputFormat:

(1) Define a class that extends FileInputFormat.

(2) Write a custom RecordReader that reads one complete file at a time and packages it as a key-value pair.

(3) Use SequenceFileOutputFormat on the output side to write the merged file.

5) Code:

(1) Custom InputFormat

package com.atguigu.mapreduce.inputformat;

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

// Define a class that extends FileInputFormat
public class WholeFileInputformat extends FileInputFormat<NullWritable, BytesWritable> {

    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        // Never split: each small file must stay whole so it can be read as one record
        return false;
    }

    @Override
    public RecordReader<NullWritable, BytesWritable> createRecordReader(InputSplit split, TaskAttemptContext context)
            throws IOException, InterruptedException {

        WholeRecordReader recordReader = new WholeRecordReader();
        recordReader.initialize(split, context);

        return recordReader;
    }
}
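Returning false from isSplitable() guarantees that every input file becomes exactly one split, which is what allows the RecordReader below to load the whole file as a single record.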

(2) Custom RecordReader

package com.atguigu.mapreduce.inputformat;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class WholeRecordReader extends RecordReader<NullWritable, BytesWritable> {

    private Configuration configuration;
    private FileSplit split;

    private boolean processed = false;
    private BytesWritable value = new BytesWritable();

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        this.split = (FileSplit) split;
        configuration = context.getConfiguration();
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (!processed) {
            // 1 Allocate a buffer the size of the whole file
            byte[] contents = new byte[(int) split.getLength()];

            FSDataInputStream fis = null;
            try {
                // 2 Get the file system for this split's path
                Path path = split.getPath();
                FileSystem fs = path.getFileSystem(configuration);

                // 3 Open the file
                fis = fs.open(path);

                // 4 Read the entire file into the buffer; read errors propagate as IOException
                IOUtils.readFully(fis, contents, 0, contents.length);

                // 5 Expose the contents as this record's value
                value.set(contents, 0, contents.length);
            } finally {
                IOUtils.closeStream(fis);
            }

            processed = true;
            return true;
        }

        return false;
    }

    @Override
    public NullWritable getCurrentKey() throws IOException, InterruptedException {
        return NullWritable.get();
    }

    @Override
    public BytesWritable getCurrentValue() throws IOException, InterruptedException {
        return value;
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        return processed ? 1 : 0;
    }

    @Override
    public void close() throws IOException {
    }
}
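Note the one-shot pattern: the processed flag makes the first call to nextKeyValue() load the entire file and return true, while every later call returns false, so each file yields exactly one key-value pair.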

(3) SequenceFileMapper processing flow

package com.atguigu.mapreduce.inputformat;

import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class SequenceFileMapper extends Mapper<NullWritable, BytesWritable, Text, BytesWritable> {

    Text k = new Text();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // 1 Get this task's input split
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        // 2 Get the split's file path and name
        String name = inputSplit.getPath().toString();
        // 3 Use the path as the output key
        k.set(name);
    }

    @Override
    protected void map(NullWritable key, BytesWritable value, Context context)
            throws IOException, InterruptedException {
        context.write(k, value);
    }
}
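Because each split is a whole file, setup() runs exactly once per file, so the key can safely be computed there; map() then only has to pair it with the file's bytes.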

(4) SequenceFileReducer processing flow

package com.atguigu.mapreduce.inputformat;

import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class SequenceFileReducer extends Reducer<Text, BytesWritable, Text, BytesWritable> {

    @Override
    protected void reduce(Text key, Iterable<BytesWritable> values, Context context)
            throws IOException, InterruptedException {
        context.write(key, values.iterator().next());
    }
}
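Since every map output key is a distinct file path, each reduce call receives exactly one value, so writing values.iterator().next() emits every file exactly once.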

(5) SequenceFileDriver processing flow

package com.atguigu.mapreduce.inputformat;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class SequenceFileDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        // Local paths hard-coded for testing
        args = new String[] { "e:/input/inputinputformat", "e:/output1" };
        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);
        job.setJarByClass(SequenceFileDriver.class);
        job.setMapperClass(SequenceFileMapper.class);
        job.setReducerClass(SequenceFileReducer.class);

        // Set the input format to the custom whole-file InputFormat
        job.setInputFormatClass(WholeFileInputformat.class);
        // Set the output format to SequenceFileOutputFormat
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(BytesWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(BytesWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);

        System.exit(result ? 0 : 1);
    }
}
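To spot-check the result, one option is to read the merged file back with Hadoop's SequenceFile.Reader. The sketch below assumes the output directory from the driver above and the conventional reducer output file name part-r-00000; the class name is a hypothetical placeholder.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Minimal sketch: list each merged file's path and payload size
public class SequenceFileCheck {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path("e:/output1/part-r-00000"); // assumed output location

        try (SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            while (reader.next(key, value)) {
                System.out.println(key + " -> " + value.getLength() + " bytes");
            }
        }
    }
}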

 
