1) Requirement
Both HDFS and MapReduce lose efficiency on small files: every file adds metadata to the NameNode, and every small file tends to get its own map task. Since scenarios with large numbers of small files are hard to avoid in practice, a concrete solution is needed. The idea here is to merge many small files into a single SequenceFile, which stores one record per original file: the key is the file path plus name, and the value is the file content.
2) Input data
one.txt hhhhh
two.txt yyyyy
three.txt akjfkajkfak
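Assuming these three files sit in the input directory g:/input/inputformat (the path hard-coded in the driver below), the merged SequenceFile conceptually holds one record per file; the exact key string depends on how the file system renders the path:
(g:/input/inputformat/one.txt, hhhhh)
(g:/input/inputformat/two.txt, yyyyy)
(g:/input/inputformat/three.txt, akjfkajkfak)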
3) Analysis
Small-file optimization generally comes down to the following approaches:
(1) At data-collection time, merge small files or small batches of data into larger files before uploading to HDFS.
(2) Before business processing, run a MapReduce job on HDFS to merge the small files.
(3) At MapReduce processing time, use CombineTextInputFormat to improve efficiency (a minimal driver sketch follows this list).
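For approach (3), a minimal driver sketch showing the two relevant settings, assuming Hadoop 2.x and plain text input; the class name CombineDriverSketch and the 4 MB cap are illustrative choices, not part of this section's program:
package com.lzz.mapreduce.inputformat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class CombineDriverSketch {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(CombineDriverSketch.class);
        // Pack many small text files into fewer, larger input splits
        job.setInputFormatClass(CombineTextInputFormat.class);
        // Cap each combined split at 4 MB (example value; tune toward the block size)
        CombineTextInputFormat.setMaxInputSplitSize(job, 4194304);
        // Mapper and Reducer are left at the identity defaults for this sketch
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}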
4) Implementation
This section handles the small-file input problem with a custom InputFormat:
(1) Define a class that extends FileInputFormat.
(2) Write a RecordReader that reads one complete file per call and packages it as a key/value pair.
(3) On the output side, use SequenceFileOutputFormat to write the merged file.
5) Program implementation:
(1) Custom InputFormat
package com.lzz.mapreduce.inputformat;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class WholeFileInputFormat extends FileInputFormat<NullWritable, BytesWritable> {

    // Mark every file as non-splittable so that each file becomes exactly one
    // split and is read as a whole by a single record reader
    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        return false;
    }

    @Override
    public RecordReader<NullWritable, BytesWritable> createRecordReader(InputSplit split, TaskAttemptContext context)
            throws IOException, InterruptedException {
        // Create the record reader
        WholeRecordReader recordReader = new WholeRecordReader();
        // Initialize it with the current split and task context
        recordReader.initialize(split, context);
        return recordReader;
    }
}
(2) Custom RecordReader
package com.lzz.mapreduce.inputformat;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class WholeRecordReader extends RecordReader<NullWritable, BytesWritable> {

    private BytesWritable value = new BytesWritable();
    // Each split is one whole file, so there is exactly one key/value pair to emit
    private boolean isProcessed = false;
    private FileSplit split;
    private Configuration conf;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        // Keep the split and configuration for use in nextKeyValue()
        this.split = (FileSplit) split;
        conf = context.getConfiguration();
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        // Read one complete file per call
        if (!isProcessed) {
            // 0. Buffer sized to the whole file (the split covers the entire file)
            byte[] buf = new byte[(int) split.getLength()];
            FSDataInputStream fis = null;
            try {
                // 1. Get the file system for the file's path
                Path path = split.getPath();
                FileSystem fs = path.getFileSystem(conf);
                // 2. Open an input stream on the file
                fis = fs.open(path);
                // 3. Read the entire file into the buffer
                IOUtils.readFully(fis, buf, 0, buf.length);
                // 4. Copy the buffer into the output value
                value.set(buf, 0, buf.length);
            } finally {
                // Close only the stream; the FileSystem instance is cached and shared
                IOUtils.closeStream(fis);
            }
            isProcessed = true;
            return true;
        }
        return false;
    }

    @Override
    public NullWritable getCurrentKey() throws IOException, InterruptedException {
        return NullWritable.get();
    }

    @Override
    public BytesWritable getCurrentValue() throws IOException, InterruptedException {
        return value;
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        return isProcessed ? 1 : 0;
    }

    @Override
    public void close() throws IOException {
    }
}
(3) SequenceFileMapper
package com.lzz.mapreduce.inputformat;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class SequenceFileMapper extends Mapper<NullWritable, BytesWritable, Text, BytesWritable> {

    Text k = new Text();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Each map task processes exactly one (unsplittable) file, so the key --
        // the file's path and name -- can be set once here
        FileSplit split = (FileSplit) context.getInputSplit();
        Path path = split.getPath();
        k.set(path.toString());
    }

    @Override
    protected void map(NullWritable key, BytesWritable value, Context context)
            throws IOException, InterruptedException {
        // Emit the file path as key and the file's bytes as value
        context.write(k, value);
    }
}
(4) SequenceFileReducer
package com.lzz.mapreduce.inputformat;

import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class SequenceFileReducer extends Reducer<Text, BytesWritable, Text, BytesWritable> {

    @Override
    protected void reduce(Text key, Iterable<BytesWritable> values, Context context)
            throws IOException, InterruptedException {
        // Each key (a unique file path) carries one value (that file's content);
        // pass the pairs through unchanged so they land in the SequenceFile
        for (BytesWritable value : values) {
            context.write(key, value);
        }
    }
}
(5) SequenceFileDriver
package com.lzz.mapreduce.inputformat;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class SequenceFileDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hard-coded local paths for testing; remove to pass paths on the command line
        args = new String[] { "g:/input/inputformat", "g:/output" };

        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        job.setJarByClass(SequenceFileDriver.class);
        job.setMapperClass(SequenceFileMapper.class);
        job.setReducerClass(SequenceFileReducer.class);

        // Set the input and output formats: whole files in, a SequenceFile out
        job.setInputFormatClass(WholeFileInputFormat.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(BytesWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(BytesWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
Run result
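With the default single reducer, the merged output lands in one SequenceFile at g:/output/part-r-00000. A minimal sketch for inspecting it; the SequenceFileDump class and its output formatting are illustrative, not part of the original program:
package com.lzz.mapreduce.inputformat;

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileDump {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Path assumes the driver's hard-coded output directory
        Path path = new Path("g:/output/part-r-00000");
        try (SequenceFile.Reader reader =
                new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            // Iterate over every (file path, file bytes) record
            while (reader.next(key, value)) {
                System.out.println(key + " -> "
                        + new String(value.copyBytes(), StandardCharsets.UTF_8));
            }
        }
    }
}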