1. Merging Small Files with a Custom InputFormat
1.1 Requirements
Small files hurt the efficiency of both HDFS and MapReduce, yet in practice it is hard to avoid scenarios that involve large numbers of small files, so a suitable solution is needed.
1.2 Analysis
Small-file optimization generally comes down to the following approaches:
1. At data-collection time, merge small files or small batches of data into larger files before uploading them to HDFS.
2. Before business processing, run a MapReduce program on HDFS to merge the small files.
3. During MapReduce processing, use CombineTextInputFormat to improve efficiency (see the sketch below).
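As a hedged illustration of the third approach, a driver can switch the job over to CombineTextInputFormat and cap the split size so that many small files are packed into a few splits. The 4 MB value below is an arbitrary example, not a recommendation:
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;

//inside the driver, after the Job object has been created
job.setInputFormatClass(CombineTextInputFormat.class);
//pack small files into splits of at most ~4 MB each (example value; tune per cluster)
CombineTextInputFormat.setMaxInputSplitSize(job, 4194304);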
1.3 Implementation
This section implements the second approach above.
The core mechanism of the program:
Define a custom InputFormat.
Override the RecordReader so that one read consumes an entire file and wraps it as a KV pair.
Use SequenceFileOutputFormat on the output side to write the merged file.
The code is as follows:
The custom InputFormat:
package demo01;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import java.io.IOException;

public class MyinputFormat extends FileInputFormat<NullWritable, BytesWritable> {
    //Create the RecordReader that reads one whole file as a single KV pair
    @Override
    public RecordReader<NullWritable, BytesWritable> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        //1. Create the custom RecordReader
        MyRecordReader myRecordReader = new MyRecordReader();
        //2. Pass the InputSplit and the task context to the RecordReader
        myRecordReader.initialize(inputSplit, taskAttemptContext);
        return myRecordReader;
    }

    //Mark files as non-splittable, so each file becomes exactly one split
    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        return false;
    }
}
The custom RecordReader:
package demo01;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class MyRecordReader extends RecordReader<NullWritable, BytesWritable> {
    private Configuration configuration = null;
    private FileSplit fileSplit = null;
    private boolean processed = false;
    private BytesWritable bytesWritable = new BytesWritable();
    private FileSystem fileSystem = null;
    private FSDataInputStream inputStream = null;

    //Initialization
    @Override
    public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        //Get the Configuration object
        configuration = taskAttemptContext.getConfiguration();
        //Get the file split
        fileSplit = (FileSplit) inputSplit;
    }

    //This method produces K1 and V1
    /*
     * K1: NullWritable
     * V1: BytesWritable
     * */
    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (!processed) {
            //1. Open a byte input stream on the source file
            //1.1 Get the file system (FileSystem) of the source file
            fileSystem = FileSystem.get(configuration);
            //1.2 Open the file's byte input stream through the FileSystem
            inputStream = fileSystem.open(fileSplit.getPath());
            //2. Read the whole file into a plain byte array (byte[])
            byte[] bytes = new byte[(int) fileSplit.getLength()];
            IOUtils.readFully(inputStream, bytes, 0, (int) fileSplit.getLength());
            //3. Wrap the byte array in the BytesWritable to produce V1
            bytesWritable.set(bytes, 0, (int) fileSplit.getLength());
            processed = true;
            return true;
        }
        return false;
    }

    //Return K1
    @Override
    public NullWritable getCurrentKey() throws IOException, InterruptedException {
        return NullWritable.get();
    }

    //Return V1
    @Override
    public BytesWritable getCurrentValue() throws IOException, InterruptedException {
        return bytesWritable;
    }

    //Report read progress: the whole file is consumed in a single nextKeyValue() call
    @Override
    public float getProgress() throws IOException, InterruptedException {
        return processed ? 1.0f : 0.0f;
    }

    //Release resources
    @Override
    public void close() throws IOException {
        //Only the stream is closed; FileSystem instances are cached and shared,
        //so closing one here could break other tasks running in the same JVM
        inputStream.close();
    }
}
The Mapper class:
package demo01;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class SequenceFileMapper extends Mapper<NullWritable, BytesWritable, Text, BytesWritable> {
    @Override
    protected void map(NullWritable key, BytesWritable value, Context context) throws IOException, InterruptedException {
        //1. Use the name of the current file as K2
        FileSplit fileSplit = (FileSplit) context.getInputSplit();
        String filename = fileSplit.getPath().getName();
        //2. Write K2 and V2 to the context
        context.write(new Text(filename), value);
    }
}
The main class:
package demo01;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class JobMain extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {
        //1. Create the job object
        Job job = Job.getInstance(super.getConf(), "sequence_file_job");
        //2. Configure the job
        //Step 1: set the input class and the input path
        job.setInputFormatClass(MyinputFormat.class);
        MyinputFormat.addInputPath(job, new Path("file:///D:\\input\\myInputformat_input"));
        //Step 2: set the Mapper class and its output types
        job.setMapperClass(SequenceFileMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(BytesWritable.class);
        //Steps 3 to 6 (the shuffle phase) are left at their defaults
        //Step 7: no Reducer class is set; only the output types are declared
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(BytesWritable.class);
        //Step 8: set the output class and the output path
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        SequenceFileOutputFormat.setOutputPath(job, new Path("file:///D:\\out\\myinputformat"));
        //3. Wait for the job to finish
        boolean b = job.waitForCompletion(true);
        return b ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int run = ToolRunner.run(configuration, new JobMain(), args);
        System.exit(run);
    }
}
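To check the result, the merged SequenceFile can be read back. Below is a minimal sketch, not part of the original program, assuming the output path configured above and the default output file name part-r-00000:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class MergedFileChecker {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        //part-r-00000 is the usual name of the first output file (an assumption here)
        Path path = new Path("file:///D:\\out\\myinputformat\\part-r-00000");
        try (SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            //each record is one original small file: file name -> raw bytes
            while (reader.next(key, value)) {
                System.out.println(key + " : " + value.getLength() + " bytes");
            }
        }
    }
}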
2. Splitting Output into Different Directories with a Custom OutputFormat
2.1 Requirements
We have a set of order-review data. The requirement is to separate the good reviews from the bad ones and write the two kinds of data into different directories. For the data itself, see the resources folder; the ninth field of each record marks the review type: 0 = good, 1 = neutral, 2 = bad.
2.2 Analysis
The key point is that a single MapReduce program must route two kinds of results to different directories depending on the data. This kind of flexible output requirement can be met with a custom OutputFormat.
2.3 Implementation
Key points of the implementation:
1. Access external resources from within MapReduce.
2. Define a custom OutputFormat, override its RecordWriter, and override the write() method that actually emits the data.

Step 1: the custom MyOutputFormat class
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class MyOutputFormat extends FileOutputFormat<Text, NullWritable> {
    @Override
    public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        //1. Open the two target output streams
        FileSystem fileSystem = FileSystem.get(taskAttemptContext.getConfiguration());
        FSDataOutputStream goodCommentsOutputStream = fileSystem.create(new Path("file:///D:\\out\\good_comments\\good_comments.txt"));
        FSDataOutputStream badCommentsOutputStream = fileSystem.create(new Path("file:///D:\\out\\bad_comments\\bad_comments.txt"));
        //2. Hand both streams to MyRecordWriter
        MyRecordWriter myRecordWriter = new MyRecordWriter(goodCommentsOutputStream, badCommentsOutputStream);
        return myRecordWriter;
    }
}
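The two target paths above are hardcoded for this exercise. As a sketch of a variation (not part of the original code; the property names good.comments.path and bad.comments.path are invented for illustration), the same method could read the paths from the job configuration instead of baking them into the class:
//hypothetical replacement for step 1 inside getRecordWriter
Configuration conf = taskAttemptContext.getConfiguration();
FileSystem fileSystem = FileSystem.get(conf);
//fall back to the hardcoded paths when the properties are not set
FSDataOutputStream goodCommentsOutputStream =
        fileSystem.create(new Path(conf.get("good.comments.path", "file:///D:\\out\\good_comments\\good_comments.txt")));
FSDataOutputStream badCommentsOutputStream =
        fileSystem.create(new Path(conf.get("bad.comments.path", "file:///D:\\out\\bad_comments\\bad_comments.txt")));
The driver would then call configuration.set(...) for these properties before ToolRunner.run(...).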
The MyRecordWriter class:
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

import java.io.IOException;

public class MyRecordWriter extends RecordWriter<Text, NullWritable> {
    private FSDataOutputStream goodCommentsOutputStream;
    private FSDataOutputStream badCommentsOutputStream;

    public MyRecordWriter() {
    }

    public MyRecordWriter(FSDataOutputStream goodCommentsOutputStream, FSDataOutputStream badCommentsOutputStream) {
        this.goodCommentsOutputStream = goodCommentsOutputStream;
        this.badCommentsOutputStream = badCommentsOutputStream;
    }

    /**
     * @param text the full line of text for one review
     * @param nullWritable unused placeholder value
     * @throws IOException
     * @throws InterruptedException
     */
    @Override
    public void write(Text text, NullWritable nullWritable) throws IOException, InterruptedException {
        //1. Pull the review flag (the tab-separated field at index 9) out of the line
        String[] split = text.toString().split("\t");
        String numStr = split[9];
        //2. Route the line to the proper file based on the flag's value
        if (Integer.parseInt(numStr) <= 1) {
            //good or neutral review (0 or 1)
            goodCommentsOutputStream.write(text.toString().getBytes());
            goodCommentsOutputStream.write("\r\n".getBytes());
        } else {
            //bad review (2)
            badCommentsOutputStream.write(text.toString().getBytes());
            badCommentsOutputStream.write("\r\n".getBytes());
        }
    }

    @Override
    public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        IOUtils.closeStream(goodCommentsOutputStream);
        IOUtils.closeStream(badCommentsOutputStream);
    }
}
Step 2: the custom Mapper class. The mapper simply forwards each input line unchanged; all routing logic lives in the RecordWriter.
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class MyOutputFormatMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        //pass the whole line through as K2, with a NullWritable as V2
        context.write(value, NullWritable.get());
    }
}
Step 3: the main class JobMain
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class JobMain extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        //1. Create the job object
        Job job = Job.getInstance(super.getConf(), "myoutputformat_job");
        //2. Configure the job
        //Step 1: set the input class and the input path
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///D:\\input\\myoutputformat_input"));
        //Step 2: set the Mapper class and its output types
        job.setMapperClass(MyOutputFormatMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        //Step 8: set the output class and the output path
        job.setOutputFormatClass(MyOutputFormat.class);
        MyOutputFormat.setOutputPath(job, new Path("file:///D:\\out\\myoutputformat_out"));
        //3. Wait for the job to finish
        boolean bl = job.waitForCompletion(true);
        return bl ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int run = ToolRunner.run(configuration, new JobMain(), args);
        System.exit(run);
    }
}
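One point worth noting about this design: because MyRecordWriter sends every record to the two hardcoded streams, the directory passed to MyOutputFormat.setOutputPath typically ends up containing only the job's _SUCCESS marker, while the actual review data lands in D:\out\good_comments and D:\out\bad_comments.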