Overview
A custom InputFormat is used when none of Hadoop's built-in InputFormat types fits a particular scenario, so we implement our own.
Customization Steps
The steps to implement a custom InputFormat are:
- Create a class that extends InputFormat.
- Override RecordReader so that one call reads one complete file and packs it into a key-value pair.
- On the output side, use SequenceFileOutputFormat to merge the small files into a single SequenceFile.
(A SequenceFile is Hadoop's file format for storing binary key-value pairs. Here it holds many small files, with each file's path plus name as the key and the file's contents as the value.)
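To make that key/value layout concrete, here is a minimal sketch of reading such a SequenceFile back with Hadoop's SequenceFile.Reader; the part-file path is a placeholder for whatever the job below actually produces.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileDump {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder path: point this at a part file produced by the job below
        Path path = new Path("E:/hdfs/output1/part-r-00000");
        try (SequenceFile.Reader reader =
                     new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
            Text key = new Text();                     // file path + name
            BytesWritable value = new BytesWritable(); // raw file contents
            while (reader.next(key, value)) {
                System.out.println(key + " -> " + value.getLength() + " bytes");
            }
        }
    }
}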
Worked Example
Steps:
- Create a class that extends FileInputFormat.
  a. Override isSplitable() to return false, marking files as non-splittable.
  b. Override createRecordReader() to create and initialize the custom RecordReader.
- Override RecordReader so that one call reads one complete file and packs it into a key-value pair.
  a. Read the whole file into the value with an IO stream; because splitting is disabled, the entire file ends up in the value.
  b. Use the file's path plus name as the key.
- Set up the Driver.
  a. Set the job's input format to the custom InputFormat and its output format to SequenceFileOutputFormat (see the Driver code below).
Code:
CustomerInputFormate (the custom InputFormat):
package com.xing.MapReduce.InputFormatSequenceFile;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;
/**
 * Custom InputFormat: each record is one whole file,
 * with the file path as the key and the raw file bytes as the value.
 */
public class CustomerInputFormate extends FileInputFormat<Text, BytesWritable> {

    /**
     * Create and initialize the custom RecordReader.
     */
    @Override
    public RecordReader<Text, BytesWritable> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        CustomerRecordReader reader = new CustomerRecordReader();
        reader.initialize(inputSplit, taskAttemptContext);
        return reader;
    }

    /**
     * Mark files as non-splittable, so a split always covers one whole file.
     */
    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        return false;
    }
}
CustomerRecordReader (the custom RecordReader):
package com.xing.MapReduce.InputFormatSequenceFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import java.io.IOException;
/**
 * Custom RecordReader: reads one whole file per call to nextKeyValue().
 */
public class CustomerRecordReader extends RecordReader<Text, BytesWritable> {

    // Split information (one whole file, since isSplitable() returns false)
    private FileSplit split;
    private Configuration configuration;
    private Text k = new Text();
    private BytesWritable v = new BytesWritable();
    // True until the file has been consumed
    private boolean isProgress = true;

    // Store the split and configuration for later use
    @Override
    public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        this.split = (FileSplit) inputSplit;
        this.configuration = taskAttemptContext.getConfiguration();
    }

    // Core logic: read the entire file and pack it into the key and value
    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (isProgress) {
            // 1. Get the FileSystem for the split's path
            Path path = split.getPath();
            FileSystem fileSystem = path.getFileSystem(configuration);
            // 2. Open an input stream
            FSDataInputStream fis = fileSystem.open(path);
            // 3. Read the whole file into a buffer
            byte[] bytes = new byte[(int) split.getLength()];
            IOUtils.readFully(fis, bytes, 0, bytes.length);
            // 4. Fill the key (path) and the value (contents)
            v.set(bytes, 0, bytes.length);
            k.set(path.toString());
            // 5. Clean up; the split's single record has been produced
            IOUtils.closeStream(fis);
            isProgress = false;
            return true;
        }
        return false;
    }

    // Return the current key
    @Override
    public Text getCurrentKey() throws IOException, InterruptedException {
        return k;
    }

    // Return the current value
    @Override
    public BytesWritable getCurrentValue() throws IOException, InterruptedException {
        return v;
    }

    // 0 before the file has been read, 1 afterwards
    @Override
    public float getProgress() throws IOException, InterruptedException {
        return isProgress ? 0f : 1f;
    }

    @Override
    public void close() throws IOException {
    }
}
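Before wiring the reader into a job, it can be smoke-tested locally. The harness below is a sketch, not part of the original example: it assumes Hadoop 2.x class locations (org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl) and a hypothetical local test file file:///tmp/a.txt.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public class CustomerRecordReaderSmokeTest {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical test file; replace with any small local file
        Path file = new Path("file:///tmp/a.txt");
        long length = file.getFileSystem(conf).getFileStatus(file).getLen();
        // A split covering the whole file, as isSplitable() guarantees in the real job
        FileSplit split = new FileSplit(file, 0, length, null);
        CustomerRecordReader reader = new CustomerRecordReader();
        reader.initialize(split, new TaskAttemptContextImpl(conf, new TaskAttemptID()));
        while (reader.nextKeyValue()) {
            System.out.println(reader.getCurrentKey() + " -> "
                    + reader.getCurrentValue().getLength() + " bytes");
        }
        reader.close();
    }
}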
SequenceFileMapper
package com.xing.MapReduce.InputFormatSequenceFile;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class SequenceFileMapper extends Mapper<Text, BytesWritable, Text, BytesWritable> {
    @Override
    protected void map(Text key, BytesWritable value, Context context) throws IOException, InterruptedException {
        // Pass straight through: the key is the file path, the value is the file contents
        context.write(key, value);
    }
}
SequenceFileReducer
package com.xing.MapReduce.InputFormatSequenceFile;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class SequenceFileReducer extends Reducer<Text, BytesWritable, Text, BytesWritable> {
    @Override
    protected void reduce(Text key, Iterable<BytesWritable> values, Context context) throws IOException, InterruptedException {
        // Simply write each value back out under its key
        for (BytesWritable value : values) {
            context.write(key, value);
        }
    }
}
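Because every key is a unique file path, the iterable holds exactly one value per key, so this reducer is a pure pass-through; leaving setReducerClass unset and relying on Hadoop's default identity Reducer would produce the same output.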
SequenceFileDriver
package com.xing.MapReduce.InputFormatSequenceFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import java.io.IOException;
public class SequenceFileDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Get the Configuration and a FileSystem handle
        Configuration configuration = new Configuration();
        FileSystem fs = FileSystem.get(configuration);
        // Create the job and set its name and jar
        Job job = Job.getInstance(configuration);
        job.setJobName("SequenceFileDriver");
        job.setJarByClass(SequenceFileDriver.class);
        // Set the mapper and reducer classes
        job.setMapperClass(SequenceFileMapper.class);
        job.setReducerClass(SequenceFileReducer.class);
        // Wire in the custom input format and the SequenceFile output format
        job.setInputFormatClass(CustomerInputFormate.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        // Set the output key and value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(BytesWritable.class);
        // Delete the output directory if it already exists
        if (fs.exists(new Path("E:\\hdfs\\output1"))) {
            fs.delete(new Path("E:\\hdfs\\output1"), true);
        }
        // Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("E:\\hdfs\\input"));
        FileOutputFormat.setOutputPath(job, new Path("E:\\hdfs\\output1"));
        // Submit the job and exit with its status
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : -1);
    }
}
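Once the job finishes, the merged output can be spot-checked from the command line: hadoop fs -text knows how to decode SequenceFiles, e.g. hadoop fs -text file:///E:/hdfs/output1/part-r-00000 (the exact part-file name depends on the run).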
Input and Output
Input: the small files under E:\hdfs\input.
Output: the merged SequenceFile under E:\hdfs\output1.