Custom InputFormat and Custom OutputFormat
Custom InputFormat (merging small files into a single SequenceFile)
Driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class MyDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "MyDIY");
        job.setJarByClass(MyDriver.class);
        job.setMapperClass(MyMap.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(BytesWritable.class);
        // Set the InputFormat used to read the input files
        job.setInputFormatClass(MyInputFormat.class);
        // Set the input path
        MyInputFormat.addInputPath(job, new Path("file:///F:\\传智播客\\传智专修学院\\19级\\05MapReduce\\素材\\5\\自定义inputformat_小文件合并\\input"));
        // Set the output format: a SequenceFile keeps the (file name, bytes) pairs binary-safe
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        // Set the output path
        SequenceFileOutputFormat.setOutputPath(job, new Path("file:///F:\\传智播客\\传智专修学院\\19级\\05MapReduce\\素材\\5\\自定义inputformat_小文件合并\\input\\output"));
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
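After the job completes, the output directory holds a SequenceFile whose keys are the original file names and whose values are the raw file bytes. A minimal read-back sketch to verify the merge (assuming a local run; part-r-00000 is the default name of the single reducer's output, and the path below is a placeholder to adjust):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class VerifyMerge {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder path: point this at the job's actual output file
        Path part = new Path("file:///path/to/output/part-r-00000");
        try (SequenceFile.Reader reader =
                     new SequenceFile.Reader(conf, SequenceFile.Reader.file(part))) {
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            // Each record is (original file name, whole file contents)
            while (reader.next(key, value)) {
                System.out.println(key + " -> " + value.getLength() + " bytes");
            }
        }
    }
}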
InputFormat class
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;

public class MyInputFormat extends FileInputFormat<NullWritable, BytesWritable> {
    @Override
    public RecordReader<NullWritable, BytesWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        MyRR myRR = new MyRR();
        myRR.initialize(split, context);
        return myRR;
    }

    // Return false so each small file stays in one split and is read whole
    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        return false;
    }
}
Mapper class
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import java.io.IOException;

public class MyMap extends Mapper<NullWritable, BytesWritable, Text, BytesWritable> {
    @Override
    protected void map(NullWritable key, BytesWritable value, Context context) throws IOException, InterruptedException {
        // Get the name of the file this record came from and use it as the key
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        String fileName = inputSplit.getPath().getName();
        context.write(new Text(fileName), value);
    }
}
RecordReader class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import java.io.IOException;

public class MyRR extends RecordReader<NullWritable, BytesWritable> {
    private FileSplit split1;
    private Configuration configuration;
    private BytesWritable bytesWritable = new BytesWritable();
    private boolean next = false;

    // Initialization: remember the split and configuration
    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        split1 = (FileSplit) split;
        configuration = context.getConfiguration();
    }

    // Fetch the next record.
    // A line-based reader returns one line at a time; here we need the whole
    // file as a single record, so we read all of its bytes into a BytesWritable.
    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (!next) {
            // Get the path of the file backing this split
            Path path = split1.getPath();
            // Get a FileSystem instance
            FileSystem fileSystem = FileSystem.get(configuration);
            FSDataInputStream open = fileSystem.open(path);
            try {
                byte[] bytes = new byte[(int) split1.getLength()];
                // Read the entire file
                IOUtils.readFully(open, bytes, 0, bytes.length);
                // Fill the bytes into the BytesWritable value
                bytesWritable.set(bytes, 0, bytes.length);
            } finally {
                IOUtils.closeStream(open);
            }
            next = true;
            return true;
        }
        return false;
    }

    // Current key: there is no meaningful key, so use NullWritable
    @Override
    public NullWritable getCurrentKey() throws IOException, InterruptedException {
        return NullWritable.get();
    }

    // Current value: the whole file's contents as a BytesWritable
    @Override
    public BytesWritable getCurrentValue() throws IOException, InterruptedException {
        return bytesWritable;
    }

    // Report progress: the single record has either been read or it has not
    @Override
    public float getProgress() throws IOException, InterruptedException {
        return next ? 1.0f : 0.0f;
    }

    // Release resources; the input stream is already closed in nextKeyValue()
    @Override
    public void close() throws IOException {
    }
}
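Why does nextKeyValue() only need to return true once per file? The framework pulls records through the mapper's run loop, which in essence (paraphrased; the exact source varies slightly across Hadoop versions) looks like this:

// Roughly what org.apache.hadoop.mapreduce.Mapper.run() does
public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    try {
        // context.nextKeyValue() delegates to MyRR.nextKeyValue(), which
        // returns true once and then false, so map() runs exactly once
        // per file with the whole file as its value
        while (context.nextKeyValue()) {
            map(context.getCurrentKey(), context.getCurrentValue(), context);
        }
    } finally {
        cleanup(context);
    }
}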
Custom OutputFormat (routing records to different output files)
Driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class MyDrive {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "MyDrive");
        job.setJarByClass(MyDrive.class);
        job.setMapperClass(MyMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("E:\\传智\\大二上半年\\10月\\预习资料\\05MapReduce\\素材\\素材\\5\\自定义outputformat\\input"));
        job.setOutputFormatClass(MyOutputFormat.class);
        // setOutputPath is still required even though records go to custom files:
        // the committer uses this directory for its working/_SUCCESS files
        MyOutputFormat.setOutputPath(job, new Path("E:\\传智\\大二上半年\\10月\\预习资料\\05MapReduce\\素材\\素材\\5\\自定义outputformat\\input\\output"));
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
Mapper class
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class MyMap extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Pass each input line through unchanged; the RecordWriter does the routing
        context.write(value, NullWritable.get());
    }
}
OutputFormat class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class MyOutputFormat extends FileOutputFormat<Text, NullWritable> {
    @Override
    public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
        Configuration conf = context.getConfiguration();
        // 1. Create the output files
        // 1.1 Get a FileSystem object
        FileSystem fileSystem = FileSystem.get(conf);
        // Output stream for good reviews (paths are hard-coded for this demo)
        FSDataOutputStream fsdGood = fileSystem.create(new Path("E:\\传智\\大二上半年\\10月\\预习资料\\05MapReduce\\素材\\素材\\5\\自定义outputformat\\input\\Good.txt"));
        // Output stream for bad reviews
        FSDataOutputStream fsdBad = fileSystem.create(new Path("E:\\传智\\大二上半年\\10月\\预习资料\\05MapReduce\\素材\\素材\\5\\自定义outputformat\\input\\Bad.txt"));
        return new MyRW(fsdGood, fsdBad);
    }
}
RecordWriter class
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import java.io.IOException;

public class MyRW extends RecordWriter<Text, NullWritable> {
    private FSDataOutputStream fsdGood;
    private FSDataOutputStream fsdBad;

    public MyRW(FSDataOutputStream fsdGood, FSDataOutputStream fsdBad) {
        this.fsdGood = fsdGood;
        this.fsdBad = fsdBad;
    }

    public MyRW() {
    }

    @Override
    public void write(Text text, NullWritable nullWritable) throws IOException, InterruptedException {
        // Records are tab-separated; field 9 is the review flag:
        // "0" means a good review, anything else a bad one
        String[] split = text.toString().split("\\t");
        if (split[9].equals("0")) {
            fsdGood.write(text.toString().getBytes());
            fsdGood.write("\r\n".getBytes());
        } else {
            fsdBad.write(text.toString().getBytes());
            fsdBad.write("\r\n".getBytes());
        }
    }

    @Override
    public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        if (fsdGood != null) {
            fsdGood.close();
        }
        if (fsdBad != null) {
            fsdBad.close();
        }
    }
}
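A quick standalone sanity check of the routing rule (the ten-field, tab-separated layout here is a hypothetical illustration; only index 9, the review flag, matters):

public class RoutingDemo {
    public static void main(String[] args) {
        // Hypothetical records: ten tab-separated fields, flag at index 9
        String good = String.join("\t", "id1", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "0");
        String bad = String.join("\t", "id2", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "1");
        for (String line : new String[]{good, bad}) {
            String[] fields = line.split("\\t");
            // Same test as MyRW.write(): "0" routes to Good.txt, else Bad.txt
            String target = fields[9].equals("0") ? "Good.txt" : "Bad.txt";
            System.out.println(target + " <- " + line);
        }
    }
}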