OutputFormat Data Output

OutputFormat is the base class for MapReduce output; every class that produces MapReduce output derives from OutputFormat.
1. Text output: TextOutputFormat

The default output format is TextOutputFormat, which writes each record as a line of text.
Its keys and values may be of any type, because TextOutputFormat calls toString() to convert them to strings.
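As a quick illustration, here is a minimal driver sketch (the class name and argument handling are illustrative assumptions, not from this article). Because TextOutputFormat is the default, the explicit setOutputFormatClass() call is redundant and is shown only to make the configuration visible:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

// Hypothetical driver: with no Mapper/Reducer set, Hadoop uses the identity
// classes, so each (offset, line) pair from TextInputFormat is written out
// as one line of "key<TAB>value", using toString() on both sides.
public class TextOutputDriver {

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(TextOutputDriver.class);

        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);

        // Redundant: TextOutputFormat is already the default output format.
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
```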
2. SequenceFileOutputFormat

SequenceFileOutputFormat is a good choice when the output will be fed into a downstream MapReduce job, because its binary format is compact and compresses well.
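A sketch of how this might be configured (the driver name and argument handling are assumptions; the compression setters are standard SequenceFileOutputFormat/FileOutputFormat API):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

// Hypothetical driver: writes binary (key, value) pairs instead of text
// lines, ready to be read back by SequenceFileInputFormat in a later job.
public class SequenceFileDriver {

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(SequenceFileDriver.class);

        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        // Compress whole blocks of records: compact on disk, still splittable.
        SequenceFileOutputFormat.setCompressOutput(job, true);
        SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
        SequenceFileOutputFormat.setOutputCompressorClass(job, DefaultCodec.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
```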
3. Custom OutputFormat

Implement the output yourself according to your requirements.

Use case

Controlling the output path and format of the final files.
For example, a single MapReduce program needs to write two classes of results into different directories depending on the data;
this kind of flexible output requirement can be implemented with a custom OutputFormat.

Steps

(1) Define a class that extends FileOutputFormat.
(2) Override RecordWriter, in particular its write() method, which emits the output data. The case study below implements both steps.
Case study

1. Requirement

Filter the input log: lines containing atguigu go to e:/atguigu.log, and lines that do not contain atguigu go to e:/other.log.

(1) Input data
http://www.baidu.com
http://www.google.com
http://cn.bing.com
http://www.atguigu.com
http://www.sohu.com
http://www.sina.com
http://www.sin2a.com
http://www.sin2desa.com
http://www.sindsafa.com
(2) Expected output data: two files

atguigu.log:

http://www.atguigu.com

other.log:

http://cn.bing.com
http://www.baidu.com
http://www.google.com
http://www.sin2a.com
http://www.sin2desa.com
http://www.sina.com
http://www.sindsafa.com
http://www.sohu.com
2. Requirement analysis
![Requirement analysis](https://img-blog.csdnimg.cn/5b7a72096ca34ca78a8b7df72393d16c.png#pic_center)
3. Code implementation

OutputFormat class
```java
package com.saddam.bigdata.ShangGuiGu.OutputFormat;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

// Custom output format: delegates the actual writing to FRecordWriter.
public class FilterOutputFormat extends FileOutputFormat<Text, NullWritable> {

    @Override
    public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
        return new FRecordWriter(job);
    }
}
```
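Note that FilterOutputFormat inherits checkOutputSpecs() from FileOutputFormat, which verifies that the output directory configured in the driver does not already exist; delete it between runs, or the job will fail at submission.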
FRecordWriter class
```java
package com.saddam.bigdata.ShangGuiGu.OutputFormat;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

import java.io.IOException;

public class FRecordWriter extends RecordWriter<Text, NullWritable> {

    FSDataOutputStream fosAtguigu;
    FSDataOutputStream fosOther;

    public FRecordWriter(TaskAttemptContext job) throws IOException {
        // Open one stream per target file. Rethrowing the IOException
        // (instead of only printing it) prevents a null stream from
        // surfacing later as a NullPointerException in write().
        FileSystem fs = FileSystem.get(job.getConfiguration());
        fosAtguigu = fs.create(new Path("D:\\MR\\MapReduce\\OutputDatas\\OutputFormat\\atguigu.log"));
        fosOther = fs.create(new Path("D:\\MR\\MapReduce\\OutputDatas\\OutputFormat\\Other.log"));
    }

    @Override
    public void write(Text key, NullWritable value) throws IOException, InterruptedException {
        // Route each line by content; the Reducer already appended the
        // line separator, so the raw bytes can be written as-is.
        if (key.toString().contains("atguigu")) {
            fosAtguigu.write(key.toString().getBytes());
        } else {
            fosOther.write(key.toString().getBytes());
        }
    }

    @Override
    public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        // Flush and close both streams when the task finishes.
        IOUtils.closeStream(fosAtguigu);
        IOUtils.closeStream(fosOther);
    }
}
```
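One design caveat: the hard-coded D:\ paths tie this writer to a single machine. A possible variant (a sketch, not part of the original case) is to resolve the two files against the output directory the driver already configures, via the standard FileOutputFormat.getOutputPath() accessor (this requires importing org.apache.hadoop.mapreduce.lib.output.FileOutputFormat in FRecordWriter):

```java
// Sketch: alternative constructor for FRecordWriter that derives the two
// target files from the job's configured output directory instead of
// hard-coding absolute local paths.
public FRecordWriter(TaskAttemptContext job) throws IOException {
    FileSystem fs = FileSystem.get(job.getConfiguration());
    // The path set via FileOutputFormat.setOutputPath() in the driver.
    Path outputDir = FileOutputFormat.getOutputPath(job);
    fosAtguigu = fs.create(new Path(outputDir, "atguigu.log"));
    fosOther = fs.create(new Path(outputDir, "Other.log"));
}
```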
Mapper class
```java
package com.saddam.bigdata.ShangGuiGu.OutputFormat;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FilterMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Pass every line straight through; the routing decision is made
        // later, in FRecordWriter.write().
        context.write(value, NullWritable.get());
    }
}
```
Reducer class
```java
package com.saddam.bigdata.ShangGuiGu.OutputFormat;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FilterReducer extends Reducer<Text, NullWritable, Text, NullWritable> {

    Text outK = new Text();

    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        // Append a line separator here, because the custom RecordWriter
        // writes the raw key bytes without adding one.
        String line = key.toString();
        line = line + "\r\n";
        outK.set(line);
        // Iterate over the values so duplicate lines are not collapsed.
        for (NullWritable nullWritable : values) {
            context.write(outK, NullWritable.get());
        }
    }
}
```
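Because the shuffle phase sorts keys before they reach the reducer, each output file ends up in lexicographic order, which is exactly the ordering shown in the expected output above.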
Driver class
```java
package com.saddam.bigdata.ShangGuiGu.OutputFormat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.log4j.BasicConfigurator;

public class FilterDriver {

    public static void main(String[] args) throws Exception {
        BasicConfigurator.configure();

        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        job.setJarByClass(FilterDriver.class);
        job.setMapperClass(FilterMapper.class);
        job.setReducerClass(FilterReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // Plug in the custom output format.
        job.setOutputFormatClass(FilterOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path("D:\\MR\\MapReduce\\InputDatas\\OutputFormat"));
        // An output path is still required even though FRecordWriter writes the
        // .log files itself: FileOutputFormat uses it for the _SUCCESS marker.
        FileOutputFormat.setOutputPath(job, new Path("D:\\MR\\MapReduce\\OutputDatas\\OutputFormat\\output_outputformat"));

        job.waitForCompletion(true);
    }
}
```
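If the job completes successfully, atguigu.log should contain the single atguigu URL, Other.log the remaining eight lines, and the directory passed to setOutputPath() should hold a _SUCCESS marker written by the output committer.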