1.首先介绍OutputFormat类的作用:
1)验证输出规范的有效性
2)提供一个RecordWriter实现类的实例，用于向输出文件执行写操作。
RecordWriter用于将输出的值与键类型的Java实例转化成原始字节。
2.主要讨论如何将文本文件转化成XML文件
输出示例
<recs>
<rec>
<key>0</key>
<year>1988</year>
</rec>
<rec>
<key>98</key>
<year>1998</year>
</rec>
</recs>
3.主程序代码
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * Driver for a map-only job that converts text input into XML output via the
 * custom {@code XMLOutputFormat}. Lines containing a space are passed through
 * unchanged with their byte offset as the key; all other lines are dropped.
 */
public class OutputFormatTest extends Configured implements Tool {

    /**
     * Identity-style mapper: emits (offset, line) only for lines that contain
     * at least one space character. No reducer is configured, so these pairs
     * go straight to the output format.
     */
    public static class TextToXMLConversionMapper
            extends Mapper<LongWritable, Text, LongWritable, Text> {

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Filter: only forward lines that look like delimited records.
            if (value.toString().contains(" ")) {
                context.write(key, value);
            }
        }
    }

    /**
     * Configures and submits the job.
     *
     * @param args args[0] = input path, args[1] = output path
     * @return 0 on success, 1 on failure
     */
    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(getConf());
        job.setJarByClass(OutputFormatTest.class);
        job.setInputFormatClass(TextInputFormat.class);
        // The custom output format renders each (key, value) pair as XML.
        job.setOutputFormatClass(XMLOutputFormat.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);
        job.setMapperClass(TextToXMLConversionMapper.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
        return job.isSuccessful() ? 0 : 1;
    }

    public static void main(String args[]) throws Exception {
        int exitCode = ToolRunner.run(new Configuration(), new OutputFormatTest(), args);
        System.exit(exitCode);
    }
}
4.自定义OutputFormat类代码即XMLOutputFormat类
package com.input.output;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * A {@link FileOutputFormat} that writes each (key, value) pair as an XML
 * {@code <rec>} element, all wrapped in a single {@code <recs>} root element.
 * Output files use the suffix "data.xml" after the default "part-..." prefix.
 */
public class XMLOutputFormat extends FileOutputFormat<LongWritable, Text> {

    /**
     * Writes records as XML. Emits the {@code <recs>} opening tag on
     * construction and the closing tag on {@link #close}.
     */
    protected static class XMLRecordWriter extends RecordWriter<LongWritable, Text> {

        private DataOutputStream out;

        public XMLRecordWriter(DataOutputStream out) throws IOException {
            this.out = out;
            out.writeBytes("<recs>\n");
        }

        /** Writes a single {@code <tag>value</tag>} line. */
        private void writeTag(String tag, String value) throws IOException {
            // NOTE(review): writeBytes truncates each char to its low byte;
            // non-ASCII content would be corrupted — acceptable here since the
            // sample data is ASCII, but confirm for general inputs.
            out.writeBytes("<" + tag + ">" + value + "</" + tag + ">\n");
        }

        @Override
        public void close(TaskAttemptContext context) throws IOException, InterruptedException {
            try {
                // Close the root element even if the stream close below fails.
                out.writeBytes("</recs>\n");
            } finally {
                out.close();
            }
        }

        @Override
        public void write(LongWritable key, Text value) throws IOException, InterruptedException {
            // Fix: the original omitted the newlines after <rec> and </rec>,
            // producing "<rec><key>..." and "</rec><rec>" runs that do not
            // match the documented sample output (one tag per line).
            out.writeBytes("<rec>\n");
            this.writeTag("key", Long.toString(key.get()));
            // The first comma-separated field of the value is the year.
            String contents[] = value.toString().split(",");
            this.writeTag("year", contents[0]);
            out.writeBytes("</rec>\n");
        }
    }

    /**
     * Creates the per-task output file (default "part" prefix plus the
     * "data.xml" suffix) and returns a writer over it.
     */
    @Override
    public RecordWriter<LongWritable, Text> getRecordWriter(TaskAttemptContext job)
            throws IOException, InterruptedException {
        String extension = "data.xml";
        Path file = getDefaultWorkFile(job, extension);
        FileSystem fs = file.getFileSystem(job.getConfiguration());
        // false: fail if the file already exists rather than overwrite.
        FSDataOutputStream fileout = fs.create(file, false);
        return new XMLRecordWriter(fileout);
    }
}
5.输出结果的处理过程如下:
1)Hadoop的框架通过调用XMLOutputFormat类(OutputFormat的一个实现)实例的getRecordWriter()的方法,获得XMLRecordWriter实例的引用。
2)RecordWriter 配置输出的参数,包括文件名的前缀和扩展名。扩展名会在代码列表里提供。文件的默认前缀是part,该值