hadoop_mapreduce05 - Custom OutputFormat Implementation in Practice
A custom OutputFormat can write results to the local file system, MySQL, HDFS, and other destinations.
Custom OutputFormat implementation classes
OutputFormat is the base class for MapReduce output; every MapReduce output implementation derives from it. Several common OutputFormat implementations are introduced below.
1. Text output: TextOutputFormat
The default output format is TextOutputFormat, which writes each record as a line of text. Its keys and values can be of any type, because TextOutputFormat calls their toString() method to convert them to strings.
2. SequenceFileOutputFormat
When the output will be consumed as the input of subsequent MapReduce jobs, SequenceFileOutputFormat is a good choice: its format is compact and compresses well (a driver-side sketch of both formats follows this list).
3. Custom OutputFormat
Implement the output yourself according to the user's requirements.
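For orientation, a minimal driver-side sketch of selecting the first two formats; the separator property and the compression settings are optional extras shown only as examples, the imports from org.apache.hadoop.mapreduce.lib.output and org.apache.hadoop.io.SequenceFile are assumed, and a real driver would pick only one of the two formats:
// 1. TextOutputFormat is already the default, so setting it explicitly is optional.
job.setOutputFormatClass(TextOutputFormat.class);
// Keys and values are converted with toString() and separated by a tab by default;
// the separator is configurable (Hadoop 2.x property name).
job.getConfiguration().set("mapreduce.output.textoutputformat.separator", "\t");

// 2. SequenceFileOutputFormat writes compact binary output, optionally block-compressed,
//    which later MapReduce jobs can consume directly as their input.
job.setOutputFormatClass(SequenceFileOutputFormat.class);
SequenceFileOutputFormat.setCompressOutput(job, true);
SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);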
Use cases
A custom OutputFormat lets you control the output path and the format of the final files. For example, writing two kinds of results from a single MapReduce program to different destinations depending on the data is exactly the kind of flexible output requirement that a custom OutputFormat can satisfy.
Steps for a custom OutputFormat
(1) Define a class that extends FileOutputFormat.
(2) Implement a RecordWriter, overriding the write() method that emits the output data (a minimal skeleton follows this list).
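Before the full case study below, a minimal sketch of these two steps; the class names here are illustrative only, not part of the case:
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class MyOutputFormat extends FileOutputFormat<Text, NullWritable> {

    @Override
    public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job)
            throws IOException, InterruptedException {
        // Step (1): the OutputFormat only hands back a custom RecordWriter
        return new MyRecordWriter();
    }

    private static class MyRecordWriter extends RecordWriter<Text, NullWritable> {
        @Override
        public void write(Text key, NullWritable value) throws IOException, InterruptedException {
            // Step (2): decide here which file/stream each record is written to
        }

        @Override
        public void close(TaskAttemptContext context) throws IOException, InterruptedException {
            // Close any streams opened by this writer
        }
    }
}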
Case study
Requirement
Filter the input log file: URLs that contain aliyun are written to aliyun.log, and URLs that do not contain aliyun are written to other.log.
Analysis
- Requirement: filter the input log; lines containing aliyun go to aliyun.log, the rest go to other.log.
- Input url.txt:
https://www.baidu.com
https://developer.aliyun.com/mirror
https://help.aliyun.com/product/27797.html
https://www.hhhh.com
- Output aliyun.log and other.log:
aliyun.log
https://developer.aliyun.com/mirror
https://help.aliyun.com/product/27797.html
other.log
https://www.baidu.com
https://www.hhhh.com
- Custom OutputFormat class
(1) Create a class FRecordWriter that extends RecordWriter.
(a) Create two output streams: aliyunOut and otherOut.
(b) If an input line contains aliyun, write it to the aliyunOut stream; otherwise write it to the otherOut stream.
- Driver class
// Register the custom output format component with the job
job.setOutputFormatClass(FilterOutputFormat.class);
Implementation
(0) Maven dependencies
<dependencies>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.8.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.7.2</version>
    </dependency>
</dependencies>
In the project's src/main/resources directory, create a file named "log4j.properties" with the following contents:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
(1) FilterMapper class
package com.art.mr.outputformat;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FilterMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Each input line is a URL, e.g.
        //   https://www.baidu.com
        //   https://developer.aliyun.com/mirror
        // Emit the whole line as the key; no value is needed.
        context.write(value, NullWritable.get());
    }
}
(2) FilterReducer class
package com.art.mr.outputformat;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FilterReducer extends Reducer<Text, NullWritable, Text, NullWritable> {

    Text k = new Text();

    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        // Append a line separator; without it the URLs run together in the output files.
        String line = key.toString();
        line += "\r\n";
        k.set(line);

        // Identical URLs are grouped into a single reduce call, so iterate over the
        // values and write the key once per occurrence to keep duplicates in the output.
        for (NullWritable nullWritable : values) {
            context.write(k, NullWritable.get());
        }
    }
}
(3) FilterOutputFormat class
package com.art.mr.outputformat;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Custom OutputFormat: extends FileOutputFormat and hands back an FRecordWriter,
 * which decides for each record whether it goes to aliyun.log or other.log.
 */
public class FilterOutputFormat extends FileOutputFormat<Text, NullWritable> {

    @Override
    public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
        return new FRecordWriter(job);
    }
}
FRecordWriter class
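A sketch of the FRecordWriter returned by FilterOutputFormat, following the analysis above. The directory used for aliyun.log and other.log below is an assumption (it mirrors the driver's output path); adjust it to your environment.
package com.art.mr.outputformat;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

import java.io.IOException;

public class FRecordWriter extends RecordWriter<Text, NullWritable> {

    private FSDataOutputStream aliyunOut;
    private FSDataOutputStream otherOut;

    public FRecordWriter(TaskAttemptContext job) throws IOException {
        // Obtain the file system from the job configuration
        FileSystem fs = FileSystem.get(job.getConfiguration());
        // Create the two output streams (paths are assumptions; change them as needed)
        aliyunOut = fs.create(new Path("/Users/art/Documents/demo_datas/filter_outputformat_outputs/aliyun.log"));
        otherOut = fs.create(new Path("/Users/art/Documents/demo_datas/filter_outputformat_outputs/other.log"));
    }

    @Override
    public void write(Text key, NullWritable value) throws IOException, InterruptedException {
        // URLs containing "aliyun" go to aliyun.log, everything else to other.log
        if (key.toString().contains("aliyun")) {
            aliyunOut.write(key.toString().getBytes());
        } else {
            otherOut.write(key.toString().getBytes());
        }
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException, InterruptedException {
        // Close both streams when the task finishes
        IOUtils.closeStream(aliyunOut);
        IOUtils.closeStream(otherOut);
    }
}
Because the RecordWriter creates aliyun.log and other.log itself, the job's configured output directory ends up holding only the _SUCCESS marker file.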
(4) FilterDriver class
package com.art.mr.outputformat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FilterDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Local input/output paths for testing
        args = new String[]{"/Users/art/Documents/demo_datas/filter_outputformat_inputs/url.txt", "/Users/art/Documents/demo_datas/filter_outputformat_outputs/"};

        // 1. Configuration and job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Driver class
        job.setJarByClass(FilterDriver.class);

        // 3. Mapper and its output types
        job.setMapperClass(FilterMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);

        // 4. Reducer and the final output types
        job.setReducerClass(FilterReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // 5. Custom output format and input/output paths
        job.setOutputFormatClass(FilterOutputFormat.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Even though FRecordWriter writes aliyun.log and other.log itself, an output path
        // is still required: the framework writes the _SUCCESS marker file there.
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 6. Submit
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
done.