MapReduce Programming Conventions
The user writes three parts: the Mapper, the Reducer, and the Driver.
Mapper stage:
- A user-defined Mapper must extend the framework's Mapper parent class.
- Mapper input data comes as key-value pairs (the KV types are customizable).
- The Mapper's business logic goes in the map() method.
- Mapper output data is also in key-value pair form.
- The map() method is called once for each input <k, v> pair.
Reducer stage:
- A user-defined Reducer must extend the framework's Reducer parent class.
- The Reducer's input types match the Mapper's output types; they are also key-value pairs.
- The Reducer's business logic goes in the reduce() method.
- The ReduceTask process calls reduce() once for each group of <k, v> pairs sharing the same key (a worked example follows this list).
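For example, given a two-line input file:

hello world
hello hadoop

map() emits one (word, 1) pair per word: (hello, 1), (world, 1), (hello, 1), (hadoop, 1). The shuffle then groups the values by key, so reduce() is invoked three times, with (hadoop, [1]), (hello, [1, 1]), and (world, [1]), and writes the final counts (hadoop, 1), (hello, 2), (world, 1).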
WordCountMapper
package com.hadwinling.mapreduce.wordcount;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
// Map stage
// KEYIN: the input key type; with the default TextInputFormat, the byte offset of the line (LongWritable)
// VALUEIN: the input value type; the contents of the line (Text)
// KEYOUT: the output key type, e.g. the word in atguigu,1 or ss,1 (Text)
// VALUEOUT: the output value type, the count (IntWritable)
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Output objects are reused across map() calls to avoid per-record allocation
    private Text k = new Text();
    private IntWritable v = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Debug output: the key is the byte offset of the current line
        System.out.println(key.toString());
        // 1. Get one line
        String line = value.toString();
        // 2. Split the line into words
        String[] words = line.split(" ");
        // 3. Write out a (word, 1) pair for each word
        for (String word : words) {
            k.set(word);
            context.write(k, v);
        }
    }
}
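One note on the split: line.split(" ") treats each single space as a delimiter, so tabs or runs of spaces produce empty tokens. If the input is not strictly single-space separated, a whitespace regex is a common variant (a sketch, not part of the original code):

String[] words = line.split("\\s+");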
WordCountReducer
package com.hadwinling.mapreduce.wordcount;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

// KEYIN, VALUEIN: the key and value types output by the map stage
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    // Reused output value object
    private IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values,
                          Context context) throws IOException, InterruptedException {
        int sum = 0;
        // 1. Sum the counts for this key
        for (IntWritable value : values) {
            sum += value.get();
        }
        v.set(sum);
        // 2. Write out the result
        context.write(key, v);
    }
}
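Since this reduce logic is a plain sum (associative and commutative), the same class can also be registered as a Combiner to pre-aggregate on the map side and shrink shuffle traffic. A minimal sketch, assuming you add this one line to the Driver below:

job.setCombinerClass(WordCountReducer.class);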
WordCountDriver
package com.hadwinling.mapreduce.wordcount;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class WordCountDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hard-coded local paths for testing; the output path must not exist yet,
        // or the job fails with FileAlreadyExistsException
        args = new String[]{"/home/hadoop/MyTmp/mapreduceTest.txt", "/home/hadoop/workplace/Result/mapreduceTestReduce.txt"};
        Configuration conf = new Configuration();
        // Enable compression of the map-side (intermediate) output
        conf.setBoolean("mapreduce.map.output.compress", true);
        // Choose the codec for the map-side output
        conf.setClass("mapreduce.map.output.compress.codec", BZip2Codec.class, CompressionCodec.class);
        // 1. Get the Job object
        Job job = Job.getInstance(conf);
        // 2. Set the jar location
        job.setJarByClass(WordCountDriver.class);
        // 3. Attach the Mapper and Reducer classes
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4. Set the key and value types of the Mapper output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Set the key and value types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
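        // (Optional sketch, not in the original code: the final reducer output
        // can be compressed as well, reusing the BZip2 codec imported above.)
        // FileOutputFormat.setCompressOutput(job, true);
        // FileOutputFormat.setOutputCompressorClass(job, BZip2Codec.class);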
        // 7. Submit the job and wait for it to finish
        // job.submit();
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
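To run the job on a cluster instead of with the hard-coded paths, drop the args override, package the classes into a jar, and pass the paths on the command line (the jar name here is hypothetical):

hadoop jar wordcount.jar com.hadwinling.mapreduce.wordcount.WordCountDriver /input/path /output/path

On success, the output directory contains a _SUCCESS marker and one part-r-0000N file per reducer holding the word counts.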