MapReduce is a distributed programming model proposed by Google around 2004, originally for the search domain, to tackle computation over massive data sets.
A MapReduce program runs in two phases, Map and Reduce: the user implements a map() function and a reduce() function, and the framework distributes the computation across the cluster.
1、WordCountApp.java
package cmd;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class WordCountApp extends Configured implements Tool{
static String INPUT_PATH = "";
static String OUT_PATH = "";
@Override
public int run(String[] arg0) throws Exception {
INPUT_PATH = arg0[0];
OUT_PATH = arg0[1];
Configuration conf = new Configuration();
final FileSystem fileSystem = FileSystem.get(new URI(INPUT_PATH), conf);
final Path outPath = new Path(OUT_PATH);
if(fileSystem.exists(outPath)){
fileSystem.delete(outPath, true);
}
final Job job = new Job(conf , WordCountApp.class.getSimpleName());
//required when the job is packaged into a jar and run on the cluster
job.setJarByClass(WordCountApp.class);
//1.1 specify where the input files are read from
FileInputFormat.setInputPaths(job, INPUT_PATH);
//specify the class that parses the input files, turning each line into a key/value pair
//job.setInputFormatClass(TextInputFormat.class);
//1.2 specify the custom Mapper class
job.setMapperClass(MyMapper.class);
//the <k,v> types of the map output; can be omitted when <k3,v3> has the same types as <k2,v2>
//job.setMapOutputKeyClass(Text.class);
//job.setMapOutputValueClass(LongWritable.class);
//1.3 partitioning
//job.setPartitionerClass(HashPartitioner.class);
//run with a single reduce task
//job.setNumReduceTasks(1);
//1.4 TODO sorting and grouping
//1.5 TODO combining
//2.2 specify the custom Reducer class
job.setReducerClass(MyReducer.class);
//specify the output types of the reduce phase
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
//2.3 specify where the output is written
FileOutputFormat.setOutputPath(job, outPath);
//specify the class that formats the output file
//job.setOutputFormatClass(TextOutputFormat.class);
//submit the job to the JobTracker and wait for it to finish
job.waitForCompletion(true);
return 0;
}
public static void main(String[] args) throws Exception {
ToolRunner.run(new WordCountApp(), args);
}
/**
 * KEYIN  i.e. k1: the byte offset of the line
 * VALUEIN  i.e. v1: the text of the line
 * KEYOUT  i.e. k2: a word appearing in the line
 * VALUEOUT  i.e. v2: the number of occurrences of the word, a fixed value of 1
 */
static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable>{
@Override
protected void map(LongWritable k1, Text v1, Context context) throws IOException, InterruptedException {
final String[] splited = v1.toString().split("\t");
for (String word : splited) {
context.write(new Text(word), new LongWritable(1));
}
}
}
/**
* KEYIN 即k2 表示行中出现的单词
* VALUEIN 即v2 表示行中出现的单词的次数
* KEYOUT 即k3 表示文本中出现的不同单词
* VALUEOUT 即v3 表示文本中出现的不同单词的总次数
*
*/
static class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable>{
@Override
protected void reduce(Text k2, Iterable<LongWritable> v2s, Context ctx) throws IOException, InterruptedException {
long times = 0L;
for (LongWritable count : v2s) {
times += count.get();
}
ctx.write(k2, new LongWritable(times));
}
}
}
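Step 1.5 above ("combining") is left as a TODO. Because word-count addition is associative and commutative, the reducer itself can double as the combiner, summing partial counts on the map side before the shuffle. A minimal sketch, assuming the MyReducer defined above; the single extra line belongs in run() next to the other job settings:
//sketch (not in the original run()): reuse MyReducer as a combiner to cut shuffle traffic
job.setCombinerClass(MyReducer.class);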
2、GroupApp.java
package group;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
public class GroupApp {
static final String INPUT_PATH = "hdfs://cloud4:9000/input";
static final String OUT_PATH = "hdfs://cloud4:9000/out";
public static void main(String[] args) throws Exception{
final Configuration configuration = new Configuration();
final FileSystem fileSystem = FileSystem.get(new URI(INPUT_PATH), configuration);
if(fileSystem.exists(new Path(OUT_PATH))){
fileSystem.delete(new Path(OUT_PATH), true);
}
final Job job = new Job(configuration, GroupApp.class.getSimpleName());
//1.1 specify the input file path
FileInputFormat.setInputPaths(job, INPUT_PATH);
//specify the class that parses the input files
job.setInputFormatClass(TextInputFormat.class);
//1.2 specify the custom Mapper class
job.setMapperClass(MyMapper.class);
//specify the output <k2,v2> types
job.setMapOutputKeyClass(NewK2.class);
job.setMapOutputValueClass(LongWritable.class);
//1.3 specify the partitioner class
job.setPartitionerClass(HashPartitioner.class);
job.setNumReduceTasks(1);
//1.4 sorting and grouping: use the custom grouping comparator
job.setGroupingComparatorClass(MyGroupingComparator.class);
//1.5 TODO (optional) combining
//2.2 specify the custom Reducer class
job.setReducerClass(MyReducer.class);
//specify the output <k3,v3> types
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(LongWritable.class);
//2.3 specify where the output is written
FileOutputFormat.setOutputPath(job, new Path(OUT_PATH));
//specify the class that formats the output file
job.setOutputFormatClass(TextOutputFormat.class);
//submit the job to the JobTracker and wait for it to finish
job.waitForCompletion(true);
}
static class MyMapper extends Mapper<LongWritable, Text, NewK2, LongWritable>{
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
final String[] splited = value.toString().split("\t");
final NewK2 k2 = new NewK2(Long.parseLong(splited[0]), Long.parseLong(splited[1]));
final LongWritable v2 = new LongWritable(Long.parseLong(splited[1]));
context.write(k2, v2);
}
}
static class MyReducer extends Reducer<NewK2, LongWritable, LongWritable, LongWritable>{
@Override
protected void reduce(NewK2 k2, Iterable<LongWritable> v2s, Context context) throws IOException, InterruptedException {
long min = Long.MAX_VALUE;
for (LongWritable v2 : v2s) {
if(v2.get() < min){
min = v2.get();
}
}
context.write(new LongWritable(k2.first), new LongWritable(min));
}
}
/**
 * Q: Why define this class?
 * A: Because the original v2 cannot take part in sorting. Wrapping the original k2 and v2
 *    together in one class gives a new k2 that sorts on both columns.
 */
static class NewK2 implements WritableComparable<NewK2>{
Long first;
Long second;
public NewK2(){}
public NewK2(long first, long second){
this.first = first;
this.second = second;
}
@Override
public void readFields(DataInput in) throws IOException {
this.first = in.readLong();
this.second = in.readLong();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(first);
out.writeLong(second);
}
/**
 * Called when k2 values are sorted.
 * Sort ascending by the first column; when the first columns are equal, ascending by the second.
 */
@Override
public int compareTo(NewK2 o) {
//Long.compareTo avoids the sign error that casting a long difference to int can cause
final int cmp = this.first.compareTo(o.first);
if(cmp != 0){
return cmp;
}
return this.second.compareTo(o.second);
}
@Override
public int hashCode() {
return this.first.hashCode()+this.second.hashCode();
}
@Override
public boolean equals(Object obj) {
if(!(obj instanceof NewK2)){
return false;
}
NewK2 oK2 = (NewK2)obj;
return this.first.equals(oK2.first) && this.second.equals(oK2.second);
}
}
/**
 * Q: Why define this class?
 * A: The business rule groups by the first column only, but NewK2's comparator sorts on both
 *    columns, so it cannot be used for grouping. A custom grouping comparator is required.
 */
static class MyGroupingComparator implements RawComparator<NewK2>{
@Override
public int compare(NewK2 o1, NewK2 o2) {
return o1.first.compareTo(o2.first);
}
/**
 * @param b1 the first serialized key
 * @param s1 the start of the first key within b1
 * @param l1 the length of the first key, in bytes
 *
 * @param b2 the second serialized key
 * @param s2 the start of the second key within b2
 * @param l2 the length of the second key, in bytes
 */
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2,
int s2, int l2) {
//NewK2 serializes two longs, so the first column is exactly the first 8 bytes of each key
return WritableComparator.compareBytes(b1, s1, 8, b2, s2, 8);
}
}
}
}
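An alternative sketch (not from the original post): instead of implementing RawComparator by hand, extend WritableComparator and let Hadoop deserialize the keys before comparison. Assuming the NewK2 class above:
static class MyGroupingComparator2 extends WritableComparator {
protected MyGroupingComparator2() {
super(NewK2.class, true); //true: create key instances so compare() receives deserialized objects
}
@Override
public int compare(WritableComparable a, WritableComparable b) {
//group by the first column only, mirroring the raw byte comparison above
return ((NewK2) a).first.compareTo(((NewK2) b).first);
}
}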
3、data
# For rows with the same first column, find the minimum of the second column
3 3
3 2
3 1
2 2
2 1
1 1
-------------------
3 1
2 1
1 1
4、KpiApp.java
package mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
public class KpiApp {
static final String INPUT_PATH = "hdfs://cloud4:9000/wlan";
static final String OUT_PATH = "hdfs://cloud4:9000/out";
public static void main(String[] args) throws Exception{
final Job job = new Job(new Configuration(), KpiApp.class.getSimpleName());
//1.1 specify the input file path
FileInputFormat.setInputPaths(job, INPUT_PATH);
//specify the class that parses the input files
job.setInputFormatClass(TextInputFormat.class);
//1.2 specify the custom Mapper class
job.setMapperClass(MyMapper.class);
//specify the output <k2,v2> types
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(KpiWritable.class);
//1.3 specify the partitioner class
job.setPartitionerClass(HashPartitioner.class);
job.setNumReduceTasks(1);
//1.4 TODO sorting and grouping
//1.5 TODO (optional) combining
//2.2 specify the custom Reducer class
job.setReducerClass(MyReducer.class);
//specify the output <k3,v3> types
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(KpiWritable.class);
//2.3 specify where the output is written
FileOutputFormat.setOutputPath(job, new Path(OUT_PATH));
//specify the class that formats the output file
job.setOutputFormatClass(TextOutputFormat.class);
//submit the job to the JobTracker and wait for it to finish
job.waitForCompletion(true);
}
static class MyMapper extends Mapper<LongWritable, Text, Text, KpiWritable>{
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
final String[] splited = value.toString().split("\t");
final String msisdn = splited[1]; //field 1 is the phone number
final Text k2 = new Text(msisdn);
final KpiWritable v2 = new KpiWritable(splited[6], splited[7], splited[8], splited[9]);
context.write(k2, v2);
}
}
static class MyReducer extends Reducer<Text, KpiWritable, Text, KpiWritable>{
/**
 * @param k2 a distinct phone number from the input
 * @param v2s the collection of that phone number's traffic records
 */
@Override
protected void reduce(Text k2, Iterable<KpiWritable> v2s, Context context) throws IOException, InterruptedException {
long upPackNum = 0L;
long downPackNum = 0L;
long upPayLoad = 0L;
long downPayLoad = 0L;
for (KpiWritable kpiWritable : v2s) {
upPackNum += kpiWritable.upPackNum;
downPackNum += kpiWritable.downPackNum;
upPayLoad += kpiWritable.upPayLoad;
downPayLoad += kpiWritable.downPayLoad;
}
final KpiWritable v3 = new KpiWritable(upPackNum+"", downPackNum+"", upPayLoad+"", downPayLoad+"");
context.write(k2, v3);
}
}
}
class KpiWritable implements Writable{
long upPackNum; //upstream packet count
long downPackNum; //downstream packet count
long upPayLoad; //upstream traffic (payload)
long downPayLoad; //downstream traffic (payload)
public KpiWritable(){}
public KpiWritable(String upPackNum, String downPackNum, String upPayLoad, String downPayLoad){
this.upPackNum = Long.parseLong(upPackNum);
this.downPackNum = Long.parseLong(downPackNum);
this.upPayLoad = Long.parseLong(upPayLoad);
this.downPayLoad = Long.parseLong(downPayLoad);
}
@Override
public void readFields(DataInput in) throws IOException {
this.upPackNum = in.readLong();
this.downPackNum = in.readLong();
this.upPayLoad = in.readLong();
this.downPayLoad = in.readLong();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(upPackNum);
out.writeLong(downPackNum);
out.writeLong(upPayLoad);
out.writeLong(downPayLoad);
}
@Override
public String toString() {
return upPackNum + "\t" + downPackNum + "\t" + upPayLoad + "\t" + downPayLoad;
}
}
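A custom Writable breaks silently when write() and readFields() fall out of sync. A quick local self-test, a sketch assuming it sits in the same mapreduce package as KpiWritable (KpiWritableTest is not part of the original post):
package mapreduce;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
public class KpiWritableTest {
public static void main(String[] args) throws IOException {
final KpiWritable original = new KpiWritable("3", "4", "100", "200");
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
original.write(new DataOutputStream(bytes)); //serialize
final KpiWritable copy = new KpiWritable();
copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))); //deserialize
System.out.println(original); //3	4	100	200
System.out.println(copy); //must print the same line, field for field
}
}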
5、SortApp.java
package sort;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
public class SortApp {
static final String INPUT_PATH = "hdfs://cloud4:9000/input";
static final String OUT_PATH = "hdfs://cloud4:9000/out";
public static void main(String[] args) throws Exception{
final Configuration configuration = new Configuration();
final FileSystem fileSystem = FileSystem.get(new URI(INPUT_PATH), configuration);
if(fileSystem.exists(new Path(OUT_PATH))){
fileSystem.delete(new Path(OUT_PATH), true);
}
final Job job = new Job(configuration, SortApp.class.getSimpleName());
//1.1 specify the input file path
FileInputFormat.setInputPaths(job, INPUT_PATH);
//specify the class that parses the input files
job.setInputFormatClass(TextInputFormat.class);
//1.2 specify the custom Mapper class
job.setMapperClass(MyMapper.class);
//specify the output <k2,v2> types
job.setMapOutputKeyClass(NewK2.class);
job.setMapOutputValueClass(LongWritable.class);
//1.3 specify the partitioner class
job.setPartitionerClass(HashPartitioner.class);
job.setNumReduceTasks(1);
//1.4 sorting: handled by NewK2.compareTo below
//1.5 TODO (optional) combining
//2.2 specify the custom Reducer class
job.setReducerClass(MyReducer.class);
//specify the output <k3,v3> types
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(LongWritable.class);
//2.3 specify where the output is written
FileOutputFormat.setOutputPath(job, new Path(OUT_PATH));
//specify the class that formats the output file
job.setOutputFormatClass(TextOutputFormat.class);
//submit the job to the JobTracker and wait for it to finish
job.waitForCompletion(true);
}
static class MyMapper extends Mapper<LongWritable, Text, NewK2, LongWritable>{
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
final String[] splited = value.toString().split("\t");
final NewK2 k2 = new NewK2(Long.parseLong(splited[0]), Long.parseLong(splited[1]));
final LongWritable v2 = new LongWritable(Long.parseLong(splited[1]));
context.write(k2, v2);
}
}
static class MyReducer extends Reducer<NewK2, LongWritable, LongWritable, LongWritable>{
@Override
protected void reduce(NewK2 k2, Iterable<LongWritable> v2s, Context context) throws IOException, InterruptedException {
context.write(new LongWritable(k2.first), new LongWritable(k2.second));
}
}
/**
 * Q: Why define this class?
 * A: Because the original v2 cannot take part in sorting. Wrapping the original k2 and v2
 *    together in one class gives a new k2 that sorts on both columns.
 */
static class NewK2 implements WritableComparable<NewK2>{
Long first;
Long second;
public NewK2(){}
public NewK2(long first, long second){
this.first = first;
this.second = second;
}
@Override
public void readFields(DataInput in) throws IOException {
this.first = in.readLong();
this.second = in.readLong();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(first);
out.writeLong(second);
}
/**
 * Called when k2 values are sorted.
 * Sort ascending by the first column; when the first columns are equal, ascending by the second.
 */
@Override
public int compareTo(NewK2 o) {
//Long.compareTo avoids the sign error that casting a long difference to int can cause
final int cmp = this.first.compareTo(o.first);
if(cmp != 0){
return cmp;
}
return this.second.compareTo(o.second);
}
@Override
public int hashCode() {
return this.first.hashCode()+this.second.hashCode();
}
@Override
public boolean equals(Object obj) {
if(!(obj instanceof NewK2)){
return false;
}
NewK2 oK2 = (NewK2)obj;
return this.first.equals(oK2.first) && this.second.equals(oK2.second);
}
}
}
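Assuming the same input as in section 3 above, SortApp emits every pair in ascending order (first column, then second) instead of the per-group minimum, so the expected output would be:
1	1
2	1
2	2
3	1
3	2
3	3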
6、TopKApp.java
package suanfa;
import java.net.URI;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Exercise: find the 100 largest values.
 * (The code below only tracks the single largest value; a top-K sketch follows the listing.)
 */
public class TopKApp {
static final String INPUT_PATH = "hdfs://cloud4:9000/input";
static final String OUT_PATH = "hdfs://cloud4:9000/out";
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
final FileSystem fileSystem = FileSystem.get(new URI(INPUT_PATH), conf);
final Path outPath = new Path(OUT_PATH);
if(fileSystem.exists(outPath)){
fileSystem.delete(outPath, true);
}
final Job job = new Job(conf, TopKApp.class.getSimpleName());
FileInputFormat.setInputPaths(job, INPUT_PATH);
job.setMapperClass(MyMapper.class);
job.setReducerClass(MyReducer.class);
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(NullWritable.class);
FileOutputFormat.setOutputPath(job, outPath);
job.waitForCompletion(true);
}
static class MyMapper extends Mapper<LongWritable, Text, LongWritable, NullWritable>{
long max = Long.MIN_VALUE;
@Override
protected void map(LongWritable k1, Text v1, Context context) throws IOException, InterruptedException {
final long temp = Long.parseLong(v1.toString());
if(temp > max){
max = temp;
}
}
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
context.write(new LongWritable(max), NullWritable.get());
}
}
static class MyReducer extends Reducer<LongWritable, NullWritable, LongWritable, NullWritable>{
long max = Long.MIN_VALUE;
@Override
protected void reduce(LongWritable k2, Iterable<NullWritable> v2s, Context context) throws IOException, InterruptedException {
final long temp = k2.get();
if(temp > max){
max = temp;
}
}
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
context.write(new LongWritable(max), NullWritable.get());
}
}
}
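The listing above keeps only a single maximum, so it actually solves top-1. A hedged sketch of the full exercise (TopKMapper is not from the original post): bound a TreeMap at K = 100 entries in the mapper, evicting the smallest key whenever the map grows past K, and emit the survivors in cleanup(); the reducer applies the same pattern to the at most K values it receives from each mapper. Note that a TreeMap keyed on the value collapses duplicates, so this variant assumes distinct input values.
static class TopKMapper extends Mapper<LongWritable, Text, LongWritable, NullWritable>{
static final int K = 100;
//TreeMap keeps keys in ascending order, so firstKey() is always the current smallest
private final java.util.TreeMap<Long, Boolean> topK = new java.util.TreeMap<Long, Boolean>();
@Override
protected void map(LongWritable k1, Text v1, Context context) {
topK.put(Long.parseLong(v1.toString()), Boolean.TRUE);
if(topK.size() > K){
topK.remove(topK.firstKey()); //evict the smallest of the K+1 entries
}
}
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
for (Long value : topK.keySet()) {
context.write(new LongWritable(value), NullWritable.get());
}
}
}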