A. Introduction
I. Overview
1. MapReduce is a distributed computing framework provided by Hadoop.
2. It splits a computation into two phases: the Map phase and the Reduce phase.
When MapReduce processes a file, it first divides the file into input splits (note: a split is a logical division, not the same thing as an HDFS block, which is a physical one). Each split is handled by one MapTask, and by default each MapTask reads and processes its split line by line.
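The split size defaults to the HDFS block size, but FileInputFormat exposes per-job bounds for tuning it, which in turn controls how many MapTasks run. A minimal sketch of setting those bounds (the class name and the 64/128 MB figures are illustrative, not from the original notes):

package cn.Ajaxtxdy.splits;

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class SplitSizeDemo {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance();
        // By default one split roughly corresponds to one HDFS block;
        // tightening the min/max bounds changes the number of MapTasks.
        FileInputFormat.setMinInputSplitSize(job, 64L * 1024 * 1024);   // at least 64 MB per split
        FileInputFormat.setMaxInputSplitSize(job, 128L * 1024 * 1024);  // at most 128 MB per split
    }
}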
Getting-started example: word count (count how many times each word appears)
package cn.Ajaxtxdy.wordcount;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Defines the processing logic of the Map phase.
// In MapReduce, every key and value must be serializable;
// Hadoop uses its own Writable serialization by default,
// hence wrapper types like Text and IntWritable.
// KEYIN    - type of the input key; by default, the byte offset of a line in the file
// VALUEIN  - type of the input value; by default, the content of that line
// KEYOUT   - type of the output key
// VALUEOUT - type of the output value
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // key     - byte offset of the line
    // value   - one line of input
    // context - used to pass data on and to access job configuration
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
            throws IOException, InterruptedException {
        // e.g. the line "hello tom hello bob"
        // First split the line into individual words
        String[] arr = value.toString().split(" ");
        // The map phase emits: hello 1, tom 1, hello 1, bob 1
        // (the reduce phase later aggregates these to: hello 2, tom 1, bob 1)
        for (String str : arr) {
            context.write(new Text(str), new IntWritable(1));
        }
    }
}
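One aside on map() above: allocating a new Text and IntWritable per word works, but on large inputs it creates avoidable garbage. A common Hadoop idiom reuses a single instance of each, which is safe because context.write serializes the current contents at the moment of the call. A sketch of that alternative (the class name ReusingWordCountMapper is illustrative, not part of the original example):

package cn.Ajaxtxdy.wordcount;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class ReusingWordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    // Reused across calls; context.write copies/serializes the current
    // contents immediately, so mutating them afterwards is safe.
    private final Text word = new Text();
    private final IntWritable one = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String str : value.toString().split(" ")) {
            word.set(str);
            context.write(word, one);
        }
    }
}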
package cn.Ajaxtxdy.wordcount;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// The reducer's input comes from the mapper's output, so KEYIN/VALUEIN
// here must match the mapper's KEYOUT/VALUEOUT.
// The final output is each word with its total count,
// which determines KEYOUT/VALUEOUT.
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    // key     - the input key, e.g. "hello"
    // values  - all values grouped under that key, e.g. 1,1,1,1,1...
    // context - writes the result out (to HDFS in this job)
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values,
            Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
package cn.Ajaxtxdy.wordcount;

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // On Windows you may need: System.setProperty("hadoop.home.dir", "<Hadoop install path>");
        // Ask Hadoop for a Job object that carries the execution logic
        Job job = Job.getInstance();
        // Set the entry class (used to locate the jar containing the job)
        job.setJarByClass(WordCountDriver.class);
        // Set the Mapper class
        job.setMapperClass(WordCountMapper.class);
        // Set the Reducer class
        job.setReducerClass(WordCountReducer.class);
        // Set the Mapper's output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Set the Reducer's output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Set the input file
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.1.1:9000/txt/words.txt"));
        // Set the output path; it must not already exist, or the job fails
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.1.1:9000/result/wordcount"));
        // Submit the job and wait for it to finish
        job.waitForCompletion(true);
    }
}
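To run the example, package the three classes into a jar and submit it from a node that can reach the cluster, e.g. hadoop jar wordcount.jar cn.Ajaxtxdy.wordcount.WordCountDriver (the jar name here is illustrative). When the job succeeds, the counts appear under the output directory in part files such as part-r-00000, one word and its total per line, sorted by key.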
Example 1: count the occurrences of each non-whitespace character in a file
package cn.Ajaxtxdy.charcount;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class CharCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Strip all whitespace, then split the line into characters
        char[] cs = value.toString().replaceAll("\\s", "").toCharArray();
        for (char c : cs) {
            context.write(new Text(c + ""), new LongWritable(1));
        }
    }
}
package cn.Ajaxtxdy.charcount;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class CharCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    public void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        // Sum the 1s emitted for this character
        long sum = 0;
        for (LongWritable val : values) {
            sum += val.get();
        }
        context.write(key, new LongWritable(sum));
    }
}
package cn.Ajaxtxdy.charcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class CharCountDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "CharCount");
        job.setJarByClass(cn.Ajaxtxdy.charcount.CharCountDriver.class);
        job.setMapperClass(CharCountMapper.class);
        job.setReducerClass(CharCountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.1.1:9000/txt/characters.txt"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.1.1:9000/result/charcount"));
        if (!job.waitForCompletion(true))
            return;
    }
}
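To make the data flow concrete, suppose the input file held the single line "hello world" (a hypothetical sample, not from the original notes). The mapper strips the space and emits (h,1), (e,1), (l,1), (l,1), (o,1), (w,1), (o,1), (r,1), (l,1), (d,1); after the shuffle, the reducer output, sorted by key, is:
d	1
e	1
h	1
l	3
o	2
r	1
w	1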
Example 2: IP deduplication (output each distinct IP address exactly once)
package cn.Ajaxtxdy.ip;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class IPMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    // Each input line is one IP address; emit it as the key with no value,
    // so identical IPs are grouped together during the shuffle.
    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        context.write(value, NullWritable.get());
    }
}
package cn.Ajaxtxdy.ip;

import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class IPReducer extends Reducer<Text, NullWritable, Text, NullWritable> {
    // key    = one distinct IP
    // values = null, null, null... (one per occurrence; ignored)
    // Writing the key once per group is what removes the duplicates.
    @Override
    public void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        context.write(key, NullWritable.get());
    }
}
package cn.Ajaxtxdy.ip;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IPDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "IPDedup");
        // The class literal must match this class's actual package
        job.setJarByClass(cn.Ajaxtxdy.ip.IPDriver.class);
        job.setMapperClass(IPMapper.class);
        job.setReducerClass(IPReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // If the input path is a single file, only that file is read;
        // if it is a directory, every file under it is read.
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.1.1:9000/txt/ip.txt"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.1.1:9000/result/ip"));
        if (!job.waitForCompletion(true))
            return;
    }
}
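Note that this job contains no aggregation logic at all; the deduplication falls out of the shuffle, which routes every occurrence of the same IP into a single reduce() call, and that call writes the key exactly once. With a hypothetical input of (sample addresses are illustrative, not from the original):
10.0.0.1
10.0.0.2
10.0.0.1
the output is just the two distinct addresses, 10.0.0.1 and 10.0.0.2.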
Example 3: compute each basketball player's total score
package cn.Ajaxtxdy.totalscore;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class TotalScoreMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Each line is "<player> <score>", e.g. "LBJ 39"
        String[] arr = value.toString().split(" ");
        context.write(new Text(arr[0]),
                new IntWritable(Integer.parseInt(arr[1])));
    }
}
package cn.Ajaxtxdy.totalscore;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TotalScoreReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum all of this player's per-game scores
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
package cn.Ajaxtxdy.totalscore;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TotalScoreDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "TotalScore");
        job.setJarByClass(cn.Ajaxtxdy.totalscore.TotalScoreDriver.class);
        job.setMapperClass(TotalScoreMapper.class);
        job.setReducerClass(TotalScoreReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // The original notes break off mid-driver; this completion follows the
        // pattern of the earlier drivers. The file name score.txt is assumed.
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.1.1:9000/txt/score.txt"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.1.1:9000/result/totalscore"));
        if (!job.waitForCompletion(true))
            return;
    }
}
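Because addition is associative and commutative, TotalScoreReducer could also double as a combiner, pre-summing each mapper's local output before the shuffle and cutting network traffic. Under that assumption, one extra line in the driver (not part of the original notes) is enough:

// Pre-aggregate on the map side; safe only because summing
// gives the same result regardless of grouping order.
job.setCombinerClass(TotalScoreReducer.class);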