MapReduce 2.7.7
POM dependencies
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>xuying</groupId>
    <artifactId>maven_model</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <apache.hadoop.version>2.7.7</apache.hadoop.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${apache.hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${apache.hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${apache.hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.testng</groupId>
            <artifactId>testng</artifactId>
            <version>RELEASE</version>
            <scope>compile</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-jar-plugin</artifactId>
                <version>2.4</version>
                <configuration>
                    <archive>
                        <manifest>
                            <addClasspath>true</addClasspath>
                            <classpathPrefix>lib/</classpathPrefix>
                            <mainClass>cxy.develop.hadoop.mr.MapReduceMain</mainClass>
                        </manifest>
                    </archive>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
Map phase
In order, the type parameters LongWritable, Text, Text, IntWritable are the map input key, input value, output key, and output value.
The input key is usually LongWritable: the byte offset of each line in the input file.
The input value is usually Text: the string to process, i.e. the content of one line of the input file.
The output key is whatever the business logic calls for.
The output value likewise depends on the business logic and can be a custom type.
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class MrMap extends Mapper<LongWritable, Text, Text, IntWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // value is one line of the input file; key is its byte offset
        String line = value.toString();
        if (!StringUtils.isEmpty(line)) {
            // split the line on commas and emit <word, 1> for every word
            String[] split = line.split(",");
            for (int i = 0; i < split.length; i++) {
                String word = split[i];
                context.write(new Text(word), new IntWritable(1));
            }
        }
    }
}
For example, suppose my input file contains the following (reconstructed from the walkthrough below). The map phase reads it one line at a time, calling the overridden map method once per line:
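sdasd,wwq,ttew,ddvve,sssaw
apple,ban,asd,apple,haha
ban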
When the first line is read, the map input key is 0 and the value is sdasd,wwq,ttew,ddvve,sssaw;
my code then calls context.write five times, emitting <"sdasd",1> <"wwq",1> <"ttew",1> <"ddvve",1> <"sssaw",1>.
Likewise, reading the second line emits <"apple",1> <"ban",1> <"asd",1> <"apple",1> <"haha",1>,
and the third line emits <"ban",1>.
When this map phase finishes, its output is sorted and grouped by key:
<"apple",1> <"apple",1> <"asd",1> <"ban",1> <"ban",1> <"ddvve",1> <"haha",1> <"sdasd",1> <"sssaw",1> <"ttew",1> <"wwq",1>
The sorted and grouped map output then enters reduce.
Reduce phase
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class MrReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // sum the 1s collected for this key to get the word's total count
        int sum = 0;
        for (IntWritable temp : values) {
            sum += temp.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
In order, the type parameters Text, IntWritable, Text, IntWritable are the reduce input key, input value, output key, and output value.
Reduce receives each key together with an iterator over that key's values; for the example above, the input to reduce is
<"apple",[ 1, 1 ]> <"asd",[ 1 ]> <"ban",[ 1, 1 ]> <"ddvve",[ 1 ]> <"haha",[ 1 ]> <"sdasd",[ 1 ]> <"sssaw",[ 1 ]> <"ttew",[ 1 ]> <"wwq",[ 1 ]>
Reduce then walks the iterator and sums the values, which gives the count of each word.
Job creation
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MapReduceMain {
    public static void main(String[] args) {
        Job job = null;
        try {
            Configuration conf = new Configuration();
            System.setProperty("HADOOP_USER_NAME", "root");
            job = Job.getInstance(conf, "cxy-job-name");
            job.setJarByClass(MapReduceMain.class);
            job.setMapperClass(MrMap.class);
            job.setReducerClass(MrReduce.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);
            // number of reduce tasks (default 1); with more than one,
            // map output keys are routed to reducers by hash(key) mod this number
            job.setNumReduceTasks(1);
            FileInputFormat.addInputPath(job, new Path("/cxytest/src.txt"));
            FileOutputFormat.setOutputPath(job, new Path("/cxytest/res"));
            boolean b = job.waitForCompletion(true);
            System.exit(b ? 0 : 1);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Package the project as a jar, upload it to the Hadoop environment, and run the jar with hadoop (the main class can be omitted on the command line because the jar manifest already names it):
hadoop jar maven_model-1.0-SNAPSHOT.jar
The result after running: based on the sample input above, the output file /cxytest/res/part-r-00000 should contain the following (TextOutputFormat separates key and value with a tab by default):
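apple	2
asd	1
ban	2
ddvve	1
haha	1
sdasd	1
sssaw	1
ttew	1
wwq	1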
Splitting the input data
By default the input is read by TextInputFormat, the default input component, which reads the data one line at a time as <offset, contents of that line>.
After processing, each map task's output is sorted and grouped, then flows into the reduce tasks.
If NumReduceTasks is set (the default is 1), each map task's output is also routed to the different reduce tasks by a rule (for example, the key's hashcode modulo the task count), and all values whose keys compare equal form one group per call to the overridden reduce method.
Finally the results are written by TextOutputFormat, the default output component.
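For reference, the default routing rule is Hadoop's built-in HashPartitioner; its getPartition is essentially the following (masking with Integer.MAX_VALUE clears the sign bit so the result is never negative):

import org.apache.hadoop.mapreduce.Partitioner;

public class HashPartitioner<K, V> extends Partitioner<K, V> {
    // route each key to a reduce task: hash wrapped into [0, numReduceTasks)
    public int getPartition(K key, V value, int numReduceTasks) {
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}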
Sorting
Keys arrive at reduce sorted lexicographically by default, so the final output is also in key order; a custom key class should implement WritableComparable so it can be both serialized and sorted, as sketched below.
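A minimal sketch of such a custom key, using a hypothetical WordPair class that is not part of the word-count example above:

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Hypothetical composite key: sorts by word, then by count descending
public class WordPair implements WritableComparable<WordPair> {
    private String word = "";
    private int count;

    public void write(DataOutput out) throws IOException {
        out.writeUTF(word);
        out.writeInt(count);
    }

    public void readFields(DataInput in) throws IOException {
        word = in.readUTF();
        count = in.readInt();
    }

    public int compareTo(WordPair o) {
        int c = word.compareTo(o.word);
        return c != 0 ? c : Integer.compare(o.count, count); // higher count first
    }

    @Override
    public String toString() {
        return word + "\t" + count;
    }

    // note: also override hashCode()/equals() if this key is used with the default HashPartitioner
}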
Before map output is sent to reduce,
a Combiner can first do local aggregation on each map task's output. This is an optimization and is not used by default; it is only safe when the operation is commutative and associative (sums and counts qualify, averages do not).
A combiner extends Reducer, so if the business logic allows, the reduce class can be reused directly:
job.setCombinerClass(MrReduce.class); // local aggregation on the map side, reusing the reduce class
Between map and reduce,
if NumReduceTasks is set so that there are multiple partitions, partitioning defaults to hashcode modulo the task count; you can also write a custom Partitioner to partition by a specific rule and register it with job.setPartitionerClass(...), as sketched below.
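A minimal sketch of a custom Partitioner, under the hypothetical rule that all words starting with the same character should land in the same reduce task (FirstLetterPartitioner is an invented name, not part of the example above):

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class FirstLetterPartitioner extends Partitioner<Text, IntWritable> {
    @Override
    public int getPartition(Text key, IntWritable value, int numReduceTasks) {
        if (key.getLength() == 0) {
            return 0; // send empty keys to the first reduce task
        }
        // words sharing a first character always map to the same partition
        return key.toString().charAt(0) % numReduceTasks;
    }
}

It would be registered in the driver with job.setPartitionerClass(FirstLetterPartitioner.class); note that job.setNumReduceTasks must be set above 1 for partitioning to have any visible effect.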
Some finer points
For the overall flow of the MapReduce stages, see https://www.cnblogs.com/SparseMatrix/p/5255523.html