MapReduce Enhancements
ReduceTask partitioning
Records with the same key are sent to the same reduce.
There is one ReduceTask by default; the number can be set manually, e.g. three tasks with job.setNumReduceTasks(3);
Partitioning example
Requirement:
Given a text file whose fifth field holds the lottery result value, split the records into two output files: results above 15 and results of 15 and below.
Flow analysis
The Mapper emits each whole line as k2 (with a NullWritable v2); a custom Partitioner parses the result field and routes records above 15 to partition 0 and the rest to partition 1; two ReduceTasks then write the two output files.
Code implementation
Note: when partition numbers are assigned manually, the job must be packaged and run on the cluster; running it locally will throw an error.
PartitionMain
package cn.nina.mr.demo1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class PartitionMain extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        //Get the Job object that wraps the job configuration
        Job job = Job.getInstance(super.getConf(), "myPartition");
        //Required so the packaged jar can be run on the cluster
        job.setJarByClass(PartitionMain.class);
        //Step 1: read the input file and parse it into k1, v1
        TextInputFormat.addInputPath(job, new Path(args[0]));
        job.setInputFormatClass(TextInputFormat.class);
        //Step 2: custom mapper logic
        job.setMapperClass(PartitionMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        //Step 3: partitioning -- records with the same key go to the same reduce
        job.setPartitionerClass(PartitionOwn.class);
        //Step 4: sort
        //Step 5: combine
        //Step 6: group
        //Step 7: reduce logic
        job.setReducerClass(PartitionReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        //Be sure to set the number of reduce tasks!
        //With more reduce tasks than partitions you get empty output files;
        //with fewer, some reduce tasks have to process data from several partitions.
        job.setNumReduceTasks(2);
        //Step 8: output
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path(args[1]));
        boolean b = job.waitForCompletion(true);
        return b ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int run = ToolRunner.run(new Configuration(), new PartitionMain(), args);
        System.exit(run);
    }
}
PartitionMapper
package cn.nina.mr.demo1;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class PartitionMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        //Emit k2, v2: k2 is the whole line of text, v2 carries nothing
        context.write(value, NullWritable.get());
    }
}
PartitionOwn
package cn.nina.mr.demo1;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class PartitionOwn extends Partitioner<Text, NullWritable> {
    @Override
    public int getPartition(Text text, NullWritable nullWritable, int numPartitions) {
        //Split the line and inspect the result field (index 5)
        String line = text.toString();
        String[] split = line.split("\t");
        if (Integer.parseInt(split[5]) > 15) {
            return 0;   //results above 15 go to partition 0
        } else {
            return 1;   //results of 15 and below go to partition 1
        }
    }
}
PartitionReducer
package cn.nina.mr.demo1;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class PartitionReducer extends Reducer<Text, NullWritable, Text, NullWritable> {
    //Write the data out unchanged
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        context.write(key, NullWritable.get());
    }
}
POM file
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>java_hadoop</artifactId>
        <groupId>org.example</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>
    <artifactId>day04_mr</artifactId>

    <repositories>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>
    </repositories>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0-mr1-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/junit/junit -->
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                    <!-- <verbal>true</verbal>-->
                </configuration>
            </plugin>
            <!-- packaging plugin: jar with dependencies -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.4.3</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <minimizeJar>true</minimizeJar>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
Steps to run after packaging the jar
Running the partition jar:
- hdfs dfs -mkdir /partitionin
- Upload the partition.csv file to a folder on the server
- hdfs dfs -put partition.csv /partitionin
- Upload original-day04_mr-1.0-SNAPSHOT.jar to a folder on the server
- hadoop jar original-day04_mr-1.0-SNAPSHOT.jar cn.nina.mr.demo1.PartitionMain /partitionin /partitionout
//Note: the input path is looked up on HDFS by default, and the output path must point to a directory that does not exist yet
MapReduce serialization
Serialization is the process of turning structured objects into a byte stream.
Deserialization is the reverse process: turning a byte stream back into structured objects.
Whenever objects are passed between processes or persisted, they must be serialized into a byte stream;
conversely, byte streams that are received or read from disk must be deserialized back into objects.
Hadoop does not reuse Java's built-in serialization; it uses the Writable interface instead, and any class that implements Writable can be serialized.
Writable also has a sub-interface, WritableComparable, which supports both serialization and key comparison.
The sorting example below defines a custom key that implements WritableComparable to get sorting.
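As a minimal sketch of the difference, a plain Writable only has to implement write() and readFields(); the PairWritable class and its fields here are made up for illustration and are not used elsewhere in these examples:

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

//A value class that Hadoop can serialize, but that cannot be compared as a key
public class PairWritable implements Writable {
    private String name;
    private int count;

    //Hadoop creates Writable instances reflectively, so a no-arg constructor is required
    public PairWritable() {}

    //Serialization: write the fields in a fixed order
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(name);
        out.writeInt(count);
    }

    //Deserialization: read the fields back in the same order they were written
    @Override
    public void readFields(DataInput in) throws IOException {
        this.name = in.readUTF();
        this.count = in.readInt();
    }
}

To use such a class as a sorted k2 it also needs compareTo, which is exactly what WritableComparable adds in the K2Bean example below.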
MapReduce sorting
Sorting in MapReduce:
Sorting is built in: by default k2 is sorted in dictionary (lexicographic) order.
If you only need serialization, implement the Writable interface.
If you only need sorting, implement the Comparable interface.
If you need both serialization and sorting, implement Writable and Comparable, or simply WritableComparable.
The data looks like this:
a 1
a 9
b 3
a 7
b 8
b 10
a 5
a 9
Requirement: sort the data with a secondary sort; if the first column is equal, compare the value of the second column.
Wrap the two fields into a JavaBean and use it as k2.
Sorting example
JavaBean code
To sort the second column in descending order, return -i1 from compareTo;
to sort the first column in descending order, return -i (see the sketch after the K2Bean class below).
package cn.nina.mr.demo2;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class K2Bean implements WritableComparable<K2Bean> {
    private String first;
    private int second;

    //Comparison used for sorting
    @Override
    public int compareTo(K2Bean o) {
        //Compare the first field; only if it is equal, compare the second field
        int i = this.first.compareTo(o.first);
        if (i == 0) {
            int i1 = Integer.compare(this.second, o.second);
            return i1;
        } else {
            //Return the result of the first comparison directly
            return i;
        }
    }

    //Serialization
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(first);
        out.writeInt(second);
    }

    //Deserialization
    @Override
    public void readFields(DataInput in) throws IOException {
        this.first = in.readUTF();
        this.second = in.readInt();
    }

    public String getFirst() {
        return first;
    }

    public void setFirst(String first) {
        this.first = first;
    }

    public int getSecond() {
        return second;
    }

    public void setSecond(int second) {
        this.second = second;
    }

    @Override
    public String toString() {
        return first + '\t' + second;
    }
}
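For the descending variants mentioned above, only compareTo changes; a drop-in sketch for K2Bean that keeps the first column ascending and sorts the second column descending:

    //Replacement for K2Bean.compareTo: second column in descending order
    @Override
    public int compareTo(K2Bean o) {
        int i = this.first.compareTo(o.first);
        if (i == 0) {
            int i1 = Integer.compare(this.second, o.second);
            return -i1;   //negating i1 reverses the order of the second column
        } else {
            return i;     //return -i here instead to reverse the first column
        }
    }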
Mapper
package cn.nina.mr.demo2;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class SortMapper extends Mapper<LongWritable, Text, K2Bean, NullWritable> {
    //Read each line and wrap the two fields into K2
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] split = value.toString().split("\t");
        K2Bean k2Bean = new K2Bean();
        k2Bean.setFirst(split[0]);
        k2Bean.setSecond(Integer.parseInt(split[1]));
        context.write(k2Bean, NullWritable.get());
    }
}
Reducer
package cn.nina.mr.demo2;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class SortReducer extends Reducer<K2Bean, NullWritable, K2Bean, NullWritable> {
    @Override
    protected void reduce(K2Bean key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        //Iterate over the values so duplicate records are not collapsed,
        //then write the already sorted keys straight out
        for (NullWritable value : values) {
            context.write(key, NullWritable.get());
        }
    }
}
Main
package cn.nina.mr.demo2;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class SortMain extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {
        Job job = Job.getInstance(super.getConf(), "sort");
        //Step 1: parse the input file into k1, v1
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///C:\\Users\\Yichun\\Desktop\\hadoop\\hadoop_04\\排序\\input"));
        //Step 2: custom map
        job.setMapperClass(SortMapper.class);
        job.setMapOutputKeyClass(K2Bean.class);
        job.setMapOutputValueClass(NullWritable.class);
        //Steps 3-6 are omitted (defaults are used)
        //Step 7: reduce
        job.setReducerClass(SortReducer.class);
        job.setOutputKeyClass(K2Bean.class);
        job.setOutputValueClass(NullWritable.class);
        //Step 8: output
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("file:///C:\\Users\\Yichun\\Desktop\\hadoop\\hadoop_04\\排序\\output"));
        //Submit the job
        boolean b = job.waitForCompletion(true);
        return b ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int run = ToolRunner.run(new Configuration(), new SortMain(), args);
        System.exit(run);
    }
}
MapReduce counters
Example
To count how many records the Map phase receives, add the following to the Mapper class:
Counter counter = context.getCounter("MR_COUNT", "MapRecordCounter");
counter.increment(1L);
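For context, a minimal sketch of where those two lines sit inside a map() method; the mapper class here is a hypothetical line-forwarding mapper rather than one of the classes above, and the group/counter names are just free-form labels:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class CountingMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        //Increment the custom counter once per input record
        Counter counter = context.getCounter("MR_COUNT", "MapRecordCounter");
        counter.increment(1L);
        context.write(value, NullWritable.get());
    }
}

The counter totals are printed together with the built-in job counters when waitForCompletion(true) finishes.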
Combiner
A combiner is a mini reduce that runs on the map side: it receives k2, v2 and outputs k2, v2.
A combiner must never change the final result. Each map task can produce a large amount of local output; the combiner merges that map-side output first, which reduces the volume of data transferred between the map and reduce nodes and improves network I/O. It is one of MapReduce's optimizations: less data is sent to the reduce side.
Implementation steps:
- Write a custom combiner that extends Reducer and overrides the reduce method
- Register it on the job: job.setCombinerClass(CustomCombiner.class)
A combiner can only be applied when it does not affect the final business logic, and the combiner's output key/value types must match the reducer's input key/value types. A sketch follows the note below.
Note:
Do not use a combiner when computing an average, because averaging map-side partial results would change the final answer.
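A minimal sketch of a combiner that is safe to use (summing counts, which can be merged in any order); CustomCombiner and the word-count-style types are assumptions for illustration, matching the job.setCombinerClass(CustomCombiner.class) call above:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

//A combiner is just a Reducer whose input and output types both match the map output types
public class CustomCombiner extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        long sum = 0;
        //Pre-aggregate the counts produced by one map task for this key
        for (LongWritable value : values) {
            sum += value.get();
        }
        context.write(key, new LongWritable(sum));
    }
}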