Following the official WordCount example, let's hand-write our own WordCount program. The plan: a WordCountMapper class that extends Mapper, a class that extends Reducer, and a driver class. Before writing them, let's first take a look at the two parent classes.
The Mapper class:
public class Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {

  public abstract class Context
      implements MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
  }

  /**
   * Called once at the beginning of the task.
   */
  protected void setup(Context context) throws IOException, InterruptedException {
  }

  /**
   * Called once for each key/value pair in the input split.
   */
  @SuppressWarnings("unchecked")
  protected void map(KEYIN key, VALUEIN value,
                     Context context) throws IOException, InterruptedException {
    context.write((KEYOUT) key, (VALUEOUT) value);
  }

  /**
   * Called once at the end of the task.
   */
  protected void cleanup(Context context) throws IOException, InterruptedException {
  }

  // run() chains the methods above together: setup() once, then map() once
  // for each key/value pair in the split (one line of input for text files,
  // which is how the word counting gets driven), and finally cleanup().
  public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    try {
      while (context.nextKeyValue()) {
        map(context.getCurrentKey(), context.getCurrentValue(), context);
      }
    } finally {
      cleanup(context);
    }
  }
}
The Reducer class:
public class Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {

  public abstract class Context
      implements ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
  }

  /**
   * Called once at the start of the task.
   */
  protected void setup(Context context) throws IOException, InterruptedException {
  }

  /**
   * This method is called once for each key.
   */
  @SuppressWarnings("unchecked")
  protected void reduce(KEYIN key, Iterable<VALUEIN> values, Context context
                        ) throws IOException, InterruptedException {
    for (VALUEIN value : values) {
      context.write((KEYOUT) key, (VALUEOUT) value);
    }
  }

  /**
   * Called once at the end of the task.
   */
  protected void cleanup(Context context) throws IOException, InterruptedException {
  }

  public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    try {
      while (context.nextKey()) {
        reduce(context.getCurrentKey(), context.getValues(), context);
        // If a back up store is used, reset it
        Iterator<VALUEIN> iter = context.getValues().iterator();
        if (iter instanceof ReduceContext.ValueIterator) {
          ((ReduceContext.ValueIterator<VALUEIN>) iter).resetBackupStore();
        }
      }
    } finally {
      cleanup(context);
    }
  }
}
Setting up the environment:
First, create a Maven project and add the following dependencies:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.isea.mapreduce</groupId>
    <artifactId>mapreduce</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>RELEASE</version>
        </dependency>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-core</artifactId>
            <version>2.8.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.7.2</version>
        </dependency>
    </dependencies>
</project>
Then add a log4j.properties file:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
The Map phase:
package com.isea.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private Text k = new Text();
    private IntWritable v = new IntWritable(1);

    // map() is called once for every input <k, v> pair, i.e. once per line
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Get one line
        String line = value.toString();
        // 2. Split it into words
        String[] words = line.split(" ");
        // 3. Emit <word, 1> for each word
        for (String word : words) {
            k.set(word);
            context.write(k, v);
        }
    }
}
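To trace the flow: for a single input line such as "hello hello world" (a hypothetical line, just for illustration), this map() emits <hello, 1>, <hello, 1>, <world, 1>; the framework then sorts and groups these pairs by key before they reach the reducer.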
The Reduce phase:
package com.isea.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable v = new IntWritable();

    // reduce() is called once for each key
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // 1. Sum the counts; the sum must restart at zero for every key,
        //    so it is a local variable rather than an instance field
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get(); // each value is 1, so this is effectively sum++
        }
        // 2. Emit <word, total>
        v.set(sum);
        context.write(key, v);
    }
}
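Continuing the hypothetical line above, reduce() receives each key together with all of its grouped values: for <hello, [1, 1]> it writes <hello, 2>, and for <world, [1]> it writes <world, 1>.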
The Driver:
package com.isea.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WordCountDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Get the configuration and a Job instance
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        // 2. Tell the job which jar to ship, located via this class
        job.setJarByClass(WordCountDriver.class);
        // 3. Wire the Mapper and Reducer classes into the job
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4. Declare the Mapper's output <k, v> types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Declare the final output <k, v> types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input path and the output path (the output directory must not exist yet)
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 7. Submit the job and wait for completion
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
Run the program, passing the input and output paths as program arguments in the run configuration. The input file and the resulting output look like the following:
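As a minimal stand-in for the original screenshots (the paths and file contents here are hypothetical): with program arguments e:/input e:/output and an input file e:/input/hello.txt containing

hello world
hello hadoop

the job creates e:/output with a part-r-00000 file holding the tab-separated counts:

hadoop	1
hello	2
world	1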
Understanding the MapReduce flow by debugging:
Breakpoint locations: in the Mapper, at the line that reads one line of input; in the Reducer, at the summing for loop.
Map processing the first line of data:
Map processing the last line of data:
Up to this point the program has not entered the Reduce phase at all, which shows that Reduce only starts after the Map phase has finished. Next, the program steps into the reduce process:
After resuming once more, we get the counts for the remaining words:
Running on the cluster
The word-count code we implemented above is essentially the same as the official example, so we can package our program and run it on the cluster.
First, add the following build plugins:
<build>
    <plugins>
        <plugin>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>2.3.2</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
            </configuration>
        </plugin>
        <plugin>
            <artifactId>maven-assembly-plugin</artifactId>
            <configuration>
                <descriptorRefs>
                    <descriptorRef>jar-with-dependencies</descriptorRef>
                </descriptorRefs>
                <archive>
                    <manifest>
                        <mainClass>com.isea.mr.WordCountDriver</mainClass>
                    </manifest>
                </archive>
            </configuration>
            <executions>
                <execution>
                    <id>make-assembly</id>
                    <phase>package</phase>
                    <goals>
                        <goal>single</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>
Then package the project:
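Packaging can be done from the IDE's Maven panel or from the command line; with the assembly plugin configured above, a command along these lines produces both a plain jar and a jar-with-dependencies under target/:

mvn clean package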
Then upload the packaged jar to the cluster:
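Any file-transfer tool will do; for example with scp (the hostname and target path here are hypothetical, and the jar is renamed to wc.jar to match the command below):

scp target/mapreduce-1.0-SNAPSHOT.jar isea@hadoop108:/opt/module/hadoop-2.7.2/wc.jar

Since the cluster already provides the Hadoop libraries, the plain jar without bundled dependencies is sufficient here.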
Next, run it:
[isea@hadoop108 hadoop-2.7.2]$ hadoop jar wc.jar com.isea.mr.WordCountDriver /isea/input/ /isea/output
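If the job completes successfully, the result can be checked straight from HDFS; the reducer output follows the standard part-r-00000 naming convention:

[isea@hadoop108 hadoop-2.7.2]$ hadoop fs -cat /isea/output/part-r-00000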