1.pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.bogdata.mr.wcdemo</groupId>
    <artifactId>WordcountMapper</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
            <version>2.6.4</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-common</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
            <version>2.6</version>
        </dependency>
        <!-- hadoop-common 2.x pulls in jdk.tools, which is not in Maven Central;
             point it at the local JDK's tools.jar via system scope -->
        <dependency>
            <groupId>jdk.tools</groupId>
            <artifactId>jdk.tools</artifactId>
            <version>1.7</version>
            <scope>system</scope>
            <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
        </dependency>
    </dependencies>
</project>
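With this POM in place, a plain Maven build produces the job jar (a minimal sketch; the jar name follows from the artifactId and version above):

mvn clean package
# builds target/WordcountMapper-0.0.1-SNAPSHOT.jar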
2.WordcountMapper
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * KEYIN: by default, the byte offset at which the line the MR framework just read
 * begins. Logically a Long, but data that travels across the network has to be
 * serialized, and Hadoop ships its own leaner serialization interface, so we use
 * LongWritable instead of Long.
 *
 * VALUEIN: by default, the content of the line the MR framework just read; a
 * String, so for the same reason we use Text.
 *
 * KEYOUT: the key of the data emitted after the user-defined logic has run; here
 * it is a word, a String, so again Text.
 * VALUEOUT: the number of occurrences of the word, an Integer, so IntWritable.
 * @author bhj
 */
public class WordcountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    /**
     * The business logic of the map phase lives in this overridden map() method.
     * The map task invokes our map() method once per line of input, then collects
     * everything it emitted into files that are shipped to the reducers.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // First convert the Text the map task hands us into a plain String
        String line = value.toString();
        // Split the line into words on single spaces
        String[] words = line.split(" ");
        // Emit each word as <word, 1>
        for (String word : words) {
            // The word is the key and the count 1 is the value, so the subsequent
            // shuffle can partition by word and route identical words to the same
            // reduce task. (The Mapper generics above already fix these types.)
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
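To sanity-check the mapper without a cluster, a small MRUnit test is enough. This is a sketch under assumptions: the test class name is mine, and org.apache.mrunit:mrunit:1.1.0 (hadoop2 classifier) plus JUnit 4 sit on the test classpath:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.MapDriver;
import org.junit.Test;

public class WordcountMapperTest {
    @Test
    public void emitsOneCountPerWord() throws Exception {
        // Feed one line in; expect one <word, 1> pair per word, in emit order
        MapDriver.newMapDriver(new WordcountMapper())
                 .withInput(new LongWritable(0), new Text("hello world hello"))
                 .withOutput(new Text("hello"), new IntWritable(1))
                 .withOutput(new Text("world"), new IntWritable(1))
                 .withOutput(new Text("hello"), new IntWritable(1))
                 .runTest();
    }
}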
3.WordcountReducer
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * KEYIN and VALUEIN match the mapper's KEYOUT and VALUEOUT.
 *
 * KEYOUT and VALUEOUT are the output types of the custom reduce logic:
 * KEYOUT is the word,
 * VALUEOUT is its total count.
 * @author bhj
 */
public class WordcountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    /**
     * Given <hello,1>,<hello,1>,<hello,1>,<hello,1>,<hello,1>, adding up all the
     * 1s yields the total; likewise for <banana,1>,<banana,1>,<banana,1>,<banana,1>.
     * The key parameter is the key shared by one group of identical-word kv pairs.
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int count = 0;
        /**
         * Approach one (needs java.util.Iterator):
         * Iterator<IntWritable> iterator = values.iterator();
         * while (iterator.hasNext()) {
         *     count += iterator.next().get();
         * }
         */
        // Approach two: the enhanced for loop
        for (IntWritable value : values) {
            count += value.get();
        }
        // Results land in one output file per reduce task
        context.write(key, new IntWritable(count));
    }
}
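The reducer can be checked the same way; a sketch under the same MRUnit assumptions, with the grouped values handed in as a list:

import java.util.Arrays;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
import org.junit.Test;

public class WordcountReducerTest {
    @Test
    public void sumsTheOnesForOneKey() throws Exception {
        // One key with its grouped values in, one <word, total> pair out
        ReduceDriver.newReduceDriver(new WordcountReducer())
                    .withInput(new Text("hello"),
                               Arrays.asList(new IntWritable(1), new IntWritable(1), new IntWritable(1)))
                    .withOutput(new Text("hello"), new IntWritable(3))
                    .runTest();
    }
}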
4.WordcountDriver
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Effectively a client of the YARN cluster: it bundles the parameters our MR
 * program needs, points at the jar, and finally submits the request to YARN,
 * which ships the jar out and runs it.
 * @author bhj
 */
public class WordcountDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // These two settings need no code when running on the Linux cluster,
        // because Hadoop's own configuration files already supply them:
        // conf.set("mapreduce.framework.name", "yarn");
        // conf.set("yarn.resourcemanager.hostname", "node1");
        // Picks up the default configuration
        Job job = Job.getInstance(conf);
        // job.setJar("/home/hadoop/wc.jar"); // hard-coding the path is brittle
        // Locate the jar containing this program via the class instead
        job.setJarByClass(WordcountDriver.class);
        // The mapper and reducer classes this job uses
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(WordcountReducer.class);
        // The kv types the mapper emits
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // The kv types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // The directory holding the job's raw input files (several may be given)
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // The directory the job writes its results to
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        /**
         * Hand the job's parameters, and the jar holding its classes, to YARN:
         * job.submit()                 submits the job and returns immediately;
         * job.waitForCompletion(true)  submits the job and waits for it to finish,
         *                              true meaning progress is reported as it runs.
         */
        boolean completion = job.waitForCompletion(true);
        // Terminate the running JVM; by convention a non-zero status code
        // signals abnormal termination
        System.exit(completion ? 0 : 1);
    }
}
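Once the jar is on a cluster node, submission looks roughly like this (the jar location and HDFS paths are placeholders; prefix WordcountDriver with its package name if one is declared, and note that the output directory given as args[1] must not exist yet or the job will refuse to start):

hadoop jar WordcountMapper-0.0.1-SNAPSHOT.jar WordcountDriver /wordcount/input /wordcount/output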