学习阶段尝试写的第一个MapReduce程序,测试成功跑过。没有什么技术难度。记录下来,以备复习使用。
Mapper代码如下:
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class WordCountMapper extends
Mapper<LongWritable, Text, Text, LongWritable> {

// Reused output objects. map() runs once per input record, so allocating a
// fresh Text/LongWritable for every word creates needless garbage; Hadoop's
// context.write serializes the values immediately, making reuse safe.
private final Text outKey = new Text();
private static final LongWritable ONE = new LongWritable(1);

/**
 * Emits a {@code <word, 1>} pair for every space-separated token in the
 * input line.
 *
 * @param key     byte offset of the line within the input split (unused)
 * @param value   the text of one input line
 * @param context Hadoop context used to emit intermediate key/value pairs
 */
@Override
protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
// Get the content of one input line.
String line = value.toString();
// Split the line into words. StringUtils.split treats adjacent
// separators as one, so no empty tokens are produced.
String[] words = StringUtils.split(line, " ");
// Emit <word, 1> for each token.
for (String word: words) {
outKey.set(word);
context.write(outKey, ONE);
}
}
}
Reducer代码如下:
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

// Reused output value: reduce() runs once per distinct word, and Hadoop
// serializes the value on write, so a single instance can be recycled
// instead of allocating a new LongWritable per key.
private final LongWritable result = new LongWritable();

/**
 * Sums all the partial counts emitted by the mapper for one word and
 * writes the {@code <word, total>} pair to the job output.
 *
 * @param key     the word
 * @param values  the per-occurrence counts (each 1 from the mapper,
 *                possibly pre-summed if a combiner is configured)
 * @param context Hadoop context used to emit the final key/value pair
 */
@Override
protected void reduce(Text key, Iterable<LongWritable> values, Context context)
throws IOException, InterruptedException {
// Accumulate the total occurrence count for this word.
long count = 0;
for (LongWritable value: values) {
count += value.get();
}
// Emit the final <word, count> pair.
result.set(count);
context.write(key, result);
}
}
主程序代码如下:
package com.ligy.mapreduce.wordcount;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Describes the word-count job (which mapper class, which reducer class,
 * where the input files live, where the results go) and submits it to the
 * Hadoop cluster, blocking until it finishes.
 *
 * <p>The input and output paths may be supplied as the first and second
 * command-line arguments; when omitted, the original hard-coded HDFS paths
 * are used, preserving the previous behavior.
 *
 * @author gyli
 *
 */
// com.ligy.mapreduce.wordcount.WordCountRunner
public class WordCountRunner {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
// Allow paths to be overridden on the command line; fall back to the
// old hard-coded defaults so existing invocations keep working.
String inputPath = args.length > 0 ? args[0]
: "hdfs://master:9000/data/test/GenericDemo5.java";
String outputPath = args.length > 1 ? args[1]
: "hdfs://master:9000/data/test/output5";

Configuration conf = new Configuration();
// Jar that gets shipped to the cluster when submitting from a remote client.
conf.set("mapreduce.job.jar", "wcjob.jar");
Job wcjob = Job.getInstance(conf);
// Locate the jar containing the job's classes via this driver class.
wcjob.setJarByClass(WordCountRunner.class);
// Mapper class for this job.
wcjob.setMapperClass(WordCountMapper.class);
// Reducer class for this job.
wcjob.setReducerClass(WordCountReducer.class);
// Key/value types emitted by the mapper.
wcjob.setMapOutputKeyClass(Text.class);
wcjob.setMapOutputValueClass(LongWritable.class);
// Key/value types emitted by the reducer (the job's final output).
wcjob.setOutputKeyClass(Text.class);
wcjob.setOutputValueClass(LongWritable.class);
// Path of the raw input data to process.
FileInputFormat.setInputPaths(wcjob, inputPath);
// Path where the job's results are written (must not already exist).
FileOutputFormat.setOutputPath(wcjob, new Path(outputPath));
// Submit the job and wait; 'true' prints progress to the console.
boolean res = wcjob.waitForCompletion(true);
System.exit(res ? 0 : 1);
}
}