WordCount is the basic, entry-level MapReduce program under Hadoop; it counts how many times each word occurs in a set of text files (the most basic usage). MapReduce is a programming framework for distributed computation and the core framework for developing Hadoop-based data analysis applications. It follows a "divide and conquer" approach: the Mapper is responsible for the "divide" step, and the Reducer is responsible for aggregating the partial results.
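For instance, given an input file words.txt containing the two lines below (sample data, not from the original post), the Mapper emits one <word, 1> pair per word, the shuffle phase groups the pairs by key, and the Reducer sums each group:

hello world
hello hadoop

map output:     <hello,1> <world,1> <hello,1> <hadoop,1>
after shuffle:  <hadoop,[1]> <hello,[1,1]> <world,[1]>
reduce output:  hadoop 1    hello 2    world 1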
#Custom Mapper
package com.vm.mapreduce;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * Map phase: emits a <word, 1> pair for every word in each input line.
 *
 * @author 1
 *
 */
public class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // value holds the text of one input line
        String line = value.toString();
        String[] split = line.split(" "); // split the line into words on spaces
        for (String word : split) {
            context.write(new Text(word), new LongWritable(1L));
        }
    }
}
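One optional refinement, sketched here as a hypothetical variant named MyMapper2: map() runs once per input record, so instead of allocating a new Text and LongWritable for every word, the writables can be reused across calls. Hadoop serializes their contents during context.write(), so reuse is safe:

package com.vm.mapreduce;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyMapper2 extends Mapper<LongWritable, Text, Text, LongWritable> {
    // reused across map() calls; write() copies the current contents
    private final Text outKey = new Text();
    private static final LongWritable ONE = new LongWritable(1L);

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String word : value.toString().split(" ")) {
            outKey.set(word);           // overwrite the buffer instead of allocating
            context.write(outKey, ONE); // the count is always 1, so a shared constant works
        }
    }
}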
#Custom Reducer
package com.vm.mapreduce;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * Reduce phase: sums all counts emitted for the same word.
 *
 * @author 1
 *
 */
public class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values,
            Reducer<Text, LongWritable, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        long count = 0L;
        // values contains every count emitted for this word; add them up
        for (LongWritable value : values) {
            count += value.get();
        }
        context.write(key, new LongWritable(count));
    }
}
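Since this reduce function is a plain sum (associative and commutative), the same class can also serve as a combiner, pre-aggregating <word, 1> pairs on the map side to shrink the shuffle. If you want that, one extra line in the driver below is enough (not part of the original code):

    job.setCombinerClass(MyReducer.class);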
#Test code
package com.vm.mapreduce;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * Test driver 2: configures and submits the WordCount job.
 *
 * @author 1
 *
 */
public class WordCount2 extends Configured implements Tool {
    // HDFS input path
    public static final String INPUT_PATH = "hdfs://mini1:9000/wordcount/input/words.txt";
    // HDFS output path
    public static final String OUTPUT_PATH = "hdfs://mini1:9000/wordcount/output8";

    @Override
    public int run(String[] args) throws Exception {
        // delete the output path if it already exists (otherwise the job fails)
        System.setProperty("HADOOP_USER_NAME", "hadoop");
        FileSystem fs = FileSystem.get(new URI(OUTPUT_PATH), getConf(), "hadoop");
        Path outPath = new Path(OUTPUT_PATH);
        if (fs.exists(outPath)) {
            fs.delete(outPath, true);
            System.out.println("output path deleted ... OK");
        }
        Job job = Job.getInstance(getConf(), "wdCount");
        // let Hadoop ship the jar that contains this class to the cluster
        job.setJarByClass(WordCount2.class);
        // specify the input directory
        FileInputFormat.setInputPaths(job, new Path(INPUT_PATH));
        // specify the custom mapper class
        job.setMapperClass(MyMapper.class);
        // specify the mapper's output <k,v> types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        // specify the reducer class to run
        job.setReducerClass(MyReducer.class);
        // specify the reducer's output <k,v> types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        // set the output directory
        FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH));
        // *********submit the job and wait***********
        boolean success = job.waitForCompletion(true);
        // return the exit code instead of calling System.exit() here,
        // so main() (and ToolRunner) can see the result
        return success ? 0 : 1;
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        int res = ToolRunner.run(conf, new WordCount2(), args);
        System.out.println("=================");
        System.exit(res);
    }
}
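To try the job on a cluster, package the classes into a jar and submit it with the hadoop command; the jar name below is just a placeholder:

hadoop jar wordcount.jar com.vm.mapreduce.WordCount2
hdfs dfs -cat /wordcount/output8/part-r-00000

The second command prints the reducer's output; with a single reducer the result lands in part-r-00000.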