Source: https://www.cnblogs.com/jetdw/p/7238105.html
1. MapReduce code
package test.mapreduce;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
/**
 * The classic example of counting how many times each word occurs in the input.
 */
public class WordCount {

    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        /**
         * key:     byte offset of the current line; it includes the line
         *          terminators, so it differs between Windows and Linux input files
         * value:   one line of input
         * context: used to emit the map output key/value pairs
         */
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                // Emit (word, 1) for every token in the line
                context.write(word, one);
            }
        }
    }
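
    // Example (illustrative, not from the original post): for the input line
    // "hello world hello", the mapper emits (hello, 1), (world, 1), (hello, 1);
    // the framework then groups these pairs by key before the reduce phase.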
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        /**
         * key:     a key produced by the map phase; Hadoop groups all values that
         *          share the same key into one list, which is passed in as values
         * context: used to emit the final result
         */
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                // Accumulate every value for this key
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
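
    // Example (illustrative): continuing the line above, the reducer receives
    // ("hello", [1, 1]) and ("world", [1]) and emits ("hello", 2) and ("world", 1).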
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        // Pass conf to the job so the options parsed above actually take effect
        Job job = Job.getInstance(conf);
        // The jar that contains this job's classes, located via this class
        job.setJarByClass(WordCount.class);
        // The mapper class this job uses
        job.setMapperClass(TokenizerMapper.class);
        // The combiner class: pre-aggregates map output locally; summing is
        // associative and commutative, so the reducer can double as the combiner
        job.setCombinerClass(IntSumReducer.class);
        // The reducer class this job uses
        job.setReducerClass(IntSumReducer.class);
        // The key type of this job's (reducer's) output
        job.setOutputKeyClass(Text.class);
        // The value type of this job's (reducer's) output
        job.setOutputValueClass(IntWritable.class);
        // Number of reduce tasks
        job.setNumReduceTasks(1);
        // If the output directory already exists, delete it first
        Path path = new Path(otherArgs[1]);
        FileSystem fileSystem = path.getFileSystem(conf); // resolve the FileSystem for this path
        if (fileSystem.exists(path)) {
            fileSystem.delete(path, true); // true: delete recursively, even if the directory is non-empty
        }
        // Path of the raw input data this job processes
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        // Path where this job writes its results
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        // Submit the job and wait for completion
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
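For example, given an input file bbb.txt with the following contents (a made-up sample, not from the original post):

hello world
hello hadoop

the job writes its result to a file such as /output/part-r-00000 (the standard output file name when there is a single reducer), containing:

hadoop	1
hello	2
world	1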
2. Running on Hadoop
hadoop jar /usr/local/hadoop_demo.jar test.mapreduce.WordCount /bbb.txt /output
hadoop jar [jar file] [fully qualified class containing main, including package] [input file to count] [output path]
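The input file must already be in HDFS before the job runs, and the result can be read back from HDFS afterwards. A minimal sketch using the standard HDFS shell, with paths matching the example above:

hdfs dfs -put bbb.txt /bbb.txt
hadoop jar /usr/local/hadoop_demo.jar test.mapreduce.WordCount /bbb.txt /output
hdfs dfs -cat /output/part-r-00000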