Hadoop MapReduce: a basic word-count example

package mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class WordCount {

    // On Windows, hadoop.home.dir must point at a local Hadoop installation
    // containing bin\winutils.exe, or the client fails at startup.
    static {
        System.setProperty("hadoop.home.dir", "D:\\soft\\hadoop\\hadoop-2.9.2");
    }
    public static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
        /**
         * @param key     byte offset at which the current line starts (not a line number)
         * @param value   the text of the current line
         * @param context the MapReduce context used to emit output
         * @throws IOException
         * @throws InterruptedException
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // 1. Split the line into words
            String line = value.toString();
            String[] words = line.split(" ");
            // 2. Map each word to a <word, 1> key-value pair
            for (String word : words) {
                // 3. Write the pair to the in-memory output buffer
                context.write(new Text(word), new LongWritable(1));
            }
        }
    }
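    // Optional optimization (a sketch, not required for correctness): allocating a new
    // Text/LongWritable per record creates needless garbage. Writables are mutable and
    // the framework serializes them when write() is called, so the mapper could safely
    // reuse two instances instead:
    //     private final Text outKey = new Text();
    //     private static final LongWritable ONE = new LongWritable(1);
    //     ...
    //     outKey.set(word);
    //     context.write(outKey, ONE);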
    public static class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
        /**
         * @param key     a single word
         * @param values  the counts emitted for that word, e.g. [1, 1, 1]
         * @param context the MapReduce context used to emit output
         * @throws IOException
         * @throws InterruptedException
         */
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
            long sum = 0L;
            // 1. Iterate over the counts collected for this word
            for (LongWritable value : values) {
                // 2. Accumulate the total
                sum += value.get();
            }
            // 3. Emit <word, total>
            context.write(key, new LongWritable(sum));
        }
    }
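    // With more than one reduce task, the shuffle routes each key with the default
    // HashPartitioner: partition = (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks.
    // Every occurrence of a given word therefore reaches the same reducer, which is
    // what makes the per-key sum above correct across multiple reduce tasks.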
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 0. Initialize a job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "word-count");
        /*
        When running a packaged jar on a cluster, tell Hadoop which jar to ship:
        job.setJarByClass(WordCount.class);
        */
        // 1. Input file(s)
        FileInputFormat.addInputPath(job, new Path(args[0]));
        // 2. Parallel map phase. The explicit map output key/value classes can be
        // omitted because they match the reduce output key/value classes set below.
        job.setMapperClass(MyMapper.class);
        // job.setMapOutputKeyClass(Text.class);
        // job.setMapOutputValueClass(LongWritable.class);
        // 3. Shuffle phase (handled internally by the framework)
        // 4. Reduce phase; two reduce tasks produce two output files
        job.setNumReduceTasks(2);
        job.setReducerClass(MyReducer.class);
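        // Optional (a sketch): because summing is associative and commutative, and the
        // reducer's input and output types are identical, the same class can also run
        // as a combiner to pre-aggregate counts on the map side and shrink the shuffle:
        // job.setCombinerClass(MyReducer.class);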
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        // 5. Output directory (must not exist yet, or the job fails fast)
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
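        // Optional convenience (a sketch; assumes args[1] is safe to remove): delete a
        // stale output directory first so reruns do not fail on FileAlreadyExistsException.
        // FileSystem fs = FileSystem.get(conf);
        // Path out = new Path(args[1]);
        // if (fs.exists(out)) { fs.delete(out, true); }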
        // 6. Submit the job and wait for it to finish (the driver's main entry point)
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
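
To run this on a cluster, uncomment job.setJarByClass(WordCount.class), package the class into a jar, and submit it with the hadoop CLI (the jar name and HDFS paths here are illustrative):

hadoop jar wordcount.jar mapreduce.WordCount /input/words.txt /output/wordcount

With setNumReduceTasks(2), the counts are split across /output/wordcount/part-r-00000 and part-r-00001.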