三个项目实现jar:
- 1.map
package com.doit.mr;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
import java.util.HashMap;
/**
*map程序逐行读取数据,将每一行数据的起始偏移量做为key,类型是LongWritable,将每一行数据的内容做为value(Text),传给我们写的map
经过map阶段的处理,输出的key是Text ,value是IntWritable
调用的机制
1,谁在调用,map再调用
2,怎么调:是在当前mapreduce过程中,每读取一行数据,调用一次map
*/
public class WordCountMap extends Mapper <LongWritable, Text,Text, IntWritable>{
<输入:LongWritable为key的类型,Text为value的类型>
///<输出:Text,,,,,,,,,,,LntWritable,,,,,,,,,,,,,,>
//输入:前俩个,输出:后两个
@Override
///重写方法
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String word = value.toString();
String[] words = word.split(" ");
split:用“ ”分块
for (String w:words
) {
count++;
} context.write(new Text(w),new IntWritable(1));}}
- 2.Reduce
package com.doit.mr;
import junit.framework.Test;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class WordCountReduce extends Reducer <Text, IntWritable, Text,IntWritable>{
@Override
protected void reduce(Text key, Iterable<IntWritable> values ,Context context) throws IOException, InterruptedException {
Integer count = 0;
for (IntWritable v: values
) {
count++;
}context.write(key ,new IntWritable(count));
}
}
- 3.Driver
package com.doit.mr;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import java.io.FileOutputStream;
import java.io.IOException;
public class WordCountDriver {
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Configuration conf = new Configuration();
conf.set("yarn.resorcemanager.hostname","node01");
conf.set("fs.defutFS","hdfs://node01:9000/");
Job job = Job.getInstance(conf);
job.setJarByClass(WordCountDriver.class);
//设置本次job使用map,reduce
job.setMapperClass(WordCountMap.class);
job.setReducerClass(WordCountReduce.class);
//设置本次map和reduce的输出类型
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
//指定本次job读取数据源时需要的组件:我们的原文件在hdsf中,用TextInputFormat
job.setInputFormatClass(TextInputFormat.class);
//制定本次job输出数据需要的组件:我们要输出到hdfs文件中,用TextOutputFormat
job.setOutputFormatClass(TextOutputFormat.class);
//设置输入路径
FileInputFormat.setInputPaths(job,new Path(args[0]));
FileOutputFormat.setOutputPath(job,new Path(args[1]));
//提交任务,客户端返回
job.submit();
boolean result = job.waitForCompletion(true);
System.exit(result ? 0 : 1);
}
}
实现jar 打包
网址:https://blog.csdn.net/njxiaoxiao79/article/details/85679992