Raw data (a file named "words" on HDFS):
hello tom
hello jerry
hello kitty
hello world
hello tom
Map stage
1. Read the input one line at a time.
2. Split each line into words.
3. Emit a 1 every time a word is seen.
Each call to the mapper receives one line, keyed by the line's byte offset in the file (see the sketch after the pairs below):
<0, "hello tom">
<10, "hello jerry">
<22, "hello kitty">
<34, "hello world">
<46, "hello tom">
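These keys are not line numbers; they are the byte offsets at which each line starts. A minimal standalone sketch (OffsetDemo is hypothetical, not part of the job) that reproduces the offsets above, assuming single-byte characters and '\n' line endings:

public class OffsetDemo {
    public static void main(String[] args) {
        String[] lines = {"hello tom", "hello jerry", "hello kitty", "hello world", "hello tom"};
        long offset = 0;
        for (String line : lines) {
            // Print the same <offset, line> pairs the mapper receives
            System.out.println("<" + offset + ", \"" + line + "\">");
            offset += line.length() + 1; // +1 for the trailing '\n'
        }
    }
}

The mapper that consumes these pairs: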
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * LongWritable  input key: the byte offset of the line within the file (a long), not a line number
 * Text          input value: one line of text, Hadoop's counterpart of String
 * Text          output key: a single word
 * IntWritable   output value: the count 1, Hadoop's counterpart of int
 */
public class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Read one line of text
        String line = value.toString();
        // Split the line into words on spaces
        String[] words = line.split(" ");
        // Emit each word once per occurrence
        for (String word : words) {
            // Wrap the word as a Text
            Text wordText = new Text(word);
            // Wrap the 1 as an IntWritable
            IntWritable outValue = new IntWritable(1);
            // Write out the (word, 1) pair
            context.write(wordText, outValue);
        }
    }
}
Reduce stage
1. Gather the 1s emitted for each word.
2. Iterate over them.
3. Sum them.
After the shuffle, each reduce call sees one word together with all of its 1s (a plain-Java sketch of that grouping follows the pairs below):
<hello, {1,1,1,1,1}>
<jerry, {1}>
<kitty, {1}>
<tom, {1,1}>
<world, {1}>
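The grouping above is performed by Hadoop's shuffle, which sorts the mapper's (word, 1) pairs by key and hands each key to the reducer with all of its values. A plain-Java sketch of that step (ShuffleDemo is hypothetical, for illustration only):

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class ShuffleDemo {
    public static void main(String[] args) {
        // The words emitted by WordCountMap, each standing for a (word, 1) pair
        String[] mapOutput = "hello tom hello jerry hello kitty hello world hello tom".split(" ");
        // TreeMap mirrors the sorted-by-key order the reducer sees
        TreeMap<String, List<Integer>> grouped = new TreeMap<>();
        for (String word : mapOutput) {
            grouped.computeIfAbsent(word, k -> new ArrayList<>()).add(1);
        }
        // Prints <hello, [1, 1, 1, 1, 1]>, <jerry, [1]>, and so on
        grouped.forEach((word, ones) -> System.out.println("<" + word + ", " + ones + ">"));
    }
}

The reducer that consumes these groups: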
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Text         input key: the word, serialized
 * IntWritable  input value: one of the 1s emitted for that word, serialized
 * Text         output key: the word, serialized
 * IntWritable  output value: the summed count, serialized
 */
public class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

    /**
     * key     the word
     * values  the stream of 1s collected for that word
     * context the handle used to emit output
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int sum = 0;
        // Add up all the 1s
        for (IntWritable i : values) {
            sum += i.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
Driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    public static void main(String[] args) throws Exception {
        // Create the configuration object
        Configuration conf = new Configuration();
        // Create the job object
        Job job = Job.getInstance(conf);
        // Specify the Mapper class
        job.setMapperClass(WordCountMap.class);
        // Specify the map output key type
        job.setMapOutputKeyClass(Text.class);
        // Specify the map output value type
        job.setMapOutputValueClass(IntWritable.class);
        // Input file for the map stage
        FileInputFormat.setInputPaths(job, new Path("C:\\Users\\1234\\Desktop\\123.txt"));
        // Specify the Reducer class
        job.setReducerClass(WordCountReduce.class);
        // Specify the reduce output key type
        job.setOutputKeyClass(Text.class);
        // Specify the reduce output value type
        job.setOutputValueClass(IntWritable.class);
        // Output directory for the reduce stage (must not already exist)
        FileOutputFormat.setOutputPath(job, new Path("C:\\Users\\1234\\Desktop\\456"));
        // Submit the job and wait for it to finish
        job.waitForCompletion(true);
    }
}
Output data
hello 5
jerry 1
kitty 1
tom 2
world 1