// MapReduce WordCount program
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCount {

    /**
     * TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable>.
     *
     * [One input file yields one map task, two files yield two; in general
     * there is one map task per input split.]
     * map reads the input contents, splits them on " \t\n\r\f", and emits a
     * word ==> one key/value pair for each token.
     *
     * @param LongWritable input key type (byte offset of the line)
     * @param Text input value type (the line of text)
     * @param Text output key type (the word)
     * @param IntWritable output value type (the count 1)
     *
     * Writable's main feature is that it tells the Hadoop framework how to
     * serialize and deserialize objects of that type. WritableComparable adds
     * a compareTo method on top of Writable, so the framework also knows how
     * to sort objects of WritableComparable types.
     *
     * @author liuqingjie
     */
    public static class TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }
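
    // Illustrative example (not part of the original listing): given the
    // input line "Hello World Hello", map emits ("Hello", 1), ("World", 1),
    // ("Hello", 1); the framework then groups these pairs by key before the
    // reduce phase.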
    /**
     * IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable>.
     *
     * [No matter how many map tasks run, by default there is a single reduce
     * task; it aggregates the map output.]
     * reduce loops over all the values from the map phase and sums up the
     * word ==> one key/value pairs.
     *
     * The key here is the word set by the Mapper; reduce is invoked once per
     * key, receiving all of that key's values.
     *
     * When the loop ends, what has been written to context is the final result.
     *
     * @author liuqingjie
     */
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
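
    // Illustrative example (not part of the original listing): continuing
    // the line "Hello World Hello", reduce is called with ("Hello", [1, 1])
    // and ("World", [1]) and writes ("Hello", 2) and ("World", 1).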
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        if (args.length != 2) {
            System.err.println("Usage: WordCount <input path> <output path>");
            System.exit(2);
        }
        Job job = new Job(conf, "wordcount");
        job.setJarByClass(WordCount.class);             // main class
        job.setMapperClass(TokenizerMapper.class);      // mapper
        job.setReducerClass(IntSumReducer.class);       // reducer
        job.setMapOutputKeyClass(Text.class);           // map output key class
        job.setMapOutputValueClass(IntWritable.class);  // map output value class
        job.setOutputKeyClass(Text.class);              // job output key class
        job.setOutputValueClass(IntWritable.class);     // job output value class
        FileInputFormat.addInputPath(job, new Path(args[0]));    // input path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));  // output path
        System.exit(job.waitForCompletion(true) ? 0 : 1);        // wait for completion, then exit
    }
}
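
The stock Hadoop WordCount example also registers the reducer as a combiner, which pre-aggregates map output on the map side and shrinks the shuffle. The listing above omits it; if you want that behavior, one extra line in main is enough (a sketch against the same Job API used above, valid here only because IntSumReducer's input and output types match):

    job.setCombinerClass(IntSumReducer.class);  // optional: local pre-aggregation before the shuffle

To build and run the job, something along these lines works on a typical Hadoop installation; the jar name and the HDFS paths are placeholders, not values from the original post:

    hadoop com.sun.tools.javac.Main WordCount.java
    jar cf wordcount.jar WordCount*.class
    hadoop jar wordcount.jar WordCount /user/hadoop/input /user/hadoop/output

The first command assumes HADOOP_CLASSPATH points at the JDK's tools.jar; compiling with plain javac against the Hadoop client jars works just as well.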