WordCountMapper.java
package cds.hadoop.wordcount;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.StringUtils;

/**
 * Mapper task.
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    /**
     * key: the byte offset of the line within the input file (not the line number);
     * value: one line of input
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // get the line as a plain String
        String line = value.toString();
        // split the line into words on single spaces
        String[] words = StringUtils.split(line, ' ');
        // emit <word, 1> for every word in the line
        for (String word : words) {
            context.write(new Text(word), new LongWritable(1L));
        }
    }
}
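To get a concrete feel for what the map step emits, here is a minimal standalone sketch (WordCountMapperDemo is a hypothetical helper, not part of the job) that replays the same split-and-emit logic on one line of input:

package cds.hadoop.wordcount;
import org.apache.hadoop.util.StringUtils;

/** Hypothetical demo: mimics one map() call on a single line of input. */
public class WordCountMapperDemo {
    public static void main(String[] args) {
        String line = "hello world hello";
        // same split used by WordCountMapper
        for (String word : StringUtils.split(line, ' ')) {
            System.out.println("<" + word + ", 1>");
        }
        // prints: <hello, 1>  <world, 1>  <hello, 1>
    }
}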
WordCountReducer.java
package cds.hadoop.wordcount;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * Reducer task.
 */
public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

    /**
     * The input is <key, <v1, v2, ..., vn>>: one word and all the 1s emitted for it.
     */
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        // count how many times this word appeared
        long count = 0;
        for (LongWritable value : values) {
            count += value.get();
        }
        // emit <word, count> -- the final result we want
        context.write(key, new LongWritable(count));
    }
}
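After the shuffle, each distinct word reaches the reducer together with every 1 emitted for it. A minimal standalone sketch of that aggregation (WordCountReducerDemo is hypothetical, not part of the job):

package cds.hadoop.wordcount;
import java.util.Arrays;
import java.util.List;

/** Hypothetical demo: mimics one reduce() call for key "hello" with values [1, 1]. */
public class WordCountReducerDemo {
    public static void main(String[] args) {
        List<Long> values = Arrays.asList(1L, 1L); // the shuffled 1s for "hello"
        long count = 0;
        for (long value : values) {
            count += value;
        }
        System.out.println("<hello, " + count + ">"); // prints <hello, 2>
    }
}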
WordCountRunner.java
package cds.hadoop.wordcount;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Describes the job:
 * which class is the mapper?
 * which class is the reducer?
 * where is the input?
 * where should the output go?
 *
 * Then submits the job to Hadoop.
 */
public class WordCountRunner {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job wcjob = Job.getInstance(conf);
        wcjob.setJarByClass(WordCountRunner.class);

        // set the Mapper and its output types
        wcjob.setMapperClass(WordCountMapper.class);
        wcjob.setMapOutputKeyClass(Text.class);
        wcjob.setMapOutputValueClass(LongWritable.class);

        // set the Reducer and its output types
        wcjob.setReducerClass(WordCountReducer.class);
        wcjob.setOutputKeyClass(Text.class);
        wcjob.setOutputValueClass(LongWritable.class);

        // where is the input?
        // The InputFormat is also a pluggable component of a Job, but the default
        // (TextInputFormat) is fine here, so this demo does not supply its own.
        FileInputFormat.setInputPaths(wcjob, "hdfs://master:9000/wc/srcData");

        // where should the output go?
        FileOutputFormat.setOutputPath(wcjob, new Path("hdfs://master:9000/wc/output"));

        // submit the job and wait; 'true' prints progress to the console
        boolean success = wcjob.waitForCompletion(true);
        System.exit(success ? 0 : 1);
    }
}
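One optional refinement the runner leaves out: because summing is associative, WordCountReducer can double as a combiner, pre-aggregating <word, 1> pairs on each map node and shrinking the shuffle. Assuming nothing else changes, the single extra call (setCombinerClass is part of the standard Job API) would be:

// optional: pre-aggregate counts on each map node before the shuffle;
// WordCountReducer works unchanged as a combiner because addition is associative
wcjob.setCombinerClass(WordCountReducer.class);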
Output and results:
2016-04-04 08:28:39,173 WARN [main] util.NativeCodeLoader (NativeCodeLoader.java:<clinit>(62)) - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2016-04-04 08:28:40,150 INFO [main] Configuration.deprecation (Configuration.java:warnOnceIfDeprecated(1173)) - session.id is deprecated. Instead, use dfs.metrics.session-id
2016-04-04 08:28:40,151 INFO [main] jvm.JvmMetrics (JvmMetrics.java:init(76)) - Initializing JVM Metrics with processName=JobTracker, sessionId=
2016-04-04 08:28:40,707 WARN [main] mapreduce.JobResourceUploader (JobResourceUploader.java:uploadFiles(64)) - Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
2016-04-04 08:28:40,757 WARN [main] mapreduce.JobResourceUploader (JobResourceUploader.java:uploadFiles(171)) - No job jar file set. User classes may not be found. See Job or Job#setJar(String).
2016-04-04 08:28:41,003
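The two WARN lines above point at a cleanup worth doing: implement the Tool interface and launch through ToolRunner so Hadoop's generic command-line options get parsed, and run from a packaged jar so the job jar is set. A sketch of the ToolRunner variant, assuming the same job setup as WordCountRunner (WordCountTool is a hypothetical name):

package cds.hadoop.wordcount;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCountTool extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // same setup as WordCountRunner, but the Configuration comes from ToolRunner
        Job wcjob = Job.getInstance(getConf());
        wcjob.setJarByClass(WordCountTool.class);
        wcjob.setMapperClass(WordCountMapper.class);
        wcjob.setMapOutputKeyClass(Text.class);
        wcjob.setMapOutputValueClass(LongWritable.class);
        wcjob.setReducerClass(WordCountReducer.class);
        wcjob.setOutputKeyClass(Text.class);
        wcjob.setOutputValueClass(LongWritable.class);
        FileInputFormat.setInputPaths(wcjob, "hdfs://master:9000/wc/srcData");
        FileOutputFormat.setOutputPath(wcjob, new Path("hdfs://master:9000/wc/output"));
        return wcjob.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // ToolRunner strips generic options (-D key=value, -files, ...) from args
        // before calling run(), which silences the first WARN above.
        System.exit(ToolRunner.run(new WordCountTool(), args));
    }
}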