1.建立测试程序 WordCount.java
- import java.io.IOException;
- import java.util.ArrayList;
- import java.util.Iterator;
- import java.util.List;
- import java.util.StringTokenizer;
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.conf.Configured;
- import org.apache.hadoop.fs.Path;
- import org.apache.hadoop.io.IntWritable;
- import org.apache.hadoop.io.LongWritable;
- import org.apache.hadoop.io.Text;
- import org.apache.hadoop.mapred.FileInputFormat;
- import org.apache.hadoop.mapred.FileOutputFormat;
- import org.apache.hadoop.mapred.JobClient;
- import org.apache.hadoop.mapred.JobConf;
- import org.apache.hadoop.mapred.MapReduceBase;
- import org.apache.hadoop.mapred.Mapper;
- import org.apache.hadoop.mapred.OutputCollector;
- import org.apache.hadoop.mapred.Reducer;
- import org.apache.hadoop.mapred.Reporter;
- import org.apache.hadoop.util.Tool;
- import org.apache.hadoop.util.ToolRunner;
- public class WordCount extends Configured implements Tool {
- public static class MapClass extends MapReduceBase implements
- Mapper<LongWritable, Text, Text, IntWritable> {
- private final static IntWritable one = new IntWritable(1);
- private Text word = new Text();
- public void map(LongWritable key, Text value,
- OutputCollector<Text, IntWritable> output, Reporter reporter)
- throws IOException {
- String line = value.toString();
- StringTokenizer itr = new StringTokenizer(line);
- while (itr.hasMoreTokens()) {
- word.set(itr.nextToken());
- output.collect(word, one);
- }
- }
- }
- /**
- * A reducer class that just emits the sum of the input values.
- */
- public static class Reduce extends MapReduceBase implements
- Reducer<Text, IntWritable, Text, IntWritable> {
- public void reduce(Text key, Iterator<IntWritable> values,
- OutputCollector<Text, IntWritable> output, Reporter reporter)
- throws IOException {
- int sum = 0;
- while (values.hasNext()) {
- sum += values.next().get();
- }
- output.collect(key, new IntWritable(sum));
- }
- }
- static int printUsage() {
- System.out.println("wordcount [-m <maps>] [-r <reduces>] <input> <output>");
- ToolRunner.printGenericCommandUsage(System.out);
- return -1;
- }
- /**
- * The main driver for word count map/reduce program. Invoke this method to
- * submit the map/reduce job.
- *
- * @throws IOException
- * When there is communication problems with the job tracker.
- */
- public int run(String[] args) throws Exception {
- JobConf conf = new JobConf(getConf(), WordCount.class);
- conf.setJobName("wordcount");
- // the keys are words (strings)
- conf.setOutputKeyClass(Text.class);
- // the values are counts (ints)
- conf.setOutputValueClass(IntWritable.class);
- conf.setMapperClass(MapClass.class);
- conf.setCombinerClass(Reduce.class);
- conf.setReducerClass(Reduce.class);
- List<String> other_args = new ArrayList<String>();
- for (int i = 0; i < args.length; ++i) {
- try {
- if ("-m".equals(args[i])) {
- conf.setNumMapTasks(Integer.parseInt(args[++i]));
- } else if ("-r".equals(args[i])) {
- conf.setNumReduceTasks(Integer.parseInt(args[++i]));
- } else {
- other_args.add(args[i]);
- }
- } catch (NumberFormatException except) {
- System.out.println("ERROR: Integer expected instead of "
- + args[i]);
- return printUsage();
- } catch (ArrayIndexOutOfBoundsException except) {
- System.out.println("ERROR: Required parameter missing from "
- + args[i - 1]);
- return printUsage();
- }
- }
- // Make sure there are exactly 2 parameters left.
- if (other_args.size() != 2) {
- System.out.println("ERROR: Wrong number of parameters: "
- + other_args.size() + " instead of 2.");
- return printUsage();
- }
- FileInputFormat.setInputPaths(conf, other_args.get(0));
- FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));
- JobClient.runJob(conf);
- return 0;
- }
- public static void main(String[] args) throws Exception {
- int res = ToolRunner.run(new Configuration(), new WordCount(), args);
- System.exit(res);
- }
- }
- $ javac -classpath $HADOOP_HOME/hadoop-core.jar WordCount.java
- $ jar cvf WordCount.jar *.class
2. 建立测试用的输入文件
建立file01和file02两个文件,在其中输入一些单词。例如两个文件的内容均为"Hello Hadoop Goodbye Hadoop",即可得到下文所示的统计结果。
3. 启动Hadoop。(注意:namenode -format 只需在首次使用HDFS前执行一次,重复执行会清空HDFS中的已有数据)
- $ bin/hadoop namenode -format
- $ bin/start-all.sh
4.将测试文件复制到hdfs中
利用下列命令在HDFS中创建/tmp/input文件夹,并将测试输入文件复制到该文件夹中
- $ bin/hadoop fs -mkdir /tmp/input
- $ bin/hadoop fs -put file01 /tmp/input/
- $ bin/hadoop fs -put file02 /tmp/input/
5.运行WordCount程序
第一个参数是MapReduce的输入内容,第二个参数是MapReduce的输出内容
- $ bin/hadoop jar WordCount.jar WordCount /tmp/input /tmp/output
执行成功后查看output文件夹
- $ bin/hadoop fs -ls /tmp/output/
显示如下结果:
- Found 2 items
- drwxr-x--- - admin admin 0 2010-09-16 22:43 /tmp/output/_logs
- -rw-r----- 1 admin admin 102 2010-09-16 22:44 /tmp/output/part-00000
查看执行结果:
- $ bin/hadoop fs -cat /tmp/output/part-00000
结果为:
- Goodbye 2
- Hadoop 4
- Hello 2