命令行编译运行WordCount.java

1.首先拷贝hadoop源码里的WordCount.java到一个目录wordcount下
 
2.在wordcount目录下新建目录bin准备存放class文件
 
3.编译WordCount.java文件(编译前先把java文件中的package包名删掉):
javac -classpath /usr/local/hadoop/share/hadoop/common/hadoop-common-2.6.0.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.0.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-cli-1.2.jar -d bin WordCount.java
 
4.制作jar包(需在bin目录下执行,这样生成的jar包位置才与第6步的 bin/WordCount.jar 路径一致):
 
jar -cvf WordCount.jar ./WordCount*.class
 
5.在bin目录中执行以下命令生成input测试数据
mkdir input
echo "echo of the rainbow" > ./input/file0
echo "the waiting game" > ./input/file1
 
6.在hadoop单机版运行WordCount.jar包
/usr/local/hadoop/bin/hadoop   jar   bin/WordCount.jar   WordCount   input   output
 
7.查看output结果(结果在output文件夹下的part-r-00000文件中)
 cat ./output/part-r-00000 或
 vim ./output/part-r-00000
 
8.附WordCount.java源代码
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

  public static class TokenizerMapper 
       extends Mapper<Object, Text, Text, IntWritable>{
    
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();
      
    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }
  
  public static class IntSumReducer 
       extends Reducer<Text,IntWritable,Text,IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, 
                       Context context
                       ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
      System.err.println("Usage: wordcount <in> <out>");
      System.exit(2);
    }
    Job job = new Job(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

 

发布了153 篇原创文章 · 获赞 46 · 访问量 16万+
展开阅读全文

Hadoop运行WordCount程序有输入,不生成输出的文件

12-22

![图片说明](https://img-ask.csdn.net/upload/201812/21/1545407846_986988.jpg) 代码如下: import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.GenericOptionsParser; import java.io.IOException; import java.util.Iterator; import java.util.StringTokenizer; public class WordCountMapReduce { public static void main(String[] args) throws Exception{ Configuration configuration = new Configuration(); String[] otherArgs = (new GenericOptionsParser(configuration, args)).getRemainingArgs(); if (otherArgs.length < 2){ System.out.println("Usage:wordcount<in>[<in>···]<out>"); System.exit(2); } */ /** * 设置环境参数 *//* Job job = Job.getInstance(configuration, "wordcount"); */ /** * 设置整个程序的类名 *//* job.setJarByClass(WordCountMapReduce.class); */ /** * 添加Mapper类 *//* job.setMapperClass(WordCountMapReduce.WordCountMapper.class); */ /** * ? 
*//* //job.setCombinerClass(WordCountMapReduce.WordCountReducer.class); */ /** * 添加Reducer类 *//* job.setReducerClass(WordCountMapReduce.WordCountReducer.class); */ /** * 设置输出类型 *//* job.setOutputKeyClass(Text.class); */ /** * 设置输出类型 *//* job.setOutputValueClass(IntWritable.class); for (int i = 0;i < otherArgs.length - 1;++i){ */ /** * 设置输入文件 *//* FileInputFormat.addInputPath(job, new Path(otherArgs[i])); } */ /** * 设置输出文件 *//* FileOutputFormat.setOutputPath(job, new Path(otherArgs[otherArgs.length - 1])); System.exit(job.waitForCompletion(true)?0:1); } //map程序 public static class WordCountMapper extends Mapper<Object, Text, Text, IntWritable> { */ /* * map方法是提供给map task进程来调用的,map task进程是每读取一行文本来调用一次我们自定义的map方法 * map task在调用map方法时,传递的参数: * 一行的起始偏移量LongWritable作为key * 一行的文本内容Text作为value *//* private static final IntWritable one = new IntWritable(1); private Text word = new Text(); public WordCountMapper() { } @Override protected void map(Object key, Text value, Mapper<Object, Text, Text, IntWritable>.Context context) throws IOException, InterruptedException { StringTokenizer stringTokenizer = new StringTokenizer(value.toString()); while (stringTokenizer.hasMoreTokens()) { this.word.set(stringTokenizer.nextToken()); context.write(this.word, one); } } } //reduce程序 */ /* * KEYIN:对应mapper阶段输出的key类型 * VALUEIN:对应mapper阶段输出的value类型 * KEYOUT:reduce处理完之后输出的结果kv对中key的类型 * VALUEOUT:reduce处理完之后输出的结果kv对中value的类型 *//* public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> { */ /* * reduce方法提供给reduce task进程来调用 * * reduce task会将shuffle阶段分发过来的大量kv数据对进行聚合,聚合的机制是相同key的kv对聚合为一组 * 然后reduce task对每一组聚合kv调用一次我们自定义的reduce方法 * 比如:<hello,1><hello,1><hello,1><tom,1><tom,1><tom,1> * hello组会调用一次reduce方法进行处理,tom组也会调用一次reduce方法进行处理 * 调用时传递的参数: * key:一组kv中的key * values:一组kv中所有value的迭代器 *//* private IntWritable intWritable = new IntWritable(); public WordCountReducer(){ } public void intWritable(Text key, Iterable<IntWritable>values, Reducer<Text, IntWritable, Text, 
IntWritable>.Context context)throws IOException, InterruptedException{ int sum = 0; IntWritable val; for (Iterator i$ = values.iterator(); i$.hasNext(); sum += val.get()){ val = (IntWritable)i$.next(); } this.intWritable.set(sum); context.write(key, this.intWritable); } } } 问答

没有更多推荐了,返回首页

©️2019 CSDN 皮肤主题: 大白 设计师: CSDN官方博客

分享到微信朋友圈

×

扫一扫,手机浏览