hadoop 的wordcount java实验

1.导入hadoop需要用到的包

hadoop-2.4.2/share/hadoop/mapreduce/*.jar
hadoop-2.4.2/share/hadoop/mapreduce/lib/*.jar

hadoop-2.4.2/share/hadoop/common/*.jar
hadoop-2.4.2/share/hadoop/common/lib/*.jar

2.编写java程序

package demo;

import java.io.IOException;
import java.util.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;

public class WordCount {

 public static class Map extends MapReduceBase implements Mapper{
  private final static IntWritable one=new IntWritable(1);
  private Text word=new Text();
  @Override
  public void map(LongWritable key,  Text value,
    OutputCollector output, Reporter reporter)
    throws IOException {
   // TODO Auto-generated method stub
   String line=value.toString();
   StringTokenizer tokenizer=new StringTokenizer(line);
   while (tokenizer.hasMoreTokens()){
    word.set(tokenizer.nextToken());
    output.collect(word,one);
    }
   }
   
  }
 //旧版本
 public static class Reduce extends MapReduceBase implements Reducer{

  @Override
  public void reduce(Text key, Iterator values,
    OutputCollector output, Reporter reporter)
    throws IOException {
   // TODO Auto-generated method stub
    int sum=0;
    while(values.hasNext()){
     sum+=values.next().get();
    }
    output.collect(key, new IntWritable(sum)); 
  }
 }

 
 public static void main(String[] args) throws Exception{
  // TODO Auto-generated method stub
  //System.setProperty("HADOOP_USER_NAME","root");  
  JobConf conf=new JobConf(WordCount.class);
  //conf.set("fs.defaultFS","hdfs://192.168.1.120:9000");
  conf.setJobName("wordcount");
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(IntWritable.class);

  conf.setMapperClass(Map.class);
  conf.setReducerClass(Reduce.class);
  
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(TextOutputFormat.class);
  
  FileInputFormat.setInputPaths(conf,new Path(args[0]));
  FileOutputFormat.setOutputPath(conf,new Path(args[1]));

  JobClient.runJob(conf);
 }

}

3.导出为jar文件

4.上传到linux系统中。

5.新建input目录,如果有output目录,先删除

6.上传jar包后,到jar包的目录下,执行

    hadoop jar WordCount.jar demo.WordCount /input/* /output/

7.如果执行路径时不带“/”,hadoop 会在 HDFS 的 /user/root 目录下新建两个文件夹,并提示

      Exception in thread "main" org.apache.hadoop.mapred.InvalidInputException: Input path does not exist: file:  /input  Exception in thread "main" org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: hdfs://master:9000/user/root/input

  只需要在执行的时候带上“/”就行。

8.获取分离后的文件

hadoop fs -get /output/* output/

来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/30180323/viewspace-2144014/,如需转载,请注明出处,否则将追究法律责任。

转载于:http://blog.itpub.net/30180323/viewspace-2144014/

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值