WordCount and Sort code

Program 1: WordCount.java
package com.wordcount.test;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

  public static class TokenizerMapper 
       extends Mapper<Object, Text, Text, IntWritable>{
    
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();
      
    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }
  
  // A separate combiner class is needed here: a combiner must emit the same
  // (Text, IntWritable) types as the map output, while IntSumReducer below
  // swaps them to (IntWritable, Text).
  public static class IntSumCombiner
       extends Reducer<Text,IntWritable,Text,IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values,
                       Context context
                       ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }
  
  // Note the swapped output types: the reducer emits (count, word) pairs so that
  // the follow-up Sort job can use the count as its input key.
  public static class IntSumReducer 
       extends Reducer<Text,IntWritable,IntWritable,Text> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, 
                       Context context
                       ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(result, key);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 2) {
      System.err.println("Usage: wordcount <in> [<in>...] <out>");
      System.exit(2);
    }
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumCombiner.class);
    job.setReducerClass(IntSumReducer.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(Text.class);
    
    // Write the (IntWritable count, Text word) pairs as a SequenceFile so the
    // Sort job below can read them back with SequenceFileInputFormat.
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
   
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job,new Path(otherArgs[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
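
Since the job writes its result through SequenceFileOutputFormat, the part-r-00000 it produces is binary rather than plain text. As a side note (not part of the original programs), a small standalone reader along the following lines can dump the (count, word) records; the class name SeqDump and the single path argument are just assumptions for illustration.

package com.wordcount.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Minimal sketch for inspecting the WordCount output; not from the original post.
public class SeqDump {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Open the SequenceFile given as args[0]; when launched via bin/hadoop jar this
    // resolves against HDFS, e.g. the part-r-00000 produced by the run shown later.
    SequenceFile.Reader reader = new SequenceFile.Reader(conf,
        SequenceFile.Reader.file(new Path(args[0])));
    IntWritable count = new IntWritable();
    Text word = new Text();
    // Each record matches the job's output types: key = count, value = word.
    while (reader.next(count, word)) {
      System.out.println(count.get() + "\t" + word);
    }
    reader.close();
  }
}

It could be launched the same way as the jobs, e.g. bin/hadoop jar wordcounta.jar com.wordcount.test.SeqDump tmp/wordcounta/part-r-00000 (a hypothetical invocation reusing the paths from the run below).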

Program 2: Sort.java
package com.wordcount.test;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Sort {

    
  // Reads the (count, word) records produced by WordCount and wraps each count in a
  // RevertKey so that the shuffle sorts the counts in descending order.
  public static class SimpleMapper extends Mapper<IntWritable,Text,RevertKey,Text>{
    protected void map(IntWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        RevertKey newkey = new RevertKey(key);
        context.write(newkey, value);
    }
  }
  
  // Unwraps the RevertKey and writes (word, count) pairs; because the keys arrive
  // in descending count order, the output file is already sorted.
  public static class SimpleReducer extends Reducer<RevertKey,Text,Text,IntWritable>{
      protected void reduce(RevertKey key, Iterable<Text> values,
              Context context) throws IOException, InterruptedException {
          for(Text val : values){
              context.write(val, key.getKey());
          }
      }
  }
  
  // A WritableComparable wrapper around IntWritable whose compareTo is negated,
  // so the MapReduce sort phase orders the counts from largest to smallest.
  public static class RevertKey implements WritableComparable<RevertKey>{
    private IntWritable key;
    public RevertKey(){
        key = new IntWritable();
    }
    
    public RevertKey(IntWritable key){
        this.key = key;
    }
    
    public IntWritable getKey(){
        return key;
    }
    @Override
    public void readFields(DataInput in) throws IOException {
        key.readFields(in);
    }

    @Override
    public void write(DataOutput out) throws IOException {
        key.write(out);
    }

    @Override
    public int compareTo(RevertKey o) {
        return -key.compareTo(o.getKey());
    }
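
    // Added note (not in the original post): the default HashPartitioner assigns map
    // output keys to reducers by hashCode(). With the single default reducer this is
    // irrelevant, but if more reducers were configured, equal counts must hash the
    // same, so delegating to the wrapped IntWritable is the safe choice.
    @Override
    public int hashCode() {
        return key.hashCode();
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof RevertKey && key.equals(((RevertKey) o).getKey());
    }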
      
  }
  
  
 
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 2) {
      System.err.println("Usage: wordcount <in> [<in>...] <out>");
      System.exit(2);
    }
    Job job = Job.getInstance(conf, "sort");
    job.setJarByClass(Sort.class);
    job.setMapperClass(SimpleMapper.class);
    job.setReducerClass(SimpleReducer.class);

    job.setMapOutputKeyClass(RevertKey.class);
    job.setMapOutputValueClass(Text.class);
    
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    
    // The input is the SequenceFile that the WordCount job wrote.
    job.setInputFormatClass(SequenceFileInputFormat.class);
   
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job,new Path(otherArgs[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
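
The descending order here comes entirely from RevertKey negating compareTo. For comparison (an alternative not used in the post), the same effect could be had with a plain IntWritable map output key and a reversed sort comparator. The sketch below is hypothetical: it assumes an extra import of org.apache.hadoop.io.WritableComparator, a mapper that forwards the IntWritable key unchanged, and calls to job.setSortComparatorClass(DescendingIntComparator.class) and job.setMapOutputKeyClass(IntWritable.class) in main().

  // Hypothetical alternative, nested inside Sort: sort IntWritable keys in
  // descending order without a wrapper key class.
  public static class DescendingIntComparator extends WritableComparator {
    public DescendingIntComparator() {
      super(IntWritable.class, true);
    }

    @Override
    public int compare(WritableComparable a, WritableComparable b) {
      // Flip the sign of the normal IntWritable comparison to reverse the order.
      return -super.compare(a, b);
    }
  }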

Today I ran these two programs on the remote desktop.

For the build path I imported the jars from hadoop-2.6.2 share/hadoop: the common, hdfs, mapreduce, and yarn jars plus the ones in their lib directories (I imported them all to save the trouble of picking out individual jars).

Run:

bin/hadoop jar /liyanan/Desktop/wordcounta.jar com.wordcount.test.WordCount /tmp/wordcount tmp/wordcounta

The results can be checked with

bin/hadoop fs -ls tmp/

bin/hadoop fs -text tmp/wordcounta/part-r-00000

(-text rather than -cat on the output file, because WordCount writes a SequenceFile). I never learned Linux properly before, which is maddening; I need to catch up as soon as possible.

After WordCount finishes, move on to debugging Sort; note that its input file is the part-r-00000 produced by the previous job:

bin/hadoop jar /liyanan/Desktop/wordcounta.jar com.wordcount.test.Sort tmp/wordcounta/part-r-00000 tmp/sorta
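
Since Sort keeps the default TextOutputFormat, its result is plain text and can be viewed directly (paths as in the run above):

bin/hadoop fs -cat tmp/sorta/part-r-00000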

