MapReduce WritableComparable example: Top-N

In MapReduce programming, the key is what gets grouped and sorted. When Hadoop's built-in key types cannot meet those needs, or when a data type customized for the use case would perform better, you can define your own type by implementing the org.apache.hadoop.io.WritableComparable interface and use it as the key type of the MapReduce computation.

Custom type

package first.first_maven;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;
/*
 * 1. A custom key type must implement WritableComparable (so it can be sorted); Writable alone cannot be sorted.
 * 2. The fields in write() and readFields() must match in number and order.
 * 3. toString(), equals() and hashCode() can be overridden as needed.
 */

public class TopNWritable implements WritableComparable<TopNWritable> {

	public String word;
	public int counter;
	
	public TopNWritable() {}
	
	public TopNWritable(String word, int counter) {
		super();
		this.word = word;
		this.counter = counter;
	}
	public String getWord() {
		return word;
	}
	public void setWord(String word) {
		this.word = word;
	}
	public int getCounter() {
		return counter;
	}
	public void setCounter(int counter) {
		this.counter = counter;
	}
	@Override
	public int compareTo(TopNWritable o) {
		// Sort by counter in descending order. Break ties on the word so two
		// different words with the same count are not treated as duplicates
		// (important for the TreeSet used later), and avoid subtraction overflow.
		int cmp = Integer.compare(o.counter, this.counter);
		return cmp != 0 ? cmp : this.word.compareTo(o.word);
	}
	
	// Deserialization: read the fields in the same order they were written
	@Override
	public void readFields(DataInput in) throws IOException {
		this.word = in.readUTF();
		this.counter = in.readInt();
	}
	
	// Serialization: write the fields in a fixed order
	@Override
	public void write(DataOutput out) throws IOException {
		out.writeUTF(word);
		out.writeInt(counter);
	}
	// When this object is used as the reducer's output key or value, the output format is determined by toString()
	@Override
	public String toString() {
		return "TopNWritable [word=" + word + ", counter=" + counter
				+ ", toString()=" + super.toString() + "]";
	}
	
	

}
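Before plugging the type into a job, it can be worth a quick local check that write() and readFields() round-trip correctly and that a TreeSet of TopNWritable really iterates from the largest counter down. The sketch below is only illustrative; the class name TopNWritableCheck and the sample values are made up for this example.

package first.first_maven;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.TreeSet;

public class TopNWritableCheck {
	public static void main(String[] args) throws Exception {
		// Round-trip one object through write()/readFields()
		TopNWritable original = new TopNWritable("hadoop", 7);
		ByteArrayOutputStream bytes = new ByteArrayOutputStream();
		original.write(new DataOutputStream(bytes));

		TopNWritable copy = new TopNWritable();
		copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
		System.out.println("after round trip: " + copy);

		// The TreeSet should iterate from the largest counter to the smallest
		TreeSet<TopNWritable> ts = new TreeSet<TopNWritable>();
		ts.add(new TopNWritable("a", 3));
		ts.add(new TopNWritable("b", 9));
		ts.add(new TopNWritable("c", 5));
		for (TopNWritable t : ts) {
			System.out.println(t);
		}
	}
}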


Below is the MapReduce application; the explanations are in the code comments.

package first.first_maven;

import java.io.IOException;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class TopN  {
	public static class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable>{
		
		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			// Split each line on spaces and emit (word, 1) for every word
			String[] words = value.toString().split(" ");
			for (String word : words) {
				context.write(new Text(word), new IntWritable(1));
			}
		}
	}

	public static class MyReducer extends Reducer<Text, IntWritable, TopNWritable, NullWritable>{
		
		
		// How many top entries to keep
		private static final int TOP_N = 3;
		// Holds one TopNWritable per word; because compareTo() sorts by counter
		// in descending order, iterating ts goes from the largest count down
		TreeSet<TopNWritable> ts = new TreeSet<TopNWritable>();
		
		
		@Override
		protected void reduce(Text key, Iterable<IntWritable> value, Context context)
				throws IOException, InterruptedException {

			// Sum the counts for this word
			int counter = 0;
			for (IntWritable v : value) {
				counter += v.get();
			}

			TopNWritable tn = new TopNWritable(key.toString(), counter);
			// Every key is inserted into ts
			ts.add(tn);
			// Keep at most TOP_N elements; the last element of ts has the
			// smallest counter, so it is the one removed when the set overflows
			if (ts.size() > TOP_N) {
				ts.remove(ts.last());
			}
			// Writing here would emit a record for every word, but we only want
			// the top three, so the output happens in cleanup() instead
			//context.write(tn, NullWritable.get());
		}

        /* cleanup() runs once when the reducer finishes and is normally used
         * for releasing resources. Doing the final filtering and output here
         * means we see all groups first and only then emit the records we
         * keep, instead of writing one record per group.
         */
		@Override
		protected void cleanup(Context context)throws IOException, InterruptedException {
			for(TopNWritable t:ts){
				context.write(t, NullWritable.get());
			}
		}
	}
	
	
	public static void main(String[] args) throws Exception {
        Configuration conf=new Configuration();
        Job job = Job.getInstance(conf, "myjob");
        job.setJarByClass(TopN.class);

        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job,new Path(args[0]));


        
        job.setReducerClass(MyReducer.class);
        // A global Top-N needs all keys to reach a single reducer
        job.setNumReduceTasks(1);
        job.setOutputKeyClass(TopNWritable.class);
        job.setOutputValueClass(NullWritable.class);
        FileOutputFormat.setOutputPath(job,new Path(args[1]));

        int isok=job.waitForCompletion(true)?0:1;

        System.exit(isok);
        }
	

}
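To try the job, package the project into a jar and submit it with hadoop jar, passing the input and output directories as arguments. The jar name and HDFS paths below are placeholders:

hadoop jar first_maven.jar first.first_maven.TopN /user/demo/input /user/demo/output

With an input file containing, say:

hadoop spark hadoop hive
hadoop spark hive hbase
hive hadoop

the reducer keeps the three most frequent words, and part-r-00000 contains one line per kept word, formatted by toString():

TopNWritable [word=hadoop, counter=4]
TopNWritable [word=hive, counter=3]
TopNWritable [word=spark, counter=2]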

 

Reposted from: https://my.oschina.net/u/4010291/blog/3008923
