Hadoop -- Sorting

Requirement

Sort 1,000,000 numbers in the range 0 to 10,000,000.
Sample input (comma-separated integers):
8995149,5191755,2093544,9816608,4360204,5507730,1289204,6166586,8955325,9567003,
8264570,5202810,5353570,279129,9281133,6205171,5684360,1666876,4727056,2383250,
9176282,2815703,5145176,7514591,8648912,4633139,4176111,8954834,1702148,7978927,

Map-side shuffle process

During map-side processing, intermediate results are first written into a 100 MB circular memory buffer. When the buffer reaches a configured threshold (80% by default), a background thread starts writing the data to a temporary file on local disk; this process is called a spill.
Before each spill is written out, the data is sorted twice: first by the partition it belongs to, then by key within each partition. By default, records are assigned to partitions by taking the key's hash value modulo the number of reducers, and each reducer later reads the data belonging to its own partition.
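
The buffer size and spill threshold are configurable. Below is a minimal sketch of the two knobs involved, set to the Hadoop 2+ defaults (the ShuffleTuning class name is only for illustration; tune the values for your own cluster):

import org.apache.hadoop.conf.Configuration;

public class ShuffleTuning {
	public static Configuration spillSettings() {
		Configuration conf = new Configuration();
		// size of the in-memory sort buffer, in MB (default 100)
		conf.setInt("mapreduce.task.io.sort.mb", 100);
		// fraction of the buffer that triggers a spill to local disk (default 0.80)
		conf.setFloat("mapreduce.map.sort.spill.percent", 0.80f);
		return conf;
	}
}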

Hadoop's built-in Partitioner

Hadoop ships with a default partitioner class, HashPartitioner, which extends the Partitioner class and provides a getPartition method. It is defined as follows:

/** Partition keys by their {@link Object#hashCode()}. */
public class HashPartitioner<K, V> extends Partitioner<K, V> {
  /** Use {@link Object#hashCode()} to partition. */
  public int getPartition(K key, V value,
                          int numReduceTasks) {
    return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
  }
}

Hadoop's built-in sorting

By default, MapReduce outputs records sorted by key: numerically for numeric keys, or in dictionary (lexicographic) order for text keys.
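
A small illustrative sketch of what "key order" means for the two common key types (this snippet is not part of the job itself):

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class DefaultOrderDemo {
	public static void main(String[] args) {
		// IntWritable keys compare numerically: 9 comes before 10
		System.out.println(new IntWritable(9).compareTo(new IntWritable(10))); // negative
		// Text keys compare byte by byte (dictionary order): "10" comes before "9"
		System.out.println(new Text("10").compareTo(new Text("9")));           // negative
	}
}

This is why the job below emits the numbers as IntWritable keys: writing them as Text would sort them lexicographically rather than numerically.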

Implementation approach

Implement a custom partitioning rule (a Partitioner) and a custom sort rule (a SortComparator).

PartionByKey
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;

public class PartionByKey extends Partitioner<IntWritable, NullWritable> {

	@Override
	public int getPartition(IntWritable key, NullWritable value, int numPartitions) {
		// Split the key space 0 ~ 10,000,000 into numPartitions contiguous ranges
		// (the +1 rounds up so the largest keys still map to a valid partition index).
		int widthRange = 10000000 / numPartitions + 1;
		// Keys in [i * widthRange, (i + 1) * widthRange) go to partition i,
		// so partition i always holds smaller keys than partition i + 1.
		int partitionIndex = key.get() / widthRange;
		return partitionIndex;
	}

}
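For example, with the 10 reduce tasks configured in the driver below, widthRange = 10,000,000 / 10 + 1 = 1,000,001, so key 279129 goes to partition 0 while key 8995149 goes to partition 8. Every reducer therefore receives a contiguous, non-overlapping slice of the key space, with lower-numbered partitions holding smaller keys.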
SortComparatorByKey
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

public class SortComparatorByKey extends WritableComparator {
    public SortComparatorByKey() {
        // register IntWritable as the key type and create instances for comparison
        super(IntWritable.class, true);
    }

    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        // compare the two keys numerically, in ascending order
        IntWritable k1 = (IntWritable) a;
        IntWritable k2 = (IntWritable) b;
        return k1.compareTo(k2);
    }
}
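Note that IntWritable already registers a comparator that sorts ascending numerically, so the job would order keys the same way without this class; declaring SortComparatorByKey mainly makes the ordering rule explicit and gives a single place to change it later.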

Java implementation

import java.io.IOException;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;

import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class Sort extends Configured implements Tool {


	public static class Map extends Mapper<LongWritable, Text, IntWritable, NullWritable> {

		private static final NullWritable NULL = NullWritable.get();
		private static final String SPLIT = ",";

		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			String line = value.toString();
			String[] ss = line.split(SPLIT);
			for (int i = 0; i < ss.length; i++) {
				IntWritable number = new IntWritable(Integer.parseInt(ss[i]));
				context.write(number, NULL);
			}
		}

	}

	public static class Reduce extends Reducer<IntWritable, NullWritable, IntWritable, NullWritable> {

		@Override
		protected void reduce(IntWritable key, Iterable<NullWritable> values, Context context)
				throws IOException, InterruptedException {
			for (NullWritable nullWritable : values) {
				context.write(key, nullWritable);
			}
		}

	}

	public int run(String[] args) throws Exception {

		// create the job from the configuration parsed by ToolRunner
		Job job = Job.getInstance(getConf());
		// set job's name
		job.setJobName("mr sort");
		// set job's class
		job.setJarByClass(Sort.class);

		// job set map's class
		job.setMapperClass(Map.class);
		// job set reduce's class
		job.setReducerClass(Reduce.class);

		// job set class of map's output
		job.setMapOutputKeyClass(IntWritable.class);
		job.setMapOutputValueClass(NullWritable.class);

		// job set class of output's key
		job.setOutputKeyClass(IntWritable.class);
		// job set class of output's value
		job.setOutputValueClass(NullWritable.class);

		// set class of inputFormat
		job.setInputFormatClass(TextInputFormat.class);
		// set class of outputFormat
		// job.setOutputFormatClass(TextOutputFormat.class);

		FileInputFormat.setInputPaths(job, args[0]);
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		// set the custom range partitioner
		job.setPartitionerClass(PartionByKey.class);

		// set the custom sort comparator
		job.setSortComparatorClass(SortComparatorByKey.class);

		// 10 reduce tasks -> 10 output files, each covering a contiguous key range
		job.setNumReduceTasks(10);

		boolean success = job.waitForCompletion(true);
		return success ? 0 : 1;
	}

	public static void main(String[] args) throws Exception {

		int run = ToolRunner.run(new Sort(), args);
		System.exit(run);
	}
}
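Because PartionByKey assigns each reducer a contiguous key range and every reducer's input is sorted by SortComparatorByKey, each of the ten output files part-r-00000 through part-r-00009 is sorted internally and the files are ordered relative to one another; reading them in sequence yields the fully sorted 1,000,000 numbers.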