1.基础排序算法
2.二次排序算法
3.更高级别排序
4.排序算法内幕
1.基础排序算法
// Word count sorted by frequency (descending): split lines on spaces, map each
// word to (word, 1), sum counts into 1 partition, swap to (count, word) so
// sortByKey(false) orders by count descending, then swap back to (word, count).
sc.textFile("/data/putfile.txt").flatMap(_.split(" ")).map(word=>(word,1)).reduceByKey(_+_,1).map(pair=>(pair._2,pair._1)).sortByKey(false).map(pair=>(pair._2,pair._1)).collect
// swap key and value (key value交换)
sc.setLogLevel("WARN")
2.二次排序算法
所谓二次排序就是指排序的时候考虑两个维度(有可能10次排序)
1.CombineByKey
/**
* SecondarySortUsingCombineByKey class implements the secondary sort design pattern
* by using combineByKey().
*
* Input:
*
* name, time, value
* x,2,9
* y,2,5
* x,1,3
* y,1,7
* y,3,1
* x,3,6
* z,1,4
* z,2,8
* z,3,7
* z,4,0
* p,1,10
* p,3,60
* p,4,40
* p,6,20
*
* Output: generate a time-series looking like this:
*
* t1 t2 t3 t4 t5 t6
* x => [3, 9, 6]
* y => [7, 5, 1]
* z => [4, 8, 7, 0]
* p => [10, null, 60, 40, null, 20]
*
* x => [(1,3), (2,9), (3,6)] where 1 < 2 < 3
* y => [(1,7), (2,5), (3,1)] where 1 < 2 < 3
* z => [(1,4), (2,8), (3,7), (4,0)] where 1 < 2 < 3 < 4
* p => [(1,10), (3,60), (4,40), (6,20)] where 1 < 3 < 4 < 6
*
*/
1.读取文件,生成RDD
2.普通RDD<String>转换为PairRDD<String, Tuple2<Integer, Integer>>;键值对
3.使用combineByKey函数进行
// How to use combineByKey(): to use combineByKey(), you
// need to define 3 basic functions f1, f2, f3:
// and then you invoke it as: combineByKey(f1, f2, f3)
// function 1: create a combiner data structure --- 初始化,第一次出现进行排序
// function 2: merge a value into a combined data structure -- 对每个分区进行操作,之后再出现就直接加入
// function 3: merge two combiner data structures -- 汇总每个分区的数据,进行总的合并
// function 1: create a combiner data structure
// Here, the combiner data structure is a SortedMap<Integer,Integer>,
// which keeps track of (time, value) for a given key
// Tuple2<Integer, Integer> = Tuple2<time, value>
// SortedMap<Integer, Integer> = SortedMap<time, value>
Function<Tuple2<Integer, Integer>, SortedMap<Integer, Integer>> createCombiner
= (Tuple2<Integer, Integer> x) -> {
Integer time = x._1;
Integer value = x._2;
SortedMap<Integer, Integer> map = new TreeMap<>();
map.put(time, value);
return map;
};// function 2: merge a value into a combined data structure
// function 2: fold one more (time, value) tuple into an existing per-key map.
// NOTE(review): a duplicate time for the same key overwrites the earlier value
// (TreeMap.put semantics) — confirm that is acceptable for the input data.
Function2<SortedMap<Integer, Integer>, Tuple2<Integer, Integer>, SortedMap<Integer, Integer>> mergeValue
= (SortedMap<Integer, Integer> series, Tuple2<Integer, Integer> timeValue) -> {
    series.put(timeValue._1, timeValue._2);
    return series;
};
// function 3: merge two combiner data structures
// function 3: combine two per-key sorted maps built on different partitions.
// Always folds the smaller map into the larger one so the loop in
// DataStructures.merge does the least possible work.
Function2<SortedMap<Integer, Integer>, SortedMap<Integer, Integer>, SortedMap<Integer, Integer>> mergeCombiners
= (SortedMap<Integer, Integer> left, SortedMap<Integer, Integer> right) ->
    (left.size() < right.size())
        ? DataStructures.merge(left, right)
        : DataStructures.merge(right, left);
// STEP-5: create sorted (time, value)
// combineByKey builds, per key, a SortedMap<time, value>, so each name's
// series comes out already ordered by time — the "second" sort dimension.
// NOTE(review): assumes `pairs` is a JavaPairRDD<String, Tuple2<Integer, Integer>>
// of (name, (time, value)) built earlier in the full program — not visible here.
JavaPairRDD<String, SortedMap<Integer, Integer>> combined = pairs.combineByKey(
createCombiner,
mergeValue,
mergeCombiners);
public class DataStructures {
/**
* Merge smaller Map into a larger Map
* @param smaller a Map
* @param larger a Map
* @return merged elements* 两个大小不一样的Map进行合并
*/
public static SortedMap<Integer, Integer> merge(
final SortedMap<Integer, Integer> smaller,
final SortedMap<Integer, Integer> larger) {
//
for (Integer key : smaller.keySet()) {
Integer valueFromLargeMap = larger.get(key);
if (valueFromLargeMap == null) {
larger.put(key, smaller.get(key));
}
else {
int mergedValue = valueFromLargeMap + smaller.get(key);
larger.put(key, mergedValue);
}
}
//
return larger;
}
}
2.GroupByKey
// We group JavaPairRDD<> elements by the key ({name}) — group records per name
JavaPairRDD<String, Iterable<Tuple2<Integer, Integer>>> groups = pairs.groupByKey();
// Sort each group's (time, value) tuples by time, in memory, per key.
// NOTE(review): unlike the combineByKey variant above, groupByKey materializes
// every value for a key before sorting — verify memory is sufficient for
// skewed keys before using this on large data.
JavaPairRDD<String, Iterable<Tuple2<Integer, Integer>>> sorted = groups.mapValues((Iterable<Tuple2<Integer, Integer>> s) -> {
List<Tuple2<Integer, Integer>> newList = new ArrayList<Tuple2<Integer, Integer>>(iterableToList(s));
// sort by time (the tuple's first element) using the shared comparator
Collections.sort(newList, SparkTupleComparator.INSTANCE);
return newList;
}
// Copies an Iterable into a List so it can be sorted in place.
);static List<Tuple2<Integer,Integer>> iterableToList(Iterable<Tuple2<Integer,Integer>> iterable) {
List<Tuple2<Integer,Integer>> list = new ArrayList<Tuple2<Integer,Integer>>();
for (Tuple2<Integer,Integer> item : iterable) {
list.add(item);
}
return list;
}
/**
 * Singleton comparator ordering (time, value) tuples by their first element
 * (time), ascending. Serializable so Spark can ship it to executor JVMs.
 */
public class SparkTupleComparator
        implements Comparator<Tuple2<Integer, Integer>>, Serializable {

    public static final SparkTupleComparator INSTANCE = new SparkTupleComparator();

    // private: callers must go through the shared INSTANCE
    private SparkTupleComparator() {
    }

    @Override
    public int compare(Tuple2<Integer, Integer> left, Tuple2<Integer, Integer> right) {
        return Integer.compare(left._1, right._1);
    }
}
3.SortWithinPartitions
JavaPairRDD<Tuple2<String, Integer>, Integer> valueToKey =
input.mapToPair(new PairFunction<String, Tuple2<String, Integer>, Integer>() {@Override
public Tuple2<Tuple2<String, Integer>, Integer> call(String t) throws Exception {
String[] array = t.split(",");
Integer value = Integer.parseInt(array[3]);
Tuple2<String, Integer> key =