在单线程计算中,前 n 项和的计算一直没有障碍。随着数据量的膨胀,单线程计算已经无法满足需求,计算逐渐被迁移到 Spark 或者 Hadoop 集群上并行执行。但是无论 Spark 还是 Hadoop,并行计算前 n 项和一直是一个痛点:只能做到每个节点或者容器上的前 n 项和,却无法计算全局的前 n 项和。
现提供一种解决方案,希望大家多多指正。计算过程需要两次遍历全部数据:第一次遍历计算每个容器(partition)中数据的加和,并返回 partition 的 id 与该容器内数据的加和;第二次遍历在每个容器内累加,得到全局的前 n 项和。现有 Java 版本实现,如需要 Scala 版本或者 Python 版本实现请私信本人。
/**
 * Computes a global prefix sum (running sum of the first n elements) over a
 * distributed RDD in two passes:
 * <ol>
 *   <li>Pass 1: total of each partition, collected to the driver.</li>
 *   <li>Pass 2: per-partition running sum, seeded with the combined total of
 *       all earlier partitions, yielding globally correct prefix sums.</li>
 * </ol>
 */
public void sum(){
SparkConf conf = new SparkConf().setMaster("local").setAppName("temp");
JavaSparkContext ctx = new JavaSparkContext(conf);
try {
    List<Integer> list = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
    // cache(): the same RDD is traversed twice (once per pass).
    JavaRDD<Integer> sourceRdd = ctx.parallelize(list, 4).cache();
    // Pass 1: emit one (partitionId, partitionTotal) tuple per partition.
    List<Tuple2<Integer, Integer>> partitionTotals = sourceRdd.mapPartitionsWithIndex(
        new Function2<Integer, Iterator<Integer>, Iterator<Tuple2<Integer, Integer>>>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Iterator<Tuple2<Integer, Integer>> call(Integer partitionId, Iterator<Integer> values) throws Exception {
                int total = 0;
                while (values.hasNext()) {
                    total += values.next();
                }
                return Arrays.asList(new Tuple2<Integer, Integer>(partitionId, total)).iterator();
            }
        }, true).collect();
    // Offset for each partition = sum of every partition that precedes it.
    Map<Integer, Integer> partitionOffsets = this.sumPriPartition(partitionTotals);
    // Pass 2: running sum inside each partition, starting from its offset.
    // NOTE(review): this transformation is lazy and no action is applied to
    // prefixSums, so pass 2 never executes until a caller collects it —
    // preserved from the original; confirm intended usage.
    JavaRDD<Integer> prefixSums = sourceRdd.mapPartitionsWithIndex(
        new Function2<Integer, Iterator<Integer>, Iterator<Integer>>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Iterator<Integer> call(Integer partitionId, Iterator<Integer> values) throws Exception {
                // Plain ArrayList: the iterator is consumed single-threaded per
                // partition. The original CopyOnWriteArrayList copies its backing
                // array on every add — accidental O(n^2). Fully qualified because
                // the file's import block is outside this chunk.
                List<Integer> result = new java.util.ArrayList<Integer>();
                Integer running = partitionOffsets.get(partitionId);
                while (values.hasNext()) {
                    running += values.next();
                    result.add(running);
                }
                return result.iterator();
            }
        }, true);
} finally {
    // The original leaked the SparkContext; always release it.
    ctx.close();
}
}
/**
 * Builds a map from partition id to the sum of all elements in the partitions
 * that precede it — i.e. an exclusive prefix sum over the per-partition totals.
 * Relies on {@code list} being ordered by partition id, as produced by the
 * first pass's {@code collect()}.
 *
 * @param list per-partition totals as (partitionId, partitionSum) tuples
 * @return map of partitionId to the combined sum of all earlier partitions
 */
public Map<Integer, Integer> sumPriPartition(List<Tuple2<Integer, Integer>> list){
    Map<Integer, Integer> offsets = new HashMap<Integer, Integer>();
    int runningTotal = 0;
    for (Tuple2<Integer, Integer> entry : list) {
        // Record the total of everything BEFORE this partition, then add its own sum.
        offsets.put(entry._1, runningTotal);
        runningTotal += entry._2;
    }
    return offsets;
}