Spark RDD Operators (Part 9): Basic Actions (first, take, collect, count, countByValue, reduce, aggregate, fold, top, takeOrdered, foreach)
first
- Returns the first element of the RDD.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object first {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("firstScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5))
    println(rdd1.first())
  }
}

Result:

1
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;

public class firstJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("firstJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
        Integer first = rdd1.first();
        System.out.println(first);
    }
}

Result:

1
take
- rdd.take(n) returns the first n elements of the RDD.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object take {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("takeScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5))
    val taken: Array[Int] = rdd1.take(3)
    taken.foreach(println)
  }
}

Result:

1
2
3
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;
import java.util.List;

public class takeJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("takeJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
        List<Integer> taken = rdd1.take(3);
        for (Integer a : taken) {
            System.out.println(a);
        }
    }
}

Result:

1
2
3
collect
- Returns all elements of the RDD to the driver; be careful with large datasets, since the whole result must fit in driver memory.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object collect {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("collectScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5))
    val collected: Array[Int] = rdd1.collect()
    collected.foreach(println)
  }
}

Result:

1
2
3
4
5
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;
import java.util.List;

public class collectJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("collectJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
        List<Integer> collected = rdd1.collect();
        for (Integer a : collected) {
            System.out.println(a);
        }
    }
}

Result:

1
2
3
4
5
count
- Returns the number of elements in the RDD.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object count {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("countScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5))
    val cnt: Long = rdd1.count()
    println(cnt)
  }
}

Result:

5
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;

public class countJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("countJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
        long cnt = rdd1.count();
        System.out.println(cnt);
    }
}

Result:

5
countByValue
- Counts how many times each element occurs in the RDD, returning a map of the form {(key1, count), (key2, count), ..., (keyn, count)}.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object countByValue {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("countByValueScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 3, 4, 4, 5))
    val counts: collection.Map[Int, Long] = rdd1.countByValue()
    counts.foreach(println)
  }
}

Result (map iteration order may vary):

(1,1)
(2,1)
(3,2)
(4,2)
(5,1)
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;
import java.util.Map;

public class countByValueJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("countByValueJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 3, 4, 4, 5));
        Map<Integer, Long> counts = rdd1.countByValue();
        for (Map.Entry<Integer, Long> e : counts.entrySet()) {
            System.out.println(e.getKey() + ":" + e.getValue());
        }
    }
}

Result (map iteration order may vary):

1:1
2:1
3:2
4:2
5:1
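countByValue can be thought of as shorthand for pairing each element with a count of 1, summing per key, and collecting the small result map to the driver. A minimal Scala sketch under that assumption, reusing the sc and data from the example above (variable names are illustrative):

val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 3, 4, 4, 5))
val counts: collection.Map[Int, Long] = rdd1
  .map(x => (x, 1L))      // pair each element with a count of 1
  .reduceByKey(_ + _)     // sum the counts per distinct value
  .collectAsMap()         // bring the (small) result map to the driver
counts.foreach(println)   // same pairs as rdd1.countByValue()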
reduce
- rdd.reduce(func) aggregates all elements of the RDD in parallel, similar to reduce on Scala collections; func should be commutative and associative so the result does not depend on how the data is partitioned.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object reduce {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("reduceScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4))
    val sum: Int = rdd1.reduce((x, y) => x + y)
    println(sum)
  }
}

Result:

10
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;

import java.util.Arrays;

public class reduceJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("reduceJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4));
        Integer sum = rdd1.reduce(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        System.out.println(sum);
    }
}

Result:

10
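One caveat worth knowing: reduce throws UnsupportedOperationException on an empty RDD, whereas fold (covered below) simply returns its zero value. A short Scala sketch, reusing the sc from the examples above:

val empty: RDD[Int] = sc.parallelize(List.empty[Int])
// empty.reduce(_ + _)          // throws UnsupportedOperationException: empty collection
println(empty.fold(0)(_ + _))   // prints 0: the zero value is returned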
aggregate
- Similar to reduce(), but the result type may differ from the RDD's element type: it takes a zero value, a function that folds elements into the accumulator within each partition, and a function that merges accumulators across partitions. It is rarely used in practice.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object aggregate {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[1]").setAppName("aggregateScala")
    val sc = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5))

    // seqOp: folds elements into the accumulator within a partition
    def fun1(x: Int, y: Int): Int = {
      println("com_exp = %d + %d".format(x, y))
      x + y
    }

    // combOp: merges the per-partition results on the driver
    def fun2(x: Int, y: Int): Int = {
      println("com_exp = %d + %d".format(x, y))
      x + y
    }

    val result: Int = rdd1.aggregate(5)(fun1, fun2)
    println(result)
  }
}

Result (with local[1] there is a single partition; the zero value 5 is applied once inside the partition and once more in the final merge):

com_exp = 5 + 1
com_exp = 6 + 2
com_exp = 8 + 3
com_exp = 11 + 4
com_exp = 15 + 5
com_exp = 5 + 20
25
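The point of aggregate is that the accumulator type can differ from the element type. As an illustration (object and variable names are mine, not from the original), here is a complete Scala program that computes an average by accumulating a (sum, count) pair:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object aggregateAvg {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("aggregateAvg")
    val sc = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5))
    // The accumulator is a (sum, count) pair, not an Int
    val (sum, count) = rdd1.aggregate((0, 0))(
      (acc, v) => (acc._1 + v, acc._2 + 1),   // seqOp: fold one element into the pair
      (a, b) => (a._1 + b._1, a._2 + b._2)    // combOp: merge two pairs
    )
    println(sum.toDouble / count)  // 3.0
  }
}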
fold
- rdd.fold(num)(func); rarely used in practice.
- Like reduce(), but with an initial value num that each computation starts from. Note that the fold runs within each partition first (starting from num), and the per-partition results are then folded again across partitions (starting from num once more), so num is incorporated once per partition plus once in the final merge.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object fold {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[1]").setAppName("foldScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4))

    def seqno(m: Int, n: Int): Int = {
      println("seq_exp = %d + %d".format(m, n))
      m + n
    }

    val result: Int = rdd1.fold(5)(seqno)
    println(result)
  }
}

Result (with local[1] there is a single partition):

seq_exp = 5 + 1
seq_exp = 6 + 2
seq_exp = 8 + 3
seq_exp = 11 + 4
seq_exp = 5 + 15
20
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;

import java.util.Arrays;

public class foldJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[1]").setAppName("foldJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4));
        Integer result = rdd1.fold(5, new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        System.out.println(result);
    }
}

Result:

20
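Because the zero value is folded in once per partition and once more in the final merge, a non-neutral zero makes the result depend on the partition count. A minimal Scala sketch, reusing the sc from above; the partition counts are chosen purely for illustration:

val a = sc.parallelize(List(1, 2, 3, 4), 1).fold(5)(_ + _)
// 1 partition:  (5+1+2+3+4) = 15, then 5 + 15 = 20
val b = sc.parallelize(List(1, 2, 3, 4), 2).fold(5)(_ + _)
// 2 partitions: (5+1+2) = 8 and (5+3+4) = 12, then 5 + 8 + 12 = 25
println((a, b))  // (20,25)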
top
- rdd.top(n) returns the first n elements in descending order by default, or according to a supplied ordering.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object top {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("topScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5))
    val top3: Array[Int] = rdd1.top(3)
    top3.foreach(println)
  }
}

Result:

5
4
3
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;
import java.util.List;

public class topJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("topJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
        List<Integer> top3 = rdd1.top(3);
        for (Integer a : top3) {
            System.out.println(a);
        }
        System.out.println(top3);
    }
}

Result:

5
4
3
[5, 4, 3]
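In Scala, top also accepts an implicit Ordering for custom ranking rules. A brief sketch, reusing the sc from above; the word list is illustrative:

val words = sc.parallelize(List("spark", "rdd", "action", "top"))
// Rank by string length rather than by the natural (alphabetical) order
val longest: Array[String] = words.top(2)(Ordering.by(_.length))
longest.foreach(println)  // action, spark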
takeOrdered
- rdd.takeOrdered(n)
- Sorts the RDD's elements in ascending order and returns the first n; a custom comparator can also be supplied (not covered here). It is the counterpart of top.
Scala version:

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object takeOrdered {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("takeOrderedScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4))
    val smallest3: Array[Int] = rdd1.takeOrdered(3)
    smallest3.foreach(println)
  }
}

Result:

1
2
3
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;
import java.util.List;

public class takeOrderedJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("takeOrderedJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
        List<Integer> smallest3 = rdd1.takeOrdered(3);
        for (Integer a : smallest3) {
            System.out.println(a);
        }
    }
}

Result:

1
2
3
foreach
- Applies the given function to each element of the RDD. The function runs on the executors, not the driver, so in cluster mode any println output appears in the executors' logs.
Scala version (the original example called takeOrdered and then Array.foreach, which exercises a local Scala collection rather than the RDD action; it is corrected here to call foreach on the RDD itself):

package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object foreach {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("foreachScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5))
    rdd1.foreach(println)
  }
}

Result: 1 through 5, one per line; the order may vary because partitions are processed in parallel.
Java version:

package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.Arrays;

public class foreachJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("foreachJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
        rdd1.foreach(new VoidFunction<Integer>() {
            @Override
            public void call(Integer integer) throws Exception {
                System.out.println(integer);
            }
        });
    }
}

Result: 1 through 5, one per line; the order may vary.
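A related action worth knowing: when each element requires expensive setup (say, a database connection), foreachPartition performs that setup once per partition rather than once per element. A hedged Scala sketch, reusing rdd1 from above; the connection code is hypothetical and shown only as comments:

rdd1.foreachPartition { iter =>
  // Hypothetical setup, performed once per partition rather than per element:
  // val conn = Database.connect()
  iter.foreach(x => println(x))  // replace with conn.insert(x), etc.
  // conn.close()
}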