Spark RDD Operations and Descriptions, Part 2

This post is divided into three parts:

Part 1: Java implementations of the Spark RDD operators

Part 2: Descriptions of what each operator does

Part 3: Scala implementations of the same operators

1. The cartesian operator

1.1 Java implementation

package com.lyl.it;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import scala.Tuple2;

public class CartesianOperator {

	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("CartesianOperator").setMaster("local");

		JavaSparkContext sc = new JavaSparkContext(conf);
		List<String> words = Arrays.asList("A","B","C","D","F");
		List<String> numbers = Arrays.asList("1","2","3","4","5");
		JavaRDD<String> wordsRDD = sc.parallelize(words);
		JavaRDD<String> numberRDD = sc.parallelize(numbers);
		
		// cartesian pairs every element of wordsRDD with every element of numberRDD
		JavaPairRDD<String, String> pairs = wordsRDD.cartesian(numberRDD);
		for (Tuple2<String, String> pair:pairs.collect()) {
			System.out.println(pair);
		}
		
		sc.close();
	}
}

1.2 Description: cartesian computes the Cartesian product of the two RDDs, pairing every element of wordsRDD with every element of numberRDD. The program output is shown below.

18/08/02 15:37:35 INFO DAGScheduler: Job 0 finished: collect at CartesianOperator.java:25, took 0.178710 s
(A,1)
(A,2)
(A,3)
(A,4)
(A,5)
(B,1)
(B,2)
(B,3)
(B,4)
(B,5)
(C,1)
(C,2)
(C,3)
(C,4)
(C,5)
(D,1)
(D,2)
(D,3)
(D,4)
(D,5)
(F,1)
(F,2)
(F,3)
(F,4)
(F,5)
18/08/02 15:37:35 INFO BlockManagerInfo: Removed broadcast_0_piece0 on localhost:54275 in memory (size: 1350.0 B, free: 1115.3 MB)

1.3 Scala implementation

package com.lyl.it

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object CartesianOperator {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("CartesianOperator").setMaster("local")
    val sc = new SparkContext(conf)

    val words = Array("A", "B", "C", "D", "F")
    val numbers = Array("1", "2", "3", "4", "5")

    val wordsRDD = sc.parallelize(words)
    val numberRDD = sc.parallelize(numbers)

    // cartesian pairs every element of wordsRDD with every element of numberRDD
    val pairs = wordsRDD.cartesian(numberRDD)

    for (pair <- pairs.collect()) {
      println(pair)
    }
  }
}

2. The cogroup operator

2.1 Java implementation

package com.lyl.it;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;

import scala.Tuple2;

public class CogroupOperator {
    
	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("CogroupOperator").setMaster("local");
		JavaSparkContext sc = new JavaSparkContext(conf);
		
		@SuppressWarnings("unchecked")
		List<Tuple2<String, String>> studentList = Arrays.asList(
				new Tuple2<String, String>("1","A"),
				new Tuple2<String, String>("2","B"),
				new Tuple2<String, String>("1","C"),
				new Tuple2<String, String>("3","A"),
				new Tuple2<String, String>("1","F"),
				new Tuple2<String, String>("2","A")
				);
		
		@SuppressWarnings("unchecked")
		List<Tuple2<String, String>> sourcetList = Arrays.asList(
				new Tuple2<String, String>("1","100"),
				new Tuple2<String, String>("2","90"),
				new Tuple2<String, String>("1","80"),
				new Tuple2<String, String>("3","60"),
				new Tuple2<String, String>("1","50"),
				new Tuple2<String, String>("2","40")
				);
		
		JavaPairRDD<String, String> students = sc.parallelizePairs(studentList);
		JavaPairRDD<String, String> scores = sc.parallelizePairs(sourcetList);
		
		// cogroup groups the values of both RDDs by key: (id, (names, scores))
		JavaPairRDD<String, Tuple2<Iterable<String>, Iterable<String>>>
		studentScores = students.cogroup(scores);
		
		studentScores.foreach(new VoidFunction<Tuple2<String,Tuple2<Iterable<String>,Iterable<String>>>>() {
			
			private static final long serialVersionUID = 1L;

			public void call(
					Tuple2<String, Tuple2<Iterable<String>, Iterable<String>>> tuple)
					throws Exception {
				System.out.println("student id: "+tuple._1);
				System.out.println("student name: "+tuple._2._1);
				System.out.println("student score: "+tuple._2._2);
			}
		});
		
		sc.close();
	}
}

2.2 Description: cogroup groups the values of both pair RDDs by key; for each key the result holds two iterables, one with that key's values from each RDD. The program output is shown below.

18/08/02 15:44:23 INFO ShuffleBlockFetcherIterator: Started 0 remote fetches in 0 ms
student id: 2
student name: [B, A]
student score: [90, 40]
student id: 3
student name: [A]
student score: [60]
student id: 1
student name: [A, C, F]
student score: [100, 80, 50]
18/08/02 15:44:23 INFO Executor: Finished task 0.0 in stage 2.0 (TID 2). 1165 bytes result sent to driver

2.3 Scala implementation

package com.lyl.it

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object CogroupOperator {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("CogroupOperator").setMaster("local")
    val sc = new SparkContext(conf)

    val studentList = Seq(("1","A"),("2","B"),("1","C"),("3","A"),("1","F"),("2","A"))
    val sourcetList = Seq(("1","100"),("2","90"),("1","80"),("3","60"),("1","50"),("2","40"))

    val students = sc.parallelize(studentList)
    val scores = sc.parallelize(sourcetList)

    // cogroup groups the values of both RDDs by key: (id, (names, scores))
    students.cogroup(scores)
      .foreach { tuple =>
        println("student id: " + tuple._1)
        println("student name: " + tuple._2._1)
        println("student score: " + tuple._2._2)
      }
  }
}

3. The collect operator

3.1 Java implementation

package com.lyl.it;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;

public class CollectOperator {
	
	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("CollectOperator").setMaster("local");
		JavaSparkContext sc = new JavaSparkContext(conf);
		
		List<Integer> numberList = Arrays.asList(1,2,3,4,5);
		JavaRDD<Integer> numbers = sc.parallelize(numberList);
		
		// map doubles each element of the RDD
		JavaRDD<Integer> doubleNumbers = numbers.map(new Function<Integer, Integer>() {

			private static final long serialVersionUID = 1L;

			public Integer call(Integer v) throws Exception {
				return v*2;
			}
		});
		
		// collect brings all elements of the RDD back to the driver as a local List
		List<Integer> doubleNumberList = doubleNumbers.collect();
		for (Integer num:doubleNumberList) {
			System.out.println(num);
		}
		
		sc.close();
	}
}

3.2 Description: collect gathers all elements of the RDD back to the driver as a local collection. The program output is shown below.

18/08/02 15:47:05 INFO DAGScheduler: Job 0 finished: collect at CollectOperator.java:29, took 0.181348 s
2
4
6
8
10
18/08/02 15:47:05 INFO SparkUI: Stopped Spark web UI at http://192.168.158.1:4040
18/08/02 15:47:05 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!

3.3 Scala implementation

package com.lyl.it

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object CollectOperator {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("CollectOperator").setMaster("local")
    val sc = new SparkContext(conf)
    val numberList = Array(1, 2, 3, 4, 5)
    // map doubles each element; collect brings the results back to the driver
    val doubleNumbers = sc.parallelize(numberList)
      .map(v => v * 2)
      .collect()
    for (num <- doubleNumbers) {
      println(num)
    }
  }
}

4. The countByKey operator

4.1 Java implementation

package com.lyl.it;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;

import scala.Tuple2;

public class CountByKeyOperator {
	
	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("CountByKeyOperator").setMaster("local");
		JavaSparkContext sc = new JavaSparkContext(conf);
		
		@SuppressWarnings("unchecked")
		List<Tuple2<String, String>> scoreList = Arrays.asList(
				new Tuple2<String, String>("1","A"),
				new Tuple2<String, String>("2","A"),
				new Tuple2<String, String>("1","A"),
				new Tuple2<String, String>("3","A"),
				new Tuple2<String, String>("1","A"),
				new Tuple2<String, String>("2","A")
				);
		
		JavaPairRDD<String, String> students = sc.parallelizePairs(scoreList);
		
		// countByKey returns a Map from each key to the number of elements with that key
		Map<String, Object> counts = students.countByKey();
		
		for (Map.Entry<String, Object> studentCount:counts.entrySet()) {
			System.out.println(studentCount.getKey()+":"+studentCount.getValue());
		}
		
		sc.close();
	}
}

4.2 Description: countByKey counts how many elements exist for each key and returns the counts to the driver as a Map. The program output is shown below.

18/08/02 15:51:26 INFO DAGScheduler: Job 0 finished: countByKey at CountByKeyOperator.java:31, took 0.268164 s
2:2
3:1
1:3
18/08/02 15:51:26 INFO SparkUI: Stopped Spark web UI at http://192.168.158.1:4040

4.3 Scala implementation

package com.lyl.it

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object CountByKeyOperator {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("CountByKeyOperator").setMaster("local")
    val sc = new SparkContext(conf)

    val scoreList = Seq(("1","A"),("2","A"),("1","A"),("3","A"),("1","A"),("2","A"))
    // countByKey returns a Map from each key to the number of elements with that key
    val counts = sc.parallelize(scoreList).countByKey()

    for (studentCount <- counts) {
      println(studentCount)
    }
  }
}

5. The count operator

5.1 Java implementation

package com.lyl.it;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class CountOperator {

	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("CountOperator").setMaster("local");
		JavaSparkContext sc = new JavaSparkContext(conf);
		
		List<Integer> numberList = Arrays.asList(1,2,3,4,5);
		JavaRDD<Integer> numbers = sc.parallelize(numberList);
		
		// count returns the total number of elements in the RDD
		long count = numbers.count();
		System.out.println(count);
		
		sc.close();
	}
}

5.2 Description: count returns the total number of elements in the RDD. The program output is shown below.

18/08/02 15:57:52 INFO DAGScheduler: Job 0 finished: count at CountOperator.java:19, took 0.209459 s
5
18/08/02 15:57:52 INFO SparkUI: Stopped Spark web UI at http://192.168.158.1:4040
18/08/02 15:57:52 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!

5.3 Scala implementation

package com.lyl.it

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object CountOperator {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("CountOperator").setMaster("local")
    val sc = new SparkContext(conf)
    val numberList = Array(1, 2, 3, 4, 5)
    // count returns the total number of elements in the RDD
    val count = sc.parallelize(numberList).count()
    println(count)
  }
}


6. The distinct operator

6.1 Java implementation
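
The original post leaves this example empty. Below is a minimal sketch in the same style as the other operators in this post; the class name DistinctOperator and the sample data are placeholders of my own, not from the original.

package com.lyl.it;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class DistinctOperator {

	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("DistinctOperator").setMaster("local");
		JavaSparkContext sc = new JavaSparkContext(conf);

		// Sample data containing duplicates (placeholder values)
		List<Integer> numberList = Arrays.asList(1, 2, 2, 3, 3, 3, 4, 5);
		JavaRDD<Integer> numbers = sc.parallelize(numberList);

		// distinct returns a new RDD with duplicate elements removed
		JavaRDD<Integer> uniqueNumbers = numbers.distinct();
		for (Integer num : uniqueNumbers.collect()) {
			System.out.println(num);
		}

		sc.close();
	}
}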

6.2 Description: distinct removes duplicate elements from an RDD, returning a new RDD in which each distinct value appears only once.

6.3 Scala implementation
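
The original also leaves the Scala version empty; here is a minimal sketch under the same assumptions (placeholder object name and sample data).

package com.lyl.it

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object DistinctOperator {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("DistinctOperator").setMaster("local")
    val sc = new SparkContext(conf)

    // Sample data containing duplicates (placeholder values)
    val numberList = Array(1, 2, 2, 3, 3, 3, 4, 5)

    // distinct returns a new RDD with duplicate elements removed
    val uniqueNumbers = sc.parallelize(numberList).distinct().collect()
    for (num <- uniqueNumbers) {
      println(num)
    }
  }
}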
