package Basic
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable.ArrayBuffer
/**
* Created by tg on 10/23/16.
*/
object OperatorTest {
def main(args: Array[String]): Unit = {
// mapPartitionsDemo
// mapPartitionsWithIndex
// sampleDemo
// unionDemo
// intersectionDemo
distinctDemo
}
/**
* distinct removes duplicate elements from an RDD, leaving only the unique elements.
*/
def distinctDemo: Unit ={
val conf=new SparkConf().setAppName("distinctDemo").setMaster("local")
val sc=new SparkContext(conf)
// Mock data
val accessLogs=sc.parallelize(Array(
"user1 2016-01-01 23:58:42",
"user1 2016-01-01 23:58:43",
"user1 2016-01-01 23:58:44",
"user2 2016-01-01 12:58:42",
"user2 2016-01-01 12:58:46",
"user3 2016-01-01 12:58:42",
"user4 2016-01-01 12:58:42",
"user5 2016-01-01 12:58:42",
"user6 2016-01-01 12:58:42",
"user6 2016-01-01 12:58:45"
))
val resultRDD=accessLogs.map(log=>log.split(" ")(0)).distinct()
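// For reference (a sketch, not part of the original code): distinct() behaves like
// pairing each element with a dummy value, reducing by key, and dropping the value, e.g.
// accessLogs.map(log=>(log.split(" ")(0),null)).reduceByKey((a,_)=>a).map(_._1)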
println("UV:"+resultRDD.count())
}
/**
* intersection returns the elements that appear in both RDDs.
*/
def intersectionDemo: Unit ={
val conf=new SparkConf().setAppName("intersectionDemo").setMaster("local")
val sc=new SparkContext(conf)
// Mock data
val stus1=sc.parallelize(Array("leo","jack","tom","marray","jen","jarry"))
// Mock data
val stus2=sc.parallelize(Array("jack","devid","leo","hengry"))
val resultRDD=stus1.intersection(stus2)
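// Note: intersection also removes duplicates within each input RDD and performs a shuffle internally.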
resultRDD.foreach(println _)
}
/**
* union (equivalent to the "++" operator) takes the union of two RDDs; duplicate elements are not removed.
* The partitions of both input RDDs are carried over into the single output RDD.
*/
def unionDemo: Unit ={
val conf=new SparkConf().setAppName("unionDemo").setMaster("local")
val sc=new SparkContext(conf)
// Mock data
val stus1=sc.parallelize(Array("leo","jack","tom","marray","jen","jarry"))
// Mock data
val stus2=sc.parallelize(Array("green","devid","lily","hengry"))
val resultRDD=stus1.union(stus2)
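// The ++ operator is an alias for union and, like union, keeps duplicates; for example
// (a sketch, not run here) stus1.union(stus1).count() would return 12 rather than 6.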
resultRDD.foreach(item=>println(item))
println(resultRDD.count())
}
/**
* sample randomly samples the elements of an RDD to produce a new RDD containing a subset.
* Its parameters control whether sampling is done with replacement, the fraction of the data to keep, and the random seed.
* The seed is optional and can be omitted.
*/
def sampleDemo: Unit ={
val conf=new SparkConf().setAppName("sampleDemo").setMaster("local")
val sc=new SparkContext(conf)
// Mock data
val stus=sc.parallelize(Array("leo","jack","tom","marray","jen","jarry",
"green","devid","lily","hengry"))
val resultRDD=stus.sample(false,0.2)
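// A reproducible variant with an explicit seed (the value 42L is just an example):
// val seededRDD=stus.sample(withReplacement=false,fraction=0.2,seed=42L)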
resultRDD.foreach(println _)
}
/**
* mapPartitionsWithIndex is similar to mapPartitions,
* except that it also tracks the index of the original partition, so you can tell which partition each element came from.
* Its argument is a function that takes an integer partition index and an iterator.
*/
def mapPartitionsWithIndex: Unit ={
val conf=new SparkConf().setAppName("mapPartitionsWithIndex").setMaster("local")
val sc=new SparkContext(conf)
// Mock data
val stus=sc.parallelize(Array("leo","jack","tom","marray","jen"),2)
// Note: the function passed to mapPartitionsWithIndex takes two parameters (partition index, iterator)
val resultRDD=stus.mapPartitionsWithIndex((m,n)=>{
val result=ArrayBuffer[String]()
var stuName=""
var info=""
while(n.hasNext){
stuName=n.next()
info=stuName+" is in class "+(m+1)
result+=info
}
result.iterator
})
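// A more concise equivalent (a sketch): transform the iterator lazily instead of buffering it, e.g.
// stus.mapPartitionsWithIndex((index,iter)=>iter.map(name=>name+" is in class "+(index+1)))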
val classInfo=resultRDD.collect()
for(elem <- classInfo) println(elem)
}
/**
* The function passed to mapPartitions is applied to each partition as a whole, i.e. the contents of a partition are processed together.
*/
def mapPartitionsDemo: Unit ={
val conf=new SparkConf().setAppName("mapPartitionsDemo").setMaster("local")
val sc=new SparkContext(conf)
// Mock data
val stus=sc.parallelize(Array("leo","jack","tom","marray"),2)
val scores=Map(("leo", 600),("jack", 650),("tom", 500),("marray", 620),("jen", 510))
val resultRDD=stus.mapPartitions(x=>{
val result=ArrayBuffer[Int]() // create a growable buffer for the scores
var score=0
while(x.hasNext){
score=scores(x.next())
result+=score // append the score to the buffer
}
result.iterator // return an iterator over the results
})
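// Unlike map, the function above is invoked once per partition (twice in this demo, which
// uses 2 partitions) rather than once per element, which helps when per-partition setup
// such as opening a database connection is expensive.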
// Print each score
resultRDD.foreach(item=>{
println(item)
})
}
}