Spark: save a text file with Snappy compression

SQL context available as sqlContext.

scala> var myVar : String = "Foo"
myVar: String = Foo

scala> val myVal : String = "Foo"
myVal: String = Foo

scala> var myVar : String = "Foo1"
myVar: String = Foo1

scala> myVal="aa"
<console>:27: error: reassignment to val
myVal="aa"
^

scala> myVal="aa";
<console>:27: error: reassignment to val
myVal="aa";
^

scala> myVal="Foo";
<console>:27: error: reassignment to val
myVal="Foo";
^

scala> myVar="jack"
myVar: String = jack

scala> var myVar = 10;
myVar: Int = 10

scala> val myVal = "Hello, Scala!";
myVal: String = Hello, Scala!

scala> val (myVar1: Int, myVar2: String) = Pair(40, "Foo")
myVar1: Int = 40
myVar2: String = Foo

scala> val jack: (myVar1: Int, myVar2: String) = Pair(40, "Foo")
<console>:1: error: ')' expected but ':' found.
val jack: (myVar1: Int, myVar2: String) = Pair(40, "Foo")
^

scala> val jack: (myVar1: Int, myVar2: String) = Pair(40, "Foo")
<console>:1: error: ')' expected but ':' found.
val jack: (myVar1: Int, myVar2: String) = Pair(40, "Foo")
^
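
The two errors above are a syntax issue: a tuple type annotation cannot carry component names, so (myVar1: Int, myVar2: String) is not a valid type. A minimal sketch of the working form (plain Scala, not from the original session):

scala> val jack: (Int, String) = (40, "Foo")
jack: (Int, String) = (40,Foo)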

scala> val (myVar1, myVar2) = Pair(40, "Foo")
myVar1: Int = 40
myVar2: String = Foo

scala> object Test{
| def main(args:Array[String]){
| var x = 10
| if(x < ...

scala> Test
res0: Test.type = $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$Test$@63843e11

scala> Test
res1: Test.type = $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$Test$@63843e11

scala> Test.main("a")
<console>:28: error: type mismatch;
found : String("a")
required: Array[String]
Test.main("a")
^

scala> Test.main(List.("a"))
<console>:1: error: identifier expected but '(' found.
Test.main(List.("a"))
^

scala> val arr = sc.parallelize(Array(("A",1),("B",2),("C",3)));
arr: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[0] at parallelize at <console>:27

scala> arr.flatmap(x=>(x._1+x_2)).foreach(println);
<console>:30: error: value flatmap is not a member of org.apache.spark.rdd.RDD[(String, Int)]
arr.flatmap(x=>(x._1+x_2)).foreach(println);
^

scala> arr.map(x=>(x._1+x_2)).foreach(println);
<console>:30: error: not found: value x_2
arr.map(x=>(x._1+x_2)).foreach(println);
^

scala> arr.map(x=>(x._1+x._2)).foreach(println);

scala> val arr = sc.parallelize(Array(("A",1),("B",2),("C",3)));
arr: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[2] at parallelize at <console>:27

scala> arr.map(x=>(x._1+x._2)).foreach(println);

scala> List("a","b").foreach(println);
a
b

scala> val arr = sc.parallelize(Array(("A",1),("B",2),("C",3)));
arr: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[4] at parallelize at <console>:27

scala> println(arr);
ParallelCollectionRDD[4] at parallelize at <console>:27

scala> arr.foreach(println);

scala> val arr=sc.parallelize(Array(("A",1),("B",2),("C",3)))
arr: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[5] at parallelize at <console>:27

scala> arr.flatmap(x=>(x._1+x._2)).foreach(println)
<console>:30: error: value flatmap is not a member of org.apache.spark.rdd.RDD[(String, Int)]
arr.flatmap(x=>(x._1+x._2)).foreach(println)
^

scala> val arr=sc.parallelize(Array(("A",1),("B",2),("C",3)))
arr: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[6] at parallelize at <console>:27

scala> arr.map(x=>(x._1+x._2)).foreach(println)

scala> arr.flatMap(x=>(x._1+x._2)).foreach(println)

scala> arr.flatMap(x=>(x._1+x._2)).foreach(println)

scala> arr.flatMap(x=>(x._1+x._2)).foreach(println)

scala> arr.map(x=>(x._1+","+x._2)).foreach(println)
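
None of the foreach(println) calls above produce output in this console: the closure runs on the executors, so the printed lines end up in the executor logs rather than on the driver. To see the values locally, collect first; this also makes the map/flatMap difference visible. A small sketch, not from the original session (element order of collect on a parallelized array should match the input here, but that is an assumption):

arr.map(x=>(x._1+x._2)).collect()      // Array(A1, B2, C3) -- one String per pair
arr.flatMap(x=>(x._1+x._2)).collect()  // Array(A, 1, B, 2, C, 3) -- each String flattened into Chars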

scala> arr.first();
res16: (String, Int) = (A,1)

scala> arr.count()
res17: Long = 3

scala> val rdd = sc.parallelize(1,10,2);
<console>:27: error: too many arguments for method parallelize: (seq: Seq[T], numSlices: Int)(implicit evidence$1: scala.reflect.ClassTag[T])org.apache.spark.rdd.RDD[T]
val rdd = sc.parallelize(1,10,2);
^

scala> val rdd = sc.parallelize(1 to 10,2);
rdd: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[12] at parallelize at <console>:27

scala> val reduceRDD = rdd.reduce(_ + _)
reduceRDD: Int = 55

scala> rdd.first()
res18: Int = 1

scala> rdd.count
res19: Long = 10

scala> val reduceRDD1 = rdd.reduce(_ - _)
reduceRDD1: Int = 15
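
The 15 is partition-dependent. Assuming the usual even split of 1 to 10 into (1..5) and (6..10):

partition 0: 1-2-3-4-5  = -13
partition 1: 6-7-8-9-10 = -28
combine:     -13 - (-28) = 15

Subtraction is neither associative nor commutative, so rdd.reduce(_ - _) carries no guarantee; a different partitioning or combine order gives a different value.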

scala> val countRDD = rdd.count()
countRDD: Long = 10

scala> val firstRDD = rdd.first()
firstRDD: Int = 1

scala> val takeRDD = rdd.take(5)
takeRDD: Array[Int] = Array(1, 2, 3, 4, 5)

scala> val topRDD = rdd.top(3)
topRDD: Array[Int] = Array(10, 9, 8)

scala> val takeOrderedRDD = rdd.takeOrdered(3)
takeOrderedRDD: Array[Int] = Array(1, 2, 3)
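
top(n) returns the n largest elements in descending order, while takeOrdered(n) returns the n smallest in ascending order. takeOrdered also accepts an explicit Ordering, so the two can mirror each other; a one-line sketch, not from the original session:

rdd.takeOrdered(3)(Ordering[Int].reverse)   // Array(10, 9, 8), same as rdd.top(3)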

scala> println("func +: "+reduceRDD)
func +: 55

scala> println("func -: "+reduceRDD1)
func -: 15

scala> println("count: "+countRDD)
count: 10

scala> println("first: "+firstRDD)
first: 1

scala> println("take:")
take:

scala> takeRDD.foreach(x => print(x +" "))
1 2 3 4 5

scala> takeRDD.foreach(x => println(x +" "))
1
2
3
4
5

scala> arr.flatMap(x=>(x._1+x._2)).foreach(x=>println(x +""))

scala> val arr=sc.parallelize(Array(("A",1),("B",2),("C",3)))
arr: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[16] at parallelize at <console>:27

scala> arr.take(1)
res28: Array[(String, Int)] = Array((A,1))

scala> arr.foreach(x=>println(x._1 +","+x._2))

scala> arr.lookup("A");
res30: Seq[Int] = WrappedArray(1)

scala> arr.countByKey()
res31: scala.collection.Map[String,Long] = Map(B -> 1, A -> 1, C -> 1)

scala> arr.collectAsMap
res32: scala.collection.Map[String,Int] = Map(A -> 1, C -> 3, B -> 2)

scala> val arr = List(("A", 1), ("B", 2), ("A", 2), ("B", 3))
arr: List[(String, Int)] = List((A,1), (B,2), (A,2), (B,3))

scala> val rdd = sc.parallelize(arr,2)
rdd: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[21] at parallelize at <console>:29

scala> val countByKeyRDD = rdd.countByKey()
countByKeyRDD: scala.collection.Map[String,Long] = Map(B -> 2, A -> 2)

scala> val collectAsMapRDD = rdd.collectAsMap()
collectAsMapRDD: scala.collection.Map[String,Int] = Map(A -> 2, B -> 3)

scala> countByKeyRDD.foreach(print)
(B,2)(A,2)

scala> collectAsMapRDD.foreach(print)
(A,2)(B,3)
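
countByKey counts how many records share each key, while collectAsMap pulls the pairs into a local Map, so duplicate keys collapse to a single value (here the later (A,2) and (B,3) happened to win). To keep or combine every value per key, groupByKey or reduceByKey is the usual route; a sketch, not from the original session (the printed Iterable type may differ):

rdd.groupByKey().collectAsMap()   // Map(A -> CompactBuffer(1, 2), B -> CompactBuffer(2, 3))
rdd.reduceByKey(_ + _).collect()  // Array((A,3), (B,5)) in some order
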
scala> val rdd = sc.parallelize(List(1,2,3,4),2)
rdd: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[24] at parallelize at <console>:27

scala> val aggregateRDD = rdd.aggregate(2)(_+_,_ * _)
aggregateRDD: Int = 90

scala> println(aggregateRDD)
90
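
The 90 follows from the zero value 2 being applied once per partition by the seqOp and once more by the combOp. Assuming List(1,2,3,4) splits into partitions (1,2) and (3,4):

seqOp, partition 0: 2 + 1 + 2 = 5
seqOp, partition 1: 2 + 3 + 4 = 9
combOp:             2 * 5 * 9 = 90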

scala> def setOp(a:String,b:String):String={
| println("seqOp:"+a+"\t"+b);
| math.min(a.length, b.length).toString();
| }
setOp: (a: String, b: String)String

scala> def combOp(a:String,b:String):String = {
| println("combOp:"+a+"\t"+b);
| a+b;
| }
combOp: (a: String, b: String)String

scala> val z = sc.parallelize(List("12","23","345","4567"),2);
z: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[25] at parallelize at <console>:27

scala> z.aggregate("")(seqOp,combOp);
<console>:32: error: not found: value seqOp
z.aggregate("")(seqOp,combOp);
^

scala> def seqOp(a:String,b:String):String={
| println("seqOp:"+a+"\t"+b);
| math.min(a.length, b.length).toString();
| }
seqOp: (a: String, b: String)String

scala> z.aggregate("")(seqOp,combOp);
[Stage 30:> (0 + 2) / 2]combOp: 1
[Stage 30:=============================> (1 + 1) / 2]combOp:1 1
res37: String = 11
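
Only the combOp lines appear because seqOp runs inside the executors and its println goes to the executor logs, while combOp runs on the driver. The "11" itself, assuming the split ("12","23") / ("345","4567"):

partition 0: seqOp("","12")  -> "0",  seqOp("0","23")   -> "1"
partition 1: seqOp("","345") -> "0",  seqOp("0","4567") -> "1"
driver:      combOp("","1")  -> "1",  combOp("1","1")   -> "11"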

scala> z.aggregate("")(seqOp,combOp);
combOp: 1
combOp:1 1
res38: String = 11

scala> z.aggregate("")(seqOp,combOp);
combOp: 1
combOp:1 1
res39: String = 11

scala> def seqOp(a:String,b:String):String={
| println("seqOp jack:"+a+"\t"+b);
| math.min(a.length, b.length).toString();
| }
seqOp: (a: String, b: String)String

scala> def combOp(a:String,b:String):String = {
| println("combOp jack:"+a+"\t"+b);
| a+b;
| }
combOp: (a: String, b: String)String

scala> val z = sc.parallelize(List("12","23","345","4567"),2);
z: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[26] at parallelize at <console>:27

scala> z.aggregate("")(seqOp,combOp);
[Stage 33:> (0 + 2) / 2]combOp jack: 1
[Stage 33:=============================> (1 + 1) / 2]combOp jack:1 1
res40: String = 11

scala> z.aggregate("")(seqOp,combOp);
combOp jack: 1
[Stage 34:=============================> (1 + 1) / 2]combOp jack:1 1
res41: String = 11

scala> z.aggregate("")(seqOp,combOp);
[Stage 35:> (0 + 0) / 2]combOp jack: 1
combOp jack:1 1
res42: String = 11

scala> z.first()
res43: String = 12

scala> z.top(4)
res44: Array[String] = Array(4567, 345, 23, 12)

scala> z.count
res45: Long = 4

scala> val z = List("12","23","345","4567");
z: List[String] = List(12, 23, 345, 4567)

scala> z.aggregate("")(seqOp,combOp);
seqOp jack: 12
seqOp jack:0 23
seqOp jack:1 345
seqOp jack:1 4567
res46: String = 1

scala> def seqOp(a:String,b:String):String={
| println("seqOp jack:"+a+"\t"+b);
| println("return:"+math.min(a.length, b.length).toString());
| return math.min(a.length, b.length).toString();
| }
seqOp: (a: String, b: String)String

scala> def combOp(a:String,b:String):String = {
| println("combOp jack:"+a+"\t"+b);
| a+b;
| }
combOp: (a: String, b: String)String

scala> val z = List("12","23","345","4567");
z: List[String] = List(12, 23, 345, 4567)

scala> z.aggregate("")(seqOp,combOp);
seqOp jack: 12
return:0
seqOp jack:0 23
return:1
seqOp jack:1 345
return:1
seqOp jack:1 4567
return:1
res47: String = 1
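
On a plain Scala List there are no partitions, so aggregate degenerates into a sequential fold: seqOp is threaded through all four elements (exactly the trace printed above) and combOp is never invoked, which is why the result is "1" rather than the "11" the two-partition RDD produced. For a local sequential collection the call behaves like a foldLeft; a one-liner, not from the session:

z.foldLeft("")(seqOp)   // "1"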

scala> countByKeyRDD.foreach(print)
(B,2)(A,2)

scala> collectAsMapRDD.foreach(print)
(A,2)(B,3)

scala> collectAsMapRDD.saveAsTextFile("hdfs://192.168.1.56/tmp/jackteset")
<console>:34: error: value saveAsTextFile is not a member of scala.collection.Map[String,Int]
collectAsMapRDD.saveAsTextFile("hdfs://192.168.1.56/tmp/jackteset")
^
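
collectAsMap hands back a local scala.collection.Map, not an RDD, so there is nothing Spark-side left to save. Either call saveAsTextFile on the RDD itself, or turn the map back into an RDD first; a sketch, not from the original session (the second path name is just an example):

rdd.saveAsTextFile("hdfs://192.168.1.56/tmp/jackteset")
sc.parallelize(collectAsMapRDD.toSeq).saveAsTextFile("hdfs://192.168.1.56/tmp/jackteset_map")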

scala> var rdd1 = sc.makeRDD(1 to 10,2)
rdd1: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[28] at makeRDD at <console>:27

scala> rdd1.saveAsTextFile("hdfs://192.168.1.56/tmp/jackteset")

scala> rdd1.saveAsTextFile("hdfs://192.168.1.56/tmp/jackteset1",classOf[org.apache.hadoop.io.compress.SnappyCodec])
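
Passing the codec class is all saveAsTextFile needs: the part files are written Snappy-compressed (part-00000.snappy and so on), provided the native Snappy libraries are available on the cluster, and sc.textFile reads them back transparently. A short sketch against the same path, not from the original session:

val back = sc.textFile("hdfs://192.168.1.56/tmp/jackteset1")
back.count()   // 10, same as rdd1

Other Hadoop codecs such as org.apache.hadoop.io.compress.GzipCodec can be passed the same way.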