List
package database
object list {
println("Welcome to the Scala worksheet") //> Welcome to the Scala worksheet
val bigData = List("Hadoop" , "Spark") //> bigData : List[String] = List(Hadoop, Spark)
val data = List(1 , 2 , 3) //> data : List[Int] = List(1, 2, 3)
val bigData_Core = "Hadoop" :: ("Spark" :: Nil)
//> bigData_Core : List[String] = List(Hadoop, Spark)
val data_Int = 1 :: 2 :: 3 :: Nil //> data_Int : List[Int] = List(1, 2, 3)
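// :: is right-associative, so the line above is read as 1 :: (2 :: (3 :: Nil))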
println(data.isEmpty) //> false
println(data.head) //> 1
println(data.tail.head) //> 2
println(data.tail) //> List(2, 3)
// bind the two elements of the list to a and b via pattern matching
val List(a,b) = bigData //> a : String = Hadoop
//| b : String = Spark
println("a : " + a + " === " + " b: " + b) //> a : Hadoop === b: Spark
val x :: y :: rest = data //> x : Int = 1
//| y : Int = 2
//| rest : List[Int] = List(3)
println("x : " + x + " === " + " y: " + y + " === " + rest )
//> x : 1 === y: 2 === List(3)
val shuffledData = List(6,3,5,6,2,9,1) //> shuffledData : List[Int] = List(6, 3, 5, 6, 2, 9, 1)
println(sortList(shuffledData)) //> List(1, 2, 3, 5, 6, 6, 9)
// insertion sort: recursively insert the head into the already-sorted tail
def sortList(list : List[Int]): List[Int] = list match{
case List() => List()
case head :: tail => compute (head, sortList(tail))
} //> sortList: (list: List[Int])List[Int]
// insert `data` into the already-sorted `dataSet`, keeping ascending order
def compute(data : Int , dataSet : List[Int]) : List[Int] = dataSet match{
case List() => List(data)
case head :: tail => if (data <= head) data :: dataSet
else head :: compute(data, tail)
} //> compute: (data: Int, dataSet: List[Int])List[Int]
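// A minimal trace of the insertion sort above (not part of the original worksheet):
//   sortList(List(3, 1, 2))
//   == compute(3, sortList(List(1, 2)))
//   == compute(3, compute(1, compute(2, List())))
//   == compute(3, compute(1, List(2)))
//   == compute(3, List(1, 2))
//   == List(1, 2, 3)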
}
println(List (1,2,3,4) ::: List (4,5,6,7,8) ::: List (10,11))
//> List(1, 2, 3, 4, 4, 5, 6, 7, 8, 10, 11)
println(List (1,2,3,4) ::: (List (4,5,6,7,8) ::: List (10,11)))
//> List(1, 2, 3, 4, 4, 5, 6, 7, 8, 10, 11)
println(List (1,2,3,4).length) //> 4
val bigData = List("Hadoop" , "Spark" , "Kaffka")
//> bigData : List[String] = List(Hadoop, Spark, Kaffka)
println(bigData.last) //> Kaffka
// Selects all elements except the last.
println(bigData.init) //> List(Hadoop, Spark)
println(bigData.reverse) //> List(Kaffka, Spark, Hadoop)
// immutable: the operations above return new lists and leave bigData unchanged
println(bigData) //> List(Hadoop, Spark, Kaffka)
println(bigData take 2) //> List(Hadoop, Spark)
println(bigData drop 1) //> List(Spark, Kaffka)
println(bigData splitAt 2) //> (List(Hadoop, Spark),List(Kaffka))
println(bigData apply 2) //> Kaffka
println(bigData(2)) //> Kaffka
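// take, drop and splitAt fit together: splitAt(n) pairs take(n) with drop(n),
// so the two halves recombine into the original list (a sketch, not in the worksheet):
println((bigData take 2) ::: (bigData drop 2)) //> List(Hadoop, Spark, Kaffka)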
val data=List('a' ,'b', 'c', 'd', 'e', 'f') //> data : List[Char] = List(a, b, c, d, e, f)
// a Range value from 0 to one less than the length of this list.
println(data.indices) //> Range(0, 1, 2, 3, 4, 5)
println(data.indices zip data) //> Vector((0,a), (1,b), (2,c), (3,d), (4,e), (5,f))
// Zips this list with its indices.
println(data.zipWithIndex) //> List((a,0), (b,1), (c,2), (d,3), (e,4), (f,5))
println(data.toString) //> List(a, b, c, d, e, f)
println(data.mkString ("[", ",,", "]")) //> [a,,b,,c,,d,,e,,f]
println(data.mkString ("******")) //> a******b******c******d******e******f
println(data mkString) //> abcdef
val buffer = new StringBuilder //> buffer : StringBuilder =
data addString (buffer, "(", ";;", ")") //> res0: StringBuilder = (a;;b;;c;;d;;e;;f)
println(buffer) //> (a;;b;;c;;d;;e;;f)
println(data) //> List(a, b, c, d, e, f)
val array = data.toArray //> array : Array[Char] = Array(a, b, c, d, e, f)
println(array.toList) //> List(a, b, c, d, e, f)
val new_Array = new Array[Char](10) //> new_Array : Array[Char] = Array(
package database
object list2 {
println("Welcome to the Scala worksheet") //> Welcome to the Scala worksheet
println((1 to 100).foldLeft(0)(_+_) ) //> 5050
println((0 /: (1 to 100))(_+_)) //> 5050
println((1 to 5).foldRight(100)(_-_)) //> -97
println(((1 to 5) :\ 100)(_-_)) //> -97
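// How the two folds above associate (plain arithmetic, not in the original worksheet):
//   foldLeft:  (((...(0 + 1) + 2) + ...) + 100)  = 5050
//   foldRight: 1 - (2 - (3 - (4 - (5 - 100))))   = -97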
println(List(1, -3, 4, 2, 6) sortWith (_ < _))//> List(-3, 1, 2, 4, 6)
println(List(1, -3, 4, 2, 6) sortWith (_ > _))//> List(6, 4, 2, 1, -3)
println(List(1, 2, 3, 4, 6) map (_ + 1)) //> List(2, 3, 4, 5, 7)
val data = List("Scala", "Hadoop", "Spark") //> data : List[String] = List(Scala, Hadoop, Spark)
println(data map (_.length)) //> List(5, 6, 5)
println(data map (_.toList.reverse.mkString)) //> List(alacS, poodaH, krapS)
println(data.map(_.toList)) //> List(List(S, c, a, l, a), List(H, a, d, o, o, p), List(S, p, a, r, k))
println(data.flatMap(_.toList)) //> List(S, c, a, l, a, H, a, d, o, o, p, S, p, a, r, k)
println(List.range(1, 10) flatMap (i => List.range(1, i) map (j => (i, j))))
//> List((2,1), (3,1), (3,2), (4,1), (4,2), (4,3), (5,1), (5,2), (5,3), (5,4), (
//| 6,1), (6,2), (6,3), (6,4), (6,5), (7,1), (7,2), (7,3), (7,4), (7,5), (7,6),
//| (8,1), (8,2), (8,3), (8,4), (8,5), (8,6), (8,7), (9,1), (9,2), (9,3), (9,4),
//| (9,5), (9,6), (9,7), (9,8))
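// The same pairs via a for-comprehension, which the compiler desugars into the
// flatMap/map chain above (a sketch, not in the original worksheet):
println(for (i <- List.range(1, 10); j <- List.range(1, i)) yield (i, j))
//> same output as the flatMap/map version above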
var sum = 0 //> sum : Int = 0
List(1, 2, 3, 4, 5) foreach (sum += _)
println("sum : " + sum) //> sum : 15
println(List(1, 2, 3, 4, 6, 7, 8, 9, 10) filter (_ % 2 ==0))
//> List(2, 4, 6, 8, 10)
println(data filter (_.length == 5)) //> List(Scala, Spark)
println(List(1, 2, 3, 4, 5) partition (_ % 2 ==0))
//> (List(2, 4),List(1, 3, 5))
println(List(1, 2, 3, 4, 5) find (_ % 2 ==0)) //> Some(2)
println(List(1, 2, 3, 4, 5) find (_ <=0)) //> None
println(List(1, 2, 3, 4, 5) takeWhile (_ < 4))
//> List(1, 2, 3)
println(List(1, 2, 3, 4, 5) dropWhile (_ < 4))
//> List(4, 5)
println(List(1, 2, 3, 4, 5) span (_ < 4)) //> (List(1, 2, 3),List(4, 5))
def hasTotallyZeroRow(m: List[List[Int]]) = m exists (row => row forall (_ == 0))
//> hasTotallyZeroRow: (m: List[List[Int]])Boolean
val m= List(List(1,0,0), List(0,0,0), List(0,0,1))
//> m : List[List[Int]] = List(List(1, 0, 0), List(0, 0, 0), List(0, 0, 1))
println(hasTotallyZeroRow(m)) //> true
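// A complementary check built from the same exists/forall idea (a sketch, not in
// the original worksheet): true when every row contains at least one zero.
def everyRowHasZero(m: List[List[Int]]) = m forall (row => row exists (_ == 0))
println(everyRowHasZero(m)) //> true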
}
ListBuffer ArrayBuffer Queue Stack
package database
import scala.collection.immutable.Queue
object ListBuffer {
println("Welcome to the Scala worksheet") //> Welcome to the Scala worksheet
import scala.collection.mutable.ListBuffer
val listBuffer = new ListBuffer[Int] //> listBuffer : scala.collection.mutable.ListBuffer[Int] = ListBuffer()
listBuffer += 1 //> res0: database.ListBuffer.listBuffer.type = ListBuffer(1)
listBuffer += 2 //> res1: database.ListBuffer.listBuffer.type = ListBuffer(1, 2)
println(listBuffer) //> ListBuffer(1, 2)
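// ListBuffer is a mutable builder for immutable Lists; toList converts it
// (a sketch, not in the original worksheet):
println(listBuffer.toList) //> List(1, 2)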
import scala.collection.mutable.ArrayBuffer
val arrayBuffer = new ArrayBuffer[Int]() //> arrayBuffer : scala.collection.mutable.ArrayBuffer[Int] = ArrayBuffer()
arrayBuffer += 1 //> res2: database.ListBuffer.arrayBuffer.type = ArrayBuffer(1)
arrayBuffer += 2 //> res3: database.ListBuffer.arrayBuffer.type = ArrayBuffer(1, 2)
println(arrayBuffer) //> ArrayBuffer(1, 2)
val empty = Queue[Int]() //> empty : scala.collection.immutable.Queue[Int] = Queue()
val queue1 = empty.enqueue(1) //> queue1 : scala.collection.immutable.Queue[Int] = Queue(1)
val queue2 = queue1.enqueue(List(2,3,4,5)) //> queue2 : scala.collection.immutable.Queue[Int] = Queue(1, 2, 3, 4, 5)
println(queue2) //> Queue(1, 2, 3, 4, 5)
val (element, left) = queue2.dequeue //> element : Int = 1
//| left : scala.collection.immutable.Queue[Int] = Queue(2, 3, 4, 5)
println(element + " : " + left) //> 1 : Queue(2, 3, 4, 5)
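// dequeue returns a (front element, remaining queue) pair; the immutable queue2
// itself is unchanged (a sketch, not in the original worksheet):
println(queue2) //> Queue(1, 2, 3, 4, 5)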
import scala.collection.mutable.Queue
val queue = Queue[String]() //> queue : scala.collection.mutable.Queue[String] = Queue()
queue += "a" //> res4: database.ListBuffer.queue.type = Queue(a)
queue ++= List("b", "c") //> res5: database.ListBuffer.queue.type = Queue(a, b, c)
println(queue) //> Queue(a, b, c)
println(queue.dequeue) //> a
println(queue) //> Queue(b, c)
import scala.collection.mutable.Stack
val stack = new Stack[Int] //> stack : scala.collection.mutable.Stack[Int] = Stack()
stack.push(1) //> res6: database.ListBuffer.stack.type = Stack(1)
stack.push(2) //> res7: database.ListBuffer.stack.type = Stack(2, 1)
stack.push(3) //> res8: database.ListBuffer.stack.type = Stack(3, 2, 1)
println(stack.top) //> 3
println(stack) //> Stack(3, 2, 1)
println(stack.pop) //> 3
println(stack) //> Stack(2, 1)
}
Set Map
package database
import scala.collection.mutable
import scala.collection.mutable.TreeSet
import scala.collection.immutable.TreeMap
import scala.collection.immutable.HashMap
object setmap {
println("Welcome to the Scala worksheet") //> Welcome to the Scala worksheet
val data = mutable.Set.empty[Int] //> data : scala.collection.mutable.Set[Int] = Set()
data ++= List(1, 2, 3) //> res0: database.setmap.data.type = Set(1, 2, 3)
data += 4 //> res1: database.setmap.data.type = Set(1, 2, 3, 4)
//data --= List(2, 3);
data.clear
println(data) //> Set()
val map = mutable.Map.empty[String, String] //> map : scala.collection.mutable.Map[String,String] = Map()
map("Java") = "Hadoop"
map("Scala") = "Spark"
println(map) //> Map(Scala -> Spark, Java -> Hadoop)
println(map("Scala")) //> Spark
val treeSet = TreeSet(9, 3, 1, 8, 0, 2, 7, 4, 6, 5)
//> treeSet : scala.collection.mutable.TreeSet[Int] = TreeSet(0, 1, 2, 3, 4, 5,
//| 6, 7, 8, 9)
println(treeSet) //> TreeSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
val treeSetForChar = TreeSet("Spark", "Scala", "Hadoop")
//> treeSetForChar : scala.collection.mutable.TreeSet[String] = TreeSet(Hadoop,
//| Scala, Spark)
println(treeSetForChar) //> TreeSet(Hadoop, Scala, Spark)
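// TreeSet sorts by an implicit Ordering; an explicit reverse ordering flips it
// (a sketch, not in the original worksheet):
val descendingSet = TreeSet(9, 3, 1, 8)(Ordering[Int].reverse)
println(descendingSet) //> TreeSet(9, 8, 3, 1)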
var treeMap = TreeMap("Scala" -> "Spark", "Java" -> "Hadoop")
//> treeMap : scala.collection.immutable.TreeMap[String,String] = Map(Java -> H
//| adoop, Scala -> Spark)
println(treeMap) //> Map(Java -> Hadoop, Scala -> Spark)
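// TreeMap keeps its keys sorted; + returns a new map with the pair added
// (a sketch, not in the original worksheet):
println(treeMap + ("Akka" -> "Actors")) //> Map(Akka -> Actors, Java -> Hadoop, Scala -> Spark)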
}