1. Code
package com.yy.base
/** Demonstrates common immutable `List` operations by printing each result.
  *
  * Uses an explicit `main` method rather than the `App` trait, which is
  * discouraged because of its delayed-initialization pitfalls.
  */
object ListBaseOps {
  def main(args: Array[String]): Unit = {
    // ::: concatenates lists (right-associative, so this builds left-to-right).
    println(List(1, 2) ::: List(3, 4) ::: List(5, 6))
    // Number of elements (O(n) on List).
    println(List(1, 2, 3, 4, 5, 6).length)

    val list = List("scala", "spark", "hadoop")
    // Last element.
    println(list.last)
    // All elements except the last one.
    println(list.init)
    // Reversed copy.
    println(list.reverse)
    // First n elements.
    println(list.take(2))
    // All but the first n elements.
    println(list.drop(2))
    // Split into a pair: (first n elements, the rest).
    println(list.splitAt(2))
    // Element at index n — `apply` is what list(n) desugars to.
    println(list.apply(2))
    println(list(2))

    val data = List('a', 'b', 'c', 'd', 'e')
    // Range of valid indices: Range 0 until 5.
    println(data.indices)
    // Pair each index with its element: Vector((0,a), (1,b), ...).
    println(data.indices zip data)
    // Pair each element with its index: List((a,0), (b,1), ...).
    println(data.zipWithIndex)
    println(data.toString())
    // mkString(start, separator, end).
    println(data.mkString("[", "|", "]"))
    // No separator by default.
    println(data.mkString)

    val buffer = new StringBuilder
    // Append the elements to the builder using start/separator/end markers.
    data.addString(buffer, "[", "||", "]")
    println(buffer)

    // Copy into the array starting at destination index 5;
    // slots 0-4 keep their default value '\u0000'.
    val arr = new Array[Char](10)
    data.copyToArray(arr, 5)
    arr.foreach(print)
    println()

    // An iterator is consumed as next() is called; these print 'a' then 'b'.
    val iterator = data.iterator
    println(iterator.next())
    println(iterator.next())
  }
}
2. Output
List(1, 2, 3, 4, 5, 6)
6
hadoop
List(scala, spark)
List(hadoop, spark, scala)
List(scala, spark)
List(hadoop)
(List(scala, spark),List(hadoop))
hadoop
hadoop
Range(0, 1, 2, 3, 4)
Vector((0,a), (1,b), (2,c), (3,d), (4,e))
List((a,0), (b,1), (c,2), (d,3), (e,4))
List(a, b, c, d, e)
[a|b|c|d|e]
abcde
[a||b||c||d||e]
abcde
a
b