环境:Scala 2.12、Java 8、IDEA。
下面编写一个 Scala 入门小程序;环境的配置与安装方法请自行百度或 Google。
无需多话,直接上代码:
/**
* Created by lhh
*/
import scala.collection._
import scala.collection.immutable.HashSet
import scala.collection.mutable.ListBuffer
object scalaDemo {

  // ********* Notes **********
  /**
   * <Collections>
   * Scala collections come in three families: sequences (Seq), sets (Set) and maps (Map);
   * all of them extend the Iterable trait.
   * Collections come in mutable and immutable flavours: an immutable collection can never
   * change after construction (not to be confused with a `val` reference, which only fixes
   * the binding, not the contents).
   */
  /**
   * <Sequences>
   * Immutable sequences: import scala.collection.immutable._
   * In Scala a List is either empty (Nil) or a head element plus a tail list.
   * `9 :: List(5, 2)` — the `::` operator builds a new list from the given head and tail.
   * Note that `::` is right-associative: `9 :: 5 :: 2 :: Nil` parses as `9 :: (5 :: (2 :: Nil))`.
   */

  // ********* Methods **********

  /** Prints 1 to 10, one per line — basic `for` over a Range. */
  def forMethod(): Unit = {
    for (i <- 1 to 10) println(i)
  }

  /** Iterates an array three ways: by element, then twice over reversed indices. */
  def forArray(): Unit = {
    val arr = Array(1, 2, 3, 4, 5)
    for (i <- arr) print(i) // elements: 12345
    println()
    for (i <- (0 until arr.length).reverse) print(i) // indices reversed: 43210
    println()
    for (i <- arr.indices.reverse) print(i) // same result, idiomatic `indices`
  }

  /**
   * Demonstrates eta-expansion: `m1 _` turns the method into a function value.
   * In Scala, business logic is typically carried by function values like this.
   */
  def makeMethodToFunc(): Unit = {
    val fuc1 = m1 _
    println(fuc1(2, 2))
  }

  /** Product of two ints; used by [[makeMethodToFunc]]. */
  def m1(x: Int, y: Int): Int = x * y

  /** Shows array construction and default element values. */
  def newArray(): Unit = {
    val arr = new Array[Int](10)
    val arrString = new Array[String](10)
    val arr3 = Array("hadoop", "storm", "spark")
    // NOTE: concatenating an array prints its default toString (type@hashcode), not the contents.
    println("arr的hashcode" + arr)
    println("arrString的hashcode" + arrString)
    // Default element values: Int arrays are zero-filled, String arrays are null-filled.
    println("arr的缓冲数组" + arr.toBuffer)
    println("arr的缓冲数组" + arrString.toBuffer)
    println("arr3的角标为2的索引的值" + arr3(2)) // indices are 0-based, so this is the third element
  }

  /** Maps over an array with a placeholder lambda, then prints each result. */
  def forArraytoMap(): Unit = {
    val arr = Array("hadoop", "storm", "spark")
    for (i <- arr.map(_ + "map")) println(i)
  }

  /** Same filter+transform expressed two ways: for/yield with a guard vs. filter/map. */
  def ArrayYieldDemo(): Unit = {
    val arr = Array(1, 2, 3, 4, 5, 6, 7, 8)
    // Java-style thinking: explicit comprehension with a guard.
    val res = for (e <- arr if e % 2 == 0) yield e * 10
    println("复杂java思想" + res.toBuffer)
    // Scala-style thinking: filter + map with placeholders.
    val r = arr.filter(_ % 2 == 0).map(_ * 10)
    println("简单scala思想" + r.toBuffer)
  }

  /** Built-in aggregate helpers: sum, sorted, max. */
  def ArraySimpleMethod(): Unit = {
    val arr = Array(1, 2, 3, 9, 5, 6, 7, 4, 8)
    println(arr.sum)
    for (i <- arr.sorted) print(i)
    println()
    println(arr.max)
  }

  /**
   * Map lookup with a fallback via getOrElse.
   * The default must be a String: an Int default (the original `333`) widens the
   * expression type from String to Any. The key is present, so output is unchanged.
   */
  def scalaMap(): Unit = {
    val stringToString = Map("tom" -> "333")
    val str = stringToString.getOrElse("tom", "333")
    println(str)
  }

  /** Tuple access: unlike list indices, tuple positions start at 1 (_1, _2, ...). */
  def yuanzuDemo(): Unit = {
    val t = ("hadoop", 1233, 33.22)
    val value1 = t._1
    println(value1)
  }

  /** Zips two arrays into pairs; the left operand of `zip` supplies the first element. */
  def twoArrayNewArray_zip(): Unit = {
    val scores = Array(88, 95, 99)
    val names = Array("tom", "sun", "jetty")
    for (i <- names.zip(scores)) println(i)
    for (i <- scores.zip(names)) println(i)
  }

  /** Immutable List operators: prepend (::, +:), append (:+) and concatenation (++, ++:). */
  def ImmutListDemo(): Unit = {
    val lst1 = scala.collection.immutable.List(1, 2, 3)
    val lst0 = scala.collection.immutable.List(4, 5, 6)
    val lst2 = 0 :: lst1 // `::` prepends: List(0, 1, 2, 3)
    // Equivalent prepend spellings:
    // println(lst2)
    // println(lst1.::(0))
    // println(0+:lst1)
    println(lst1.+:(0)) // prepend
    println(lst1.:+(0)) // append
    println(lst0 ++ lst1)
    println(lst1 ++: lst0) // lst1's elements come first
  }

  /** Mutable ListBuffer: in-place append with += / append, bulk append with ++=. */
  def mutListDemo(): Unit = {
    val lst0 = ListBuffer[Int](1, 2, 3)
    val lst1 = new ListBuffer[Int]
    lst1 += 4
    lst1.append(5)
    println(lst1)
    lst0 ++= lst1
    println(lst0)
  }

  /**
   * Immutable HashSet: every operation returns a NEW set, the original is untouched.
   * The original computed set2/set3 and silently discarded them; they are printed now
   * so the demo actually shows the results.
   */
  def ImmutSetDemo(): Unit = {
    val set1 = new HashSet[Int]()
    val set2 = set1 + 4
    val set3 = set1 ++ Set(5, 6, 7)
    val set0 = Set(1, 3, 4) ++ set1
    println(set2)
    println(set3)
    println(set0)
  }

  /** Mutable HashMap: three equivalent ways to insert — update syntax, +=, and put. */
  def myMapDemo(): Unit = {
    val map1 = new mutable.HashMap[String, Int]()
    map1("spark") = 1
    map1 += (("hadoop", 2))
    map1.put("storm", 3)
    println(map1)
  }

  /** Instantiates the project-local Person class and prints its fields. */
  def classDemo(): Unit = {
    val p = new Person
    println(p.age + " __ " + p.id)
  }

  /** Entry point: uncomment a line to run the corresponding demo. */
  def main(args: Array[String]): Unit = {
    // ifelse()
    // ifelse1() // missing-else demo (result is Unit)
    // ifelse2() // mixed-type expression demo
    // forMethod() // for loop
    // forArray()
    // forArraytoMap()
    // makeMethodToFunc()
    // newArray()
    // ArrayYieldDemo()
    // ArraySimpleMethod()
    // scalaMap() // called a "map" (映射) in Scala
    // yuanzuDemo()
    // twoArrayNewArray_zip()
    // ImmutListDemo()
    // mutListDemo()
    // ImmutSetDemo()
    // myMapDemo()
    classDemo()
  }

  /** if/else as an expression: the branch value is assigned directly. */
  def ifelse(): Unit = {
    val x = 1
    val y = if (x > 1) 1 else -1
    println(y)
  }

  /**
   * An `if` without an `else` implicitly yields Unit on the false branch,
   * so `m` is typed AnyVal and prints "()" here — a common beginner surprise.
   */
  def ifelse1(): Unit = {
    val x = 1
    val m = if (x > 2) 1
    println(m)
  }

  /** Branches of different types (Int vs String) widen the result to Any. */
  def ifelse2(): Unit = {
    val x = 1
    val y = if (x > 1) 1 else "error"
    println(y)
  }

  // ********** Function values **********
  val func = (x: Int, y: Double) => (y, x) // swaps the pair
  val func2: (Int, Double) => (Double, Int) = { (x, y) => (y, x) } // same, with an explicit type
  val func1 = (x: Int, y: Double) => (x, y) // identity pairing
}
以上代码可以直接运行,欢迎互相讨论。