map:将数据集中的每条数据都做一个映射,返回一条新数据
flatMap:数据集中的每条数据都可以返回多条数据
mapPartitions:一次性对一个partition中的数据进行处理
代码
/**
 * Demonstrates the typed (Dataset) transformations `map`, `flatMap`
 * and `mapPartitions` on small JSON-backed Datasets.
 *
 * - `map`: one output row per input row
 * - `flatMap`: zero or more output rows per input row
 * - `mapPartitions`: transform an entire partition's iterator at once
 */
object TypedOperation {

  /** Schema of employee.json rows. */
  case class Employee(name: String, age: Long, depId: Long, gender: String, salary: Long)

  /** Schema of department.json rows. */
  case class Department(id: Long, name: String)

  def main(args: Array[String]): Unit = {
    val sparkSession = SparkSession
      .builder()
      .appName("BasicOperation")
      .master("local")
      .getOrCreate()

    // Needed for the `.as[T]` encoder derivation below.
    import sparkSession.implicits._

    // Resolve test data shipped on the classpath.
    val employeePath = this.getClass.getClassLoader.getResource("employee.json").getPath
    val departmentPath = this.getClass.getClassLoader.getResource("department.json").getPath

    val employeeDF = sparkSession.read.json(employeePath)
    val departmentDF = sparkSession.read.json(departmentPath)

    // Convert untyped DataFrames into typed Datasets.
    val employeeDS = employeeDF.as[Employee]
    val departmentDS = departmentDF.as[Department]

    // map: each employee becomes one (name, raised salary) tuple.
    employeeDS.map { employee => (employee.name, employee.salary + 1000) }.show()

    // flatMap: each department fans out into two derived departments.
    departmentDS.flatMap {
      department => Seq(Department(department.id + 1, department.name + "_1"), Department(department.id + 2, department.name + "_2"))
    }.show()

    // mapPartitions: transform the partition's iterator lazily instead of
    // buffering the whole partition in memory (the hand-rolled while-loop +
    // mutable ArrayBuffer version materializes every row eagerly).
    employeeDS.mapPartitions { employees =>
      employees.map(emp => (emp.name, emp.salary + 1000))
    }.show()

    // Release Spark resources before the JVM exits.
    sparkSession.stop()
  }
}
9808

被折叠的评论
为什么被折叠?



