name,age,fv_value
libai,18,9999.99
xuance,30,99.99
diaochan,28,99.99
-
读csv文件
import org.apache.spark.sql.{DataFrame, SparkSession}
object CreateDataFrameFromCsv {
  /** Example: build DataFrames from CSV files, with and without a header row. */
  def main(args: Array[String]): Unit = {
    // Create the SparkSession (a wrapper around / enhancement of SparkContext).
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    try {
      // (1) CSV file WITH a header row.
      val df: DataFrame = spark.read
        .option("header", "true")      // treat the first line as column names
        .option("inferSchema", "true") // sample the data to infer column types
        .csv("src/main/scala/data/user.csv")
      // df.printSchema()
      // df.show()

      // (2) CSV file WITHOUT a header row.
      // BUG FIX: toDF returns a NEW DataFrame (DataFrames are immutable).
      // The original discarded its result and then printed df2, showing the
      // default _c0/_c1/_c2 column names. Chain toDF so the renamed frame is used.
      val df2: DataFrame = spark.read
        .option("inferSchema", "true")
        .csv("src/main/scala/data/user2.csv")
        .toDF("name", "age", "fv")
      df2.printSchema()
      df2.show()
    } finally {
      // Always release the session, even if reading/printing throws.
      spark.stop()
    }
  }
}
-
写入csv类型的文件中
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{DoubleType, IntegerType, StringType, StructField, StructType}
object WriteToCsv {
  /**
   * Example: read "name,age,fv" text lines, attach an explicit schema via a
   * Row RDD + StructType, and write the result out as CSV.
   */
  def main(args: Array[String]): Unit = {
    // Create the SparkSession.
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    try {
      val sc: SparkContext = spark.sparkContext
      val lines: RDD[String] = sc.textFile("src/main/scala/data/user.txt")
      // Rows carry raw values only — no field names or types yet.
      val rowRdd: RDD[Row] = lines.map { line =>
        // Assumes every line is exactly "name,age,fv" with numeric age/fv —
        // TODO(review): confirm input format; malformed lines will throw here.
        val fields = line.split(",")
        Row(fields(0), fields(1).toInt, fields(2).toDouble)
      }
      // Schema: field name, field type (nullability defaults to nullable).
      val schema: StructType = StructType(
        Array(
          StructField("name", StringType),
          StructField("age", IntegerType),
          StructField("fv", DoubleType)
        )
      )
      // Bind the Row RDD to the schema, producing a typed DataFrame.
      val df1: DataFrame = spark.createDataFrame(rowRdd, schema)
      df1.write.csv("src/main/scala/data/outcsv")
    } finally {
      // BUG FIX: the original never called spark.stop(), leaving the local
      // session and its resources running; stop it even if the job throws.
      spark.stop()
    }
  }
}