1. Reading HDFS files with Spark
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
/** Environment and parameters */
val spark: SparkSession = SparkSession.builder().getOrCreate()
val sc: SparkContext = spark.sparkContext
sc.setLogLevel("WARN")
val inPath: String = "hdfs:///path/to/input" // placeholder HDFS input path; a directory is fine
/** Processing logic */
val source: RDD[String] = sc.textFile(inPath)
// Parse each line into a 3-tuple; same comma-split logic as the native-IO version in section 2
val resultArr: RDD[(String, String, String)] = source.map { line =>
  val s: Array[String] = line.split(",")
  (s(0), s(1), s(2))
}
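To sanity-check and persist the result, one might do something like the sketch below. This is an illustration, not part of the original snippet; outPath is a hypothetical HDFS output directory:

// Peek at a few parsed records on the driver
resultArr.take(5).foreach(println)
// Write the tuples back to HDFS as text, one record per line (outPath is a placeholder)
resultArr.map { case (a, b, c) => s"$a,$b,$c" }.saveAsTextFile(outPath)
spark.stop()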
2. Reading with native Scala IO
import java.io.File

/**
 * Recursively collect all regular files under a directory.
 *
 * @param dir target directory
 * @return an iterator over every file found under dir, including subdirectories
 */
def subdirs2(dir: File): Iterator[File] = {
  val entries = dir.listFiles // list once instead of scanning the directory twice
  val d = entries.filter(_.isDirectory)
  val f = entries.filter(_.isFile).toIterator
  f ++ d.toIterator.flatMap(subdirs2 _)
}
// Data path; inPath only needs to point at a directory, not an individual file
val fileArr: Array[File] = subdirs2(new File(inPath)).toArray
// Process the data in every file under the path
val results: Array[Array[(String, String, String)]] = fileArr.map { file =>
  val src = scala.io.Source.fromFile(file)
  try {
    src.getLines().toArray.map { line =>
      val s: Array[String] = line.split(",")
      (s(0), s(1), s(2))
    }
  } finally src.close() // release the file handle even if parsing throws
}
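On Scala 2.13 or later, scala.util.Using can manage the Source's lifetime instead of an explicit try/finally. A minimal variant of the same per-file parse, assuming 2.13+:

import scala.util.Using
import scala.io.Source

// Using.resource closes the Source automatically, even if parsing throws
val safeResults: Array[Array[(String, String, String)]] = fileArr.map { file =>
  Using.resource(Source.fromFile(file)) { src =>
    src.getLines().toArray.map { line =>
      val s = line.split(",")
      (s(0), s(1), s(2))
    }
  }
}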