import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, TextInputFormat}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.NewHadoopRDD
// Reads all text files under a local directory with the new Hadoop API so that,
// per partition, the originating InputSplit (and hence the file name) is available.
// NOTE(review): this block is truncated — the mapPartitionsWithInputSplit call on the
// last line is unfinished and the closing braces for main/object are not visible here.
object sparkReadDir{
def main(args: Array[String]): Unit = {
// Local-mode Spark context; Kryo serializer is configured explicitly.
val conf = new SparkConf()
conf.setAppName("testtoarquet")
conf.setMaster("local")
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
val sc = new SparkContext(conf)
// Input directory (Windows path); output path is unused so far in the visible code.
var input = "C:\\Users\\mzz\\Desktop\\tt\\20180315\\"
var output = ""
//val value = sc.textFile(input+"20180314_HK5-10.82.26.22.txt")
// newAPIHadoopFile yields (LongWritable offset, Text line) pairs for every file
// in the directory, using the new-API TextInputFormat.
val fileRDD = sc.newAPIHadoopFile[LongWritable, Text, TextInputFormat](input)
// Downcast to NewHadoopRDD to reach mapPartitionsWithInputSplit, which exposes
// the InputSplit (castable to FileSplit to obtain the file path/name) per partition.
val hadoopRDD = fileRDD.asInstanceOf[NewHadoopRDD[LongWritable, Text]]
// NOTE(review): call is cut off mid-expression in this chunk — the iterator
// function argument and everything after it are missing from the source.
val fileAdnLine = hadoopRDD.mapPartitionsWithInputSplit(
Spark读取目录获取文件名
最新推荐文章于 2023-08-24 11:34:52 发布
本文介绍如何利用 Apache Spark 从指定目录中读取所有文件并获取每个文件的名称。借助 Spark 的 NewHadoopRDD API，可以在分区级别访问 InputSplit，从而高效地获取大量文件的路径信息，适用于大数据处理场景。
摘要由CSDN通过智能技术生成