Reading .dat files from a Hadoop cluster with Spark: fixing garbled text and splitting fields
- The code first. Hadoop's Text type hard-codes UTF-8, so sc.textFile and spark.read.textFile will garble a GBK-encoded file; the fix is to read the raw bytes with sc.hadoopFile and decode them as GBK explicitly:
package xx.gg

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object ReadHDFS {
  def main(args: Array[String]): Unit = {
    // setAppName is required: SparkConf throws if no application name is set
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("ReadHDFS")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val sc = spark.sparkContext
    val data_path = "hdfs://192.168.10.100:9000/data_row/f_dep.dat"
    // Reading directly produces mojibake, because Text assumes UTF-8:
    // val rowDF: Dataset[String] = spark.read.textFile(data_path)

    // Read the raw bytes and decode them as GBK ourselves.
    // Text.getBytes returns the backing array, which may be longer than the
    // actual content, so the (offset, length) constructor overload matters here.
    val inputRdd: RDD[String] = sc
      .hadoopFile(data_path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
      .map(pair => new String(pair._2.getBytes, 0, pair._2.getLength, "GBK"))

    // Fields are separated by the control character \x01 (Hive's default delimiter)
    val value: RDD[String] = inputRdd.map(x => x.split("\\x01").mkString(","))
    value.foreach(println)
  }
}
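A natural next step, not shown in the original post, is turning the split lines into a DataFrame. A minimal sketch, assuming f_dep.dat carries at least two fields per line; the column names dept_id and dept_name are hypothetical placeholders:

// Inside main, after inputRdd is built (sketch; schema is invented):
import spark.implicits._
val deptDF = inputRdd
  .map(_.split("\\x01", -1))      // limit -1 keeps trailing empty fields
  .filter(_.length >= 2)          // drop malformed lines
  .map(a => (a(0), a(1)))
  .toDF("dept_id", "dept_name")   // hypothetical column names
deptDF.show(10, truncate = false)

Splitting with limit -1 matters for delimited files: by default split discards trailing empty strings, which silently shifts columns when the last field is blank.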
Comparison of run results:
Direct read:
- garbled characters (GBK bytes decoded as UTF-8)
- one long, unsplit string per line
After processing:
- readable, comma-separated fields
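To see why the direct read is garbled, here is a self-contained round-trip on the JVM (the sample record is made up, not taken from f_dep.dat):

// Encode a record as GBK, then decode it both ways:
val gbkBytes = "研发部\u0001张三".getBytes("GBK")  // bytes as they sit on disk
println(new String(gbkBytes, "UTF-8"))             // mojibake: GBK bytes misread as UTF-8
println(new String(gbkBytes, "GBK"))               // correct decode
println(new String(gbkBytes, "GBK").split("\\x01").mkString(","))  // 研发部,张三

This is exactly what happens inside the job: spark.read.textFile performs the UTF-8 decode on the first line, while the hadoopFile version performs the GBK decode on the second.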