# Export the dirty-IMEI table from Hive to HDFS as CSV.
dfResult = spark.sql("select * from tmp.lanfz_dirty_imei")
dfResult.write.format("csv").option("header","true").mode("overwrite").save("/user/lanfz/dirty_imei/")
# NOTE: the output directory will contain multiple part files (one per task).
# Two ways to merge them into a single local file:
#
# Option 1 (suited to larger data volumes): write WITHOUT a header, then getmerge.
# With header=true every part file carries its own header row, so getmerge
# would scatter duplicate header lines through the merged file.
dfResult.write.format("csv").option("header","false").mode("overwrite").save("/user/lanfz/dirty_imei/")
# Run on the edge node:
#   hadoop fs -getmerge /user/lanfz/dirty_imei/ dirty_imei.csv
#
# Option 2 (suited to smaller data volumes): coalesce to a single part file.
dfResult.repartition(1).write.format("csv").option("header","true").mode("overwrite").save("/user/lanfz/dirty_imei/")
# Fetch only the data file — the directory also holds a _SUCCESS marker, so a
# bare "/*" glob matches multiple entries and `hadoop fs -get ... file.csv` fails:
#   hadoop fs -get /user/lanfz/dirty_imei/part-*.csv dirty_imei.csv