package cn.ac.iie

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.SparkSession

object MergerFile {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("mergeFile")
      .enableHiveSupport()
      .getOrCreate()

    val target = args(0) // directory whose small files are to be merged
    val configuration = new Configuration()
    val output = new Path(target)
    val hdfs = output.getFileSystem(configuration)
    val status = hdfs.listStatus(output)

    var paths: List[String] = List.empty // paths of the small files to merge
    var length = 0L // total size of those files, in bytes
    for (fs <- status) {
      if (fs.getLen == 0) { // empty files are deleted outright
        hdfs.delete(fs.getPath, true)
      } else if (fs.getLen < 234217728) {
        // Files below the threshold are merge candidates. The nominal target is one
        // HDFS block (128 MB = 134217728 bytes); the threshold is enlarged to 234217728
        // because the input is not snappy-compressed while the output will be.
        paths :+= fs.getPath.toString
        length += fs.getLen
      }
    }
    // number of output files, targeting ~128 MB each and assuming roughly 3x snappy compression
    val mergeFilesCount = (length / 1024 / 1024 / 128 / 3 + 1).toInt
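    // Worked example of the sizing formula above (illustrative numbers only): for 6 GiB of
    // qualifying small files, 6144 MB / 128 / 3 + 1 = 48 / 3 + 1 = 17 output files.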
    // spark.read.load uses the default data source (parquet); repartition so each output file is ~128 MB
    val mergeFiles = spark.read.load(paths: _*).repartition(mergeFilesCount)
    mergeFiles.write.parquet(s"$target/.TMP") // write the merged result to a temporary subdirectory
    val resStatus = hdfs.listStatus(new Path(s"$target/.TMP"))

    // move the merged files up into the target directory
    for (fs <- resStatus) {
      if (fs.getLen > 0) { // skip the empty _SUCCESS marker that Spark writes
        hdfs.rename(fs.getPath, output) // renaming into an existing directory moves the file into it
      }
    }
    println(s"merge ${paths.size} files => $mergeFilesCount files")
    // remove the temporary directory
    hdfs.delete(new Path(s"$target/.TMP"), true)

    // delete the original small files
    for (p <- paths) {
      hdfs.delete(new Path(p), true)
    }

    spark.stop()
  }
}
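The target directory is passed as the single program argument, so the job would be submitted along these lines (the jar name and directory path are illustrative):

spark-submit --master yarn --deploy-mode cluster --class cn.ac.iie.MergerFile merge-file.jar /user/hive/warehouse/some_table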
Utility class: merging small files on HDFS