##Change the read/write permissions of the HDFS log directory
hdfs dfs -ls /tmp/logs/autoflow/logs
hdfs dfs -chmod a+rwx /tmp/logs/autoflow/logs
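If the subdirectories under that path need the same permissions, hdfs dfs -chmod also accepts -R for a recursive change; a minimal sketch reusing the same example path (adjust to your own directory):
##Recursively open read/write permissions on the directory and everything under it
hdfs dfs -chmod -R a+rwx /tmp/logs/autoflow/logs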
##Check the status of all NodeManager nodes
yarn node -list -all
##List currently running applications
yarn application -list
##Kill an application
yarn application -kill application_1703647286630_204559
##Fetch the logs of an application
##Usage: yarn logs -applicationId <Application ID>
yarn logs -applicationId application_1703647286630_204559
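Aggregated application logs can be large, so redirecting the output to a local file (as the container-log example below does) keeps them easier to work with; a sketch reusing the same example application ID:
##Save the full application log to a local file
yarn logs -applicationId application_1703647286630_204559 > app.log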
##Fetch the logs of a single container
##Usage: yarn logs -applicationId <Application ID> -containerId <Container ID>
yarn logs -applicationId application_1678411202227_0001 -containerId container_1678411202227_0001_01_000001 > file.txt
##List all attempts of an application
##Usage: yarn applicationattempt -list <Application ID>
yarn applicationattempt -list application_1703647286630_204559
##List all containers (containers are destroyed once the task finishes)
##Usage: yarn container -list <Application Attempt ID>
yarn container -list appattempt_1703647286630_204559_000001
Supplementary commands:
##Get the first 10,000 lines of a file
head -n 10000 file.txt > newfile.txt
##Get the last 10,000 lines of a file
tail -n 10000 file.txt > newfile.txt
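Instead of slicing by line count, grep can jump straight to the failure; a sketch using standard grep options (-n prints line numbers, -A keeps the following lines of the stack trace):
##Locate the exception and keep 20 lines of trailing context
grep -n -A 20 "Caused by" file.txt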
##Check the error message
Caused by: java.io.IOException: Unable to close file because the last block BP-1371676964-10.2.15.61-1700837018995:blk_1128276916_54542751 does not have enough number of replicas.
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:966)
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:909)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:892)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:847)
at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101)
at org.apache.hive.shaded.parquet.hadoop.ParquetFileWriter.end(ParquetFileWriter.java:648)
at org.apache.hive.shaded.parquet.hadoop.InternalParquetRecordWriter.close(InternalParquetRecordWriter.java:117)
at org.apache.hive.shaded.parquet.hadoop.ParquetRecordWriter.close(ParquetRecordWriter.java:162)
at org.apache.hadoop.hive.ql.io.parquet.write.ParquetRecordWriterWrapper.close(ParquetRecordWriterWrapper.java:127)
at org.apache.hadoop.hive.ql.io.parquet.write.ParquetRecordWriterWrapper.close(ParquetRecordWriterWrapper.java:144)
at org.apache.flink.connectors.hive.write.HiveBulkWriterFactory$1.finish(HiveBulkWriterFactory.java:79)
at org.apache.flink.formats.hadoop.bulk.HadoopPathBasedPartFileWriter.closeForCommit(HadoopPathBasedPartFileWriter.java:71)
at org.apache.flink.streaming.api.functions.sink.filesystem.Bucket.closePartFile(Bucket.java:263)
HDFS did not have enough space to place the required replicas of the last block; further analysis showed that one of the CDH nodes had gone down, so Flink could not connect to it and could not obtain the resources it needed.
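To confirm this kind of failure, the following standard commands show remaining HDFS capacity, DataNode liveness, and which NodeManagers are down; a sketch:
##Report remaining HDFS capacity and the status of each DataNode
hdfs dfsadmin -report
##Check for NodeManagers in LOST or UNHEALTHY state
yarn node -list -all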