ps -ef 杀进程
ps -ef | grep file-flume-kafka | grep -v grep | awk '{print $2}' | xargs kill
磁盘监控
当 df -h 或 df -i 显示使用率100% 时,基本解决方法都是删除文件。
df -h 是去删除比较大无用的文件-----------大文件占用大量的磁盘容量。
df -i 则去删除数量过多的小文件-----------过多的文件占用了大量的inode号。
命令 | 作用 |
---|---|
du -sh / | 查看 / 下整个文件系统大小 |
du -sh * | 统计当前文件夹下文件大小 |
du -sm * \| sort -n | 统计当前目录大小,并按大小排序(单位MB) |
du -sk * \| sort -n | 统计当前目录大小,并按大小排序(单位KB) |
grep
Grep选项:
* : 表示当前目录所有文件,也可以是某个文件名
-r 是递归查找
-n 是显示行号
-R 查找所有文件包含子目录
-i 忽略大小写
递归查找目录下含有该字符串的所有文件
查找当前目录下,包含“bigdata07”字符串的所有文件
【方式1】(其中,r 表示递归, n 表示查询结果显示行号):
grep -rn "bigdata07" ./
效果如下:
【方式2】
find ./ -type f | xargs grep "bigdata07"
效果如下:
【方式3】(-l 表示只显示文件名)
find . | xargs grep -ri "bigdata07"
或
find . | xargs grep -ril "bigdata07"
效果如下:
[jiang@bigdata07 bin]$ find . | xargs grep -ri "bigdata07"
./xsync:for host in bigdata07 bigdata08 bigdata08
./xcall:for host in bigdata07 bigdata08 bigdata09
./hdp.sh: ssh bigdata07 "/opt/module/hadoop-3.1.3/sbin/start-dfs.sh"
./hdp.sh: ssh bigdata07 "/opt/module/hadoop-3.1.3/bin/mapred --daemon start historyserver"
./hdp.sh: ssh bigdata07 "/opt/module/hadoop-3.1.3/bin/mapred --daemon stop historyserver"
./hdp.sh: ssh bigdata07 "/opt/module/hadoop-3.1.3/sbin/stop-dfs.sh"
Binary file ./.xcall.swp matches
Binary file ./.jpsall.swp matches
Binary file ./.jpsall.swo matches
./lg.sh:for i in bigdata07 bigdata08; do
./f1.sh: for i in bigdata07 bigdata08
./f1.sh: for i in bigdata07 bigdata08
Binary file ./.f1.sh.swp matches
./jpsall:for i in bigdata07 bigdata08 bigdata09
Binary file ./.ods_to_dwd_log.sh.swn matches
./zookeeper.sh: for i in bigdata07 bigdata08 bigdata09
./zookeeper.sh: for i in bigdata07 bigdata08 bigdata09
./zookeeper.sh: for i in bigdata07 bigdata08 bigdata09
Binary file ./.ods_to_dwd_log.sh.swo matches
./mysql_to_hdfs_init.sh:--connect jdbc:mysql://bigdata07:3306/$APP \
./kafka.sh: for i in bigdata07 bigdata08 bigdata09
./kafka.sh: for i in bigdata07 bigdata08 bigdata09
Binary file ./.logger.sh.swp matches
./mysql_to_hdfs.sh:--connect jdbc:mysql://bigdata07:3306/$APP \
./mysql_to_hdfs.sh: --connect jdbc:mysql://bigdata07:3306/$APP \
./mysql_to_hdfs.sh: --connect jdbc:mysql://bigdata07:3306/$APP \
./logger.sh: for i in bigdata07 bigdata08 bigdata09
./logger.sh: for i in bigdata07 bigdata08 bigdata09
Binary file ./.ods_to_dwd_log.sh.swp matches
Binary file ./.mysql_to_hdfs.sh.swp matches
./demosqooptohdfs.sh:--connect jdbc:mysql://bigdata07:3306/$APP \
./sqoopdemo.sh: --connect jdbc:mysql://bigdata07:3306/$APP \
Binary file ./.hdfs_to_ods_db.sh.swp matches
./xsync:for host in bigdata07 bigdata08 bigdata08
./xcall:for host in bigdata07 bigdata08 bigdata09
./hdp.sh: ssh bigdata07 "/opt/module/hadoop-3.1.3/sbin/start-dfs.sh"
./hdp.sh: ssh bigdata07 "/opt/module/hadoop-3.1.3/bin/mapred --daemon start historyserver"
./hdp.sh: ssh bigdata07 "/opt/module/hadoop-3.1.3/bin/mapred --daemon stop historyserver"
./hdp.sh: ssh bigdata07 "/opt/module/hadoop-3.1.3/sbin/stop-dfs.sh"
Binary file ./.xcall.swp matches
Binary file ./.jpsall.swp matches
Binary file ./.jpsall.swo matches
./lg.sh:for i in bigdata07 bigdata08; do
./f1.sh: for i in bigdata07 bigdata08
./f1.sh: for i in bigdata07 bigdata08
Binary file ./.f1.sh.swp matches
./jpsall:for i in bigdata07 bigdata08 bigdata09
Binary file ./.ods_to_dwd_log.sh.swn matches
./zookeeper.sh: for i in bigdata07 bigdata08 bigdata09
./zookeeper.sh: for i in bigdata07 bigdata08 bigdata09
./zookeeper.sh: for i in bigdata07 bigdata08 bigdata09
Binary file ./.ods_to_dwd_log.sh.swo matches
./mysql_to_hdfs_init.sh:--connect jdbc:mysql://bigdata07:3306/$APP \
./kafka.sh: for i in bigdata07 bigdata08 bigdata09
./kafka.sh: for i in bigdata07 bigdata08 bigdata09
Binary file ./.logger.sh.swp matches
./mysql_to_hdfs.sh:--connect jdbc:mysql://bigdata07:3306/$APP \
./mysql_to_hdfs.sh: --connect jdbc:mysql://bigdata07:3306/$APP \
./mysql_to_hdfs.sh: --connect jdbc:mysql://bigdata07:3306/$APP \
./logger.sh: for i in bigdata07 bigdata08 bigdata09
./logger.sh: for i in bigdata07 bigdata08 bigdata09
Binary file ./.ods_to_dwd_log.sh.swp matches
Binary file ./.mysql_to_hdfs.sh.swp matches
./demosqooptohdfs.sh:--connect jdbc:mysql://bigdata07:3306/$APP \
./sqoopdemo.sh: --connect jdbc:mysql://bigdata07:3306/$APP \
Binary file ./.hdfs_to_ods_db.sh.swp matches
[jiang@bigdata07 bin]$
【方式4 】(推荐,更为通用!)
如果不知道文件所在的大致目录,知道文件的类型(例如文本类型 txt),可以在root根目录 / 下根据特定字符串进行查找:
find ./ -type f -name "*.sh" | xargs grep "bigdata07"
效果如下:
查找当前目录下后缀名过滤的文件
grep -Rn "datainfo" *.py
当前目录及设定子目录下的符合条件的文件
grep -Rn "datainfo" /home/hadoop/nisj/automationDemand/ *.py
结合find命令过滤目录及文件名后缀
find /home/hadoop/ -type f -name '*.py'| xargs grep -n 'datainfo'
批量递归删除当前目录下,以.class为后缀的文件。
find . -name '*.class' -type f -print -exec rm -rf {} \;
.表示从当前目录开始递归查找
-name '*.class'根据名称来查找,查找指定目录下以.class结尾的文件
-type f查找的类型为文件
-print输出查找到的文件全路径名
-exec后面写要执行的命令。
删除当前目录下以.txt结尾的文件:
find -name "*.txt" -exec rm -f '{}' \;
批量去掉文件后缀
for i in `ls *.COMPLETED | awk -F '.' '{print $1}'`; do mv $i.zip.COMPLETED $i.zip; done
ls *.COMPLETED | awk -F '.' '{print $1}' | xargs -i -t mv {}.zip.COMPLETED {}.zip
find xargs 解决
find ./ -name "*.COMPLETED" | awk -F "." '{print $2}' | xargs -i -t mv ./{}.zip.COMPLETED ./{}.zip
效果如下:
[jiang@jiang05 zipcsv]$ find ./ -name "*.COMPLETED" | awk -F "." '{print $2}' | xargs -i -t mv ./{}.zip.COMPLETED ./{}.zip
mv .//autobackup_history_20210811041126292.zip.COMPLETED .//autobackup_history_20210811041126292.zip
mv .//autobackup_history_20210827041802085.zip.COMPLETED .//autobackup_history_20210827041802085.zip
mv .//autobackup_history_20210827041341990.zip.COMPLETED .//autobackup_history_20210827041341990.zip
[jiang@jiang05 zipcsv]$ ll
total 244
-rw------- 1 aiiap aiiap 690 Aug 30 11:36 autobackup_history_20210811041126292.zip
-rw-r--r-- 1 aiiap aiiap 110521 Aug 27 13:50 autobackup_history_20210827041341990.zip
-rw-r--r-- 1 aiiap aiiap 132214 Aug 27 13:51 autobackup_history_20210827041802085.zip
[jiang@jiang05-05 zipcsv]$
grep -rn "datainfo" /home/hadoop/
查找当前目录下后缀名过滤的文件
grep -Rn "datainfo" *.py
当前目录及设定子目录下的符合条件的文件
grep -Rn "datainfo" /home/hadoop/nisj/automationDemand/ *.py
结合find命令过滤目录及文件名后缀
find /home/hadoop/ -type f -name '*.py'| xargs grep -n 'datainfo'
去掉Linux文件中的注释行和空行
grep -v "$" /root/math (去掉所有 有空格的行)
grep -v "^$" /root/math (去掉空行)
grep ^[^#] 文件 >> 目标文件
查看文件内容时,排除注释行查看的方法
[jiang@jiang-29 jiang]$ grep -v "^#" elasticsearch-7.2.0/config/elasticsearch.yml | more
cluster.name: es-jiang
node.name: node-1
node.master: true
node.data: true
http.cors.enabled: true
http.cors.allow-origin: "*"
http.port: 9200
transport.tcp.port: 9300
path.data: /jiang/to/data
path.logs: /jiang/to/logs
bootstrap.memory_lock: true
network.host: 0.0.0.0
discovery.seed_hosts: ["127.0.0.1"]
cluster.initial_master_nodes: ["node-1"]
sed
替换
sed -i 's/192\.168\.66\.147/192.168.66.148/g' /usr/local/redis-cluster/7001/redis.conf
如何在JAR文件中搜索字符串
方式一:
zipgrep "BEGIN REQUEST" file.jar
[root@jiang01 ~]# zipgrep "0.0.0.0" catalina.jar
org/apache/catalina/util/ServerInfo.properties:server.number=0.0.0.0
[root@jiang01 ~]#
find libdir -name "*.jar" -exec zipgrep "BEGIN REQUEST" '{}' \;
[root@jiang01 ~]# find ./ -name catalina.jar -exec zipgrep "0.0.0.0" '{}' \;
org/apache/catalina/util/ServerInfo.properties:server.number=0.0.0.0
[root@jiang01 ~]#