#!/bin/bash
# Nginx access-log analysis for /v2/ interfaces.
# Reports, per interface: total response time, request count, and the
# number of requests slower than 500 ms.
#
# Usage: nginx_log_analysis.sh /path/to/access.log
# Assumes the common nginx combined-ish format where field 7 is the
# request path and the last field is the response time in seconds
# (TODO confirm against the actual log_format directive).

logpath=$1
if [ -z "$logpath" ] || [ ! -r "$logpath" ]; then
  printf 'Usage: %s /path/to/access.log (file must be readable)\n' "${0##*/}" >&2
  exit 1
fi

# Unpredictable temp files, removed on any exit path
# (the original used fixed /tmp/log1 and /tmp/log2 and never cleaned up).
log1=$(mktemp /tmp/log1.XXXXXX) || exit 1
log2=$(mktemp /tmp/log2.XXXXXX) || exit 1
trap 'rm -f -- "$log1" "$log2"' EXIT

# Build "<interface> <response-time>" pairs:
#  - strip quotes and query strings
#  - keep field 7 (path) and the last field (time)
#  - keep only /v2/ paths; the trailing grep -v " /v2/" (note the leading
#    space) drops lines where a path leaked into the time column
#  - drop noise: URLs, html, doubled slashes, percent-encoded junk,
#    and the /v2/rest/share* endpoints
sed -e 's/"//g' -e 's/?.*$//g' -- "$logpath" \
  | awk '{print $7, $NF}' \
  | grep "^/v2/" \
  | grep -v " /v2/" \
  | grep -E -v "http|html|//|%" \
  | grep -v "/v2/rest/share.*" > "$log1"

# Sum response time per interface in one awk pass (replaces the original
# O(n^2) grep-per-interface loop, and avoids seeding $log2 with a blank
# line that used to leak into the sorted report).
awk '{sum[$1] += $2} END {for (i in sum) print sum[i], i}' "$log1" > "$log2"

# Green separator line naming the analyzed log file.
banner() {
  printf '\033[32m------------ Log File is %s ----------------------------------------------------------\033[0m\n' "$logpath"
}

banner
# Total access time per interface, descending.
sort -nr "$log2" \
  | awk 'BEGIN {print "Time\t\tOWL InterFace"} {printf "%-10s %-10s\n",$1,$2} END {print "End Analysis"}'
banner
# Access count per interface, descending.
awk '{print $1}' "$log1" | sort | uniq -c | sort -nr \
  | awk 'BEGIN {print "Number\t\tOWL InterFace"} {printf "%-10s %-10s\n",$1,$2} END {print "End Analysis"}'
banner
# Per-interface count of requests slower than 500 ms.
awk '$2 > 0.5 {print $1}' "$log1" | sort | uniq -c | sort -rn \
  | awk 'BEGIN {print "Time>500ms\tOWL InterFace"} {printf "%-10s %-10s\n",$1,$2} END {print "End Analysis"}'
# Reposted from (original source): https://my.oschina.net/direnjie/blog/546080