以前用御剑扫描,但经常会出现bug,不抓流量还不一定能发现,结果可信度不高。当然也可以偷懒使用burpsuite之类的intruder功能来充当扫描器。但为了更好的掌控扫描的情况,就自己写了个python扫描器。代码如下:
#!/usr/bin/env python
# encoding: utf-8
import os
import threading
import time

import requests
def eachFile(filepath):
pathDir = os.listdir(filepath)
for allDir in pathDir:
child = os.path.join('%s%s' % (filepath, allDir))
flist.append(child.decode('gbk'))
print child.decode('gbk')
def scan(host,payload):
url = host + payload
headers1 = {
'Accept': '*/*',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.1; ',
'Cache-Control': 'no-cache',
'Content-Type': 'application/x-www-form-urlencoded' # 保证post数据
}
post_data="cmd=1"
try:
res = requests.get(url,headers=headers1,timeout=5)
res = requests.post()
except Exception as e:
if str(e).find('timed out') != -1:
#'超时, 重试'
scan(host, payload)
return
#print "ERROR",url,'\n'
print e
return 0
#注意响应码的类型,字符或数值
if str(res.status_code)[0:2]=='40':
restext = len(res.text)-len(payload)
#虽然响应码是 40,但若是回复很长也很可疑
if restext > 300:
print "long response text:"+url
if len(str(res.headers)) >300:
print "long response header:" + url
else:
print url , res.status_code
return 1
if __name__ == '__main__':
print "\n[+]字典文件"
flist = []
#字典目录
path = './scan/'
eachFile(path)
#线程数
threads = 100
print('\n[+]扫描中')
host = "118.89.166.125"
host = "http://" + host + "/"
for f in flist:
print f
f2 = open(f, 'r')
for line in f2.readlines():
try:
payload = line.decode('utf8').strip("\r").strip("\n").replace(' ', '')
except Exception as e:
payload = line.decode('gbk').strip("\r").strip("\n").replace(' ', '')
#跳过空行
if payload == "":
continue
# 限制线程数
while (threading.activeCount() > threads):
time.sleep(1)
t1 = threading.Thread(target=scan, args=(host,payload))
t1.start()
#等待线程结束,为1
while(threading.activeCount()!=1):
time.sleep(1)
print "检测结束"