# 针对xss漏洞进行扫描器开发；实际测试中，xss小游戏靶场可以完成前10关，后面关卡字典足够的话理论上也可以通过
'''扫描器：获取url，对payload进行提取，进行字典尝试，得出结果'''
import re
from urllib.parse import urlsplit
import requests
# 获取url与xss字典内容，进行替换尝试
class xss_scanner():
    """Simple reflected-XSS scanner.

    Prompts for a target URL, then substitutes every query-string value,
    HTTP request header, or cookie value with payloads read from ``xss.txt``
    and reports each request whose payload is reflected verbatim in the
    response body.
    """

    def __init__(self):
        # Ask for the target URL and verify it is reachable before scanning.
        self.url = input('输入尝试的url:')
        res = requests.get(self.url, timeout=10)
        if res.status_code == 200 or res.status_code == 301:
            print('可以正常访问')
        else:
            print('访问失败')
            exit()

    def xss_dict(self):
        """Return the payload dictionary: one stripped payload per line of xss.txt."""
        with open('xss.txt', 'r', encoding='utf-8') as fp:
            return [line.strip() for line in fp]

    def xss_html(self):
        """Parse the target URL's query string into a ``{name: value}`` dict."""
        query = urlsplit(self.url).query
        # One capture for the parameter name, one for its value (up to the next '&').
        pairs = re.findall(r'(\w+)=([^&]+)', query)
        return dict(pairs)

    # ---- 主体部分: GET-parameter injection ----
    def xss_url(self, headers):
        """Replace each query-string value with every payload and GET the result.

        A hit is reported when the payload string is found reflected in the
        response body and passes the reflection heuristic below.
        """
        params = self.xss_html()
        payloads = self.xss_dict()
        for value in params.values():
            for payload in payloads:
                urls = self.url.replace(value, payload)
                body = requests.get(url=urls, headers=headers, timeout=10).text
                pos = body.rfind(payload)  # -1 when the payload is not reflected
                if pos == -1:
                    continue
                # Reflection heuristic kept from the original: the character
                # just before the match must equal the first matched character.
                # (The original also tested whether body[pos-2:pos-1] == '=',
                # but both branches printed the same message, so that test was
                # dead code and is dropped here.)
                if body[pos - 1:pos] == body[pos]:
                    print(f'当前xss成功:{urls}')

    def xss_hea(self):
        """Build one headers dict per (header name, payload) combination."""
        header_names = ['Referer', 'User-Agent', 'Replace', 'Normal']
        payloads = self.xss_dict()
        combos = []
        for name in header_names:
            for payload in payloads:
                # When name == 'User-Agent' the payload entry overwrites the
                # default UA, matching the original dict-literal behavior.
                combos.append({
                    'Host': 'xss.sqlsec.com',
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0',
                    name: payload,
                })
        return combos

    def xss_cookie(self):
        """Inject every payload through each cookie the target sets."""
        headers = {'Host': 'xss.sqlsec.com',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'}
        payloads = self.xss_dict()
        base_cookies = dict(requests.get(url=self.url, timeout=10).cookies)
        # One candidate cookie jar per (cookie name, payload) pair.
        candidates = [{name: payload}
                      for name in base_cookies
                      for payload in payloads]
        for cookie in candidates:
            body = requests.get(url=self.url, headers=headers,
                                cookies=cookie, timeout=10).text
            injected = cookie.get(list(cookie.keys())[-1])
            if body.find(injected) != -1:
                print(f'使用该cookie{cookie}')

    def xss_res(self):
        """Try every header/payload combination produced by xss_hea()."""
        # BUG FIX: the original called xss_scanner.xss_hea(), which only worked
        # because the module-level name was rebound to the instance in __main__;
        # with the class object it would raise. Use self instead.
        for hea in self.xss_hea():
            body = requests.get(url=self.url, headers=hea, timeout=10).text
            injected = hea.get(list(hea.keys())[-1])  # last key holds the payload
            if body.find(injected) != -1:
                print(f'使用了该http头:{hea}')
# 收尾
# Script entry point: build the scanner (prompts for the target URL) and run
# the cookie-injection module; uncomment the other calls to run those modules.
if __name__ == '__main__':
    headers = {'Host': 'xss.sqlsec.com',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'}
    # BUG FIX: the original rebound the class name to the instance
    # (xss_scanner = xss_scanner()), shadowing the class at module level.
    scanner = xss_scanner()
    # scanner.xss_url(headers)  # 普通get块 (GET-parameter injection)
    # scanner.xss_res()         # http头注块 (HTTP-header injection)
    scanner.xss_cookie()        # cookie块 (cookie injection)
# 待优化部分：
#   - 各模块之间的互相衔接
#   - 多线程以减少扫描时间
#   - 提升结果判断的准确性