In my earlier posts I covered how to find injection-prone URLs and how to detect SQL injection.
I have since tidied that work up and merged the two features into a single tool: just supply the URLs and it scans for SQL injection automatically.
Put the collected URLs into urllist.txt, one per line (see the sample below).
The results are written to the current directory.
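For reference, the script simply reads urllist.txt line by line, so the file is nothing more than one start URL per line, e.g. (placeholder addresses):

http://www.example.com/
http://www.example.org/news/index.php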
The source code follows:
# -*- coding: utf-8 -*-
# __author__ = 'lzyq'
import re
import requests
import time
from bs4 import BeautifulSoup as asp  # note: 'asp' is just an alias for BeautifulSoup
import random
import os
print unicode('''
Author: 浪子燕青 (lzyq)
Author QQ: 982722261
''','utf-8')
time.sleep(8)  # pause so the banner stays readable
# spoof an MSIE User-Agent so requests look like a normal browser
headeraa = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)',}
# result files, opened in append mode (written to later in the script)
zhaohan = open('mINJlogo.txt','a+')
zhaohan8 = open('INJ.txt','a+')
# database / scripting error signatures used to flag injectable pages
huixian1 = "is not a valid MySQL result resource"
huixian2 = "ODBC SQL Server Driver"
huixian3 = "Warning: ociexecute"
huixian4 = "Warning: pg_query() [function.pg-query]"
huixian5 = "You have an error in your SQL syntax"
huixian6 = "Database Engine"
huixian7 = "Undefined variable"
huixian8 = "on line"
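# --- Illustrative aside, not part of the original script ---------------------
# Signatures like these are normally used error-based style: append a single
# quote to a collected URL and check whether any signature shows up in the
# response body. The sketch below assumes that approach; the helper name
# looks_injectable and the quote payload are hypothetical, not the author's.
def looks_injectable(url):
    try:
        body = requests.get(url + "'", headers=headeraa, timeout=10).text
    except requests.RequestException:
        return False
    # flag the URL as soon as any known error string appears in the page
    for sig in (huixian1, huixian2, huixian3, huixian4,
                huixian5, huixian6, huixian7, huixian8):
        if sig in body:
            return True
    return False
# ------------------------------------------------------------------------------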
# read the harvested URL list, one target site per line
hansb = open('urllist.txt','r')
hanssb = hansb.readlines()
hansb.close()
ttzh = str(time.ctime())       # run timestamp (used later)
zhzhzh = open('url.txt','a+')  # further output file, written to later in the script
for urllists in hanssb:
    urllistx = urllists.strip('\n')
    print urllistx
    time.sleep(3)  # short pause between targets
    try:
        sss5 = []      # relative links carrying a query string
        bingasp = []   # absolute links whose href contains "asp?"
        bingphp = []   # absolute links whose href contains "php?"
        bingaspx = []  # absolute links whose href contains "aspx?"
        bingjsp = []   # absolute links whose href contains "jsp?"
        han = requests.get(url=urllistx, headers=headeraa, timeout=10)
        print han.status_code
        print 'Waiting...........................'
        soup = asp(han.content, 'html.parser')  # explicit parser avoids bs4's warning
        # harvest every parameterised .asp link; hrefs containing "http" are
        # treated as absolute, everything else is collected as relative in sss5
        hrefs = soup.find_all(href=re.compile(r'asp\?'))
        for href in hrefs:
            href = href.get('href')
            if 'http' in href:
                bingasp.append(href)
            else:
                sss5.append(href)
        # same harvesting for parameterised .php links
        hrefs = soup.find_all(href=re.compile(r'php\?'))
        for href in hrefs:
            href = href.get('href')
            if 'http' in href:
                bingphp.append(href)
            else:
                sss5.append(href)
        # and for parameterised .aspx links
        hrefs = soup.find_all(href=re.compile(r'aspx\?'))
        for href in hrefs:
            href = href.get('href')
            if 'http' in href:
                bingaspx.append(href)
            else:
                sss5.append(href)
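        # (Illustrative aside, not part of the original script: the relative
        #  links collected in sss5 cannot be requested as-is; they first need
        #  the page's base URL prepended, which Python 2's stdlib does with
        #  urljoin, e.g.:
        #      from urlparse import urljoin
        #      full_url = urljoin(urllistx, relative_href)  # hypothetical names
        # )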