A website directory scanner implemented in Python

import optparse
import random
import time

import requests
from bs4 import BeautifulSoup
from multiprocessing.dummy import Pool as ThreadPool  # Pool here is a thread pool
# Command-line argument parsing
def get_args():
    usage="-u <target url>\n --level <rank for scan>\n -d <dictionary path>\n -n <threading number>"
    parser=optparse.OptionParser(usage)
    parser.add_option("-u","--url",dest='url',help="please input target url",type="string")
    parser.add_option("--level",dest='level',help="please choose which level to use",type="string")
    parser.add_option("-d","--dic",dest='dic',help="dictionary path",default='F:\yujian dictionary++.txt')
    parser.add_option("-n",dest="num",help="number of thread,default 5",type="int",default=5)
    (options,args)=parser.parse_args()
    return options
# Normalize the target URL and expand the dictionary into full candidate URLs
def check(target,dict_path):
    t_url=target
    if not t_url.endswith('/'):
        t_url=t_url + '/'
    with open(dict_path, "r") as f:
        dirs = f.read().splitlines()
    tempdirs = []
    for r in dirs:
        r = r.strip()
        if not r:
            continue
        if r.startswith('/'):
            r = r[1:]
        tempdirs.append(t_url + r)
    return tempdirs
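# A minimal illustration (hypothetical target and wordlist, not part of the scanner):
# given a dictionary file containing the lines "admin", "/login.php" and "backup/",
#   check("http://example.com", "dict.txt")
# would return
#   ["http://example.com/admin", "http://example.com/login.php", "http://example.com/backup/"]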
# Probe a single candidate URL; report and return it unless the server answers 404/500
def scan1(url):
    try:
        r = requests.head(url, timeout=5)
        if r.status_code not in [404, 500]:
            print(url)
            return url
    except requests.RequestException:
        pass
    return None
# Scan through HTTP proxies
proxy_path = r"E:\ip_proxy.txt"  # assumed: the original leaves proxy_path undefined; this matches the file written by validateIp() below
def scan2(love):
    r_list = []
    proxy = []
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0'}
    # Load the proxy list, one "ip:port" per line
    with open(proxy_path) as f:
        for line in f.readlines():
            proxy.append("http://" + line.strip('\n'))
    try:
        for i in love:
            # Pick a random proxy for each request and pause briefly between probes
            proxies = {"http": random.choice(proxy)}
            r = requests.head(i, headers=header, proxies=proxies)
            time.sleep(1)
            if r.status_code not in [404, 500]:
                print(i)
                r_list.append(i)
        return r_list
    except requests.RequestException:
        print("some errors")

 

# Shared request header used by the proxy helpers below
User_Agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0'
header = {'User-Agent': User_Agent}
  
'''
Fetch candidate proxy IP addresses from the free list at xicidaili.com
'''
def getProxyIp():
    proxy = []
    # Crawl the free proxy list on xicidaili.com (here only page 1)
    for i in range(1, 2):
        try:
            url = 'http://www.xicidaili.com/nn/' + str(i)
            req = requests.get(url, headers=header)
            soup = BeautifulSoup(req.text, 'lxml')
            ips = soup.findAll('tr')
            # Skip the table header row; column 1 is the IP, column 2 the port
            for x in range(1, len(ips)):
                tds = ips[x].findAll("td")
                proxy.append(tds[1].contents[0] + ":" + tds[2].contents[0])
        except:
            continue
    return proxy
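# getProxyIp() returns plain "ip:port" strings, e.g. ["1.2.3.4:8080", "5.6.7.8:3128"]
# (values are placeholders); validateIp() below expects exactly this format.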
  
# Keep only the proxies that can actually reach the test URL and save them to a file
def validateIp(proxy):
    url = "http://ip.chinaz.com/getip.aspx"
    with open(r"E:\ip_proxy.txt", "w") as f:
        for i in range(0, len(proxy)):
            try:
                ip = proxy[i].strip().split(":")
                proxy_host = "http://" + ip[0] + ":" + ip[1]
                proxy_temp = {"http": proxy_host}
                requests.get(url, headers=header, proxies=proxy_temp, timeout=3)
                f.write(proxy[i] + '\n')
                print(proxy[i])
            except Exception:
                continue
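# The proxy helpers are not wired into main(); the intended flow appears to be
#   validateIp(getProxyIp())
# which writes the working proxies to the file that scan2() reads (proxy_path).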
# Multithreaded scan
def mult(love, pro_num):
    pool = ThreadPool(processes=pro_num)  # thread pool with pro_num worker threads
    # map() distributes the candidate URLs across the worker threads, one URL per scan1() call
    result = pool.map(scan1, love)
    pool.close()
    pool.join()
    return result
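# Rough usage sketch (target and dictionary are placeholders):
#   found = mult(check("http://example.com", "dict.txt"), 10)
# found then holds one entry per candidate URL: the URL itself, or None if it was rejected.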
def main():
    opts = get_args()
    url = opts.url
    dic = opts.dic
    num = opts.num
    level = opts.level
    love = check(url, dic)
    # Level 1: multithreaded direct scan
    if level == 1:
        mult(love, num)

if __name__ == '__main__':
    main()
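# Example invocation (script name, target and dictionary path are placeholders):
#   python dirscan.py -u http://example.com --level 1 -d dict.txt -n 10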

 
