Writing an exploit collector in Python

The script below crawls expku.com and saves exploit titles and URLs to local text files. It takes command-line switches for the home-page listing, the web, remote, and local exploit categories, and a keyword search.

import re
import optparse

import requests
from bs4 import BeautifulSoup
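# Third-party dependencies: pip install requests beautifulsoup4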

# URLs collected from the home page accumulate here.
urlsd = []

# Desktop Chrome User-Agent sent with every request.
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'}

def main():
    parser = optparse.OptionParser()
    parser.add_option('-m', action='store_true', dest='home',
                      help='save the home-page exploit list locally')
    parser.add_option('-w', action='store_true', dest='web',
                      help='save all web exploits')
    parser.add_option('-s', dest='search',
                      help='search for an exploit by keyword')
    parser.add_option('-y', action='store_true', dest='long',
                      help='save all remote exploits')
    parser.add_option('-b', action='store_true', dest='local',
                      help='save all local exploits')
    (options, args) = parser.parse_args()

    if options.home:
        poc()
    elif options.web:
        web()
    elif options.search:
        searchexploit(options.search)
    elif options.long:
        logins()
    elif options.local:
        local()
    else:
        parser.print_help()
        exit()
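# Example invocations (the file name exploit_collector.py is an assumption):
#   python exploit_collector.py -m               # home-page exploit list
#   python exploit_collector.py -w               # all web exploits
#   python exploit_collector.py -s wordpress     # keyword search
#   python exploit_collector.py -y               # all remote exploits
#   python exploit_collector.py -b               # all local exploits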

def poc():
    print('[+] Emptying exploitcs1.txt')
    kw = open('exploitcs1.txt', 'w')
    kw.close()
    print('[+] Emptying complete')
    print('[+] Generating a new exploit list')
    url = 'http://expku.com/'
    rest = requests.get(url=url, headers=HEADERS)
    # The site serves GBK-encoded pages.
    qinx = rest.content.decode('gbk')
    kop = BeautifulSoup(qinx, 'html.parser')
    # Collect the target of every link on the home page.
    for x in kop.find_all('a'):
        u1 = x.get('href')
        if u1:
            urlsd.append('http://expku.com/' + u1)
    if urlsd:
        urlsd.pop(0)  # discard the first collected link
    lk = list(set(urlsd))
    # Visit each collected page and record its <h1> title and URL.
    for m in lk:
        rest2 = requests.get(url=m, headers=HEADERS)
        pl = BeautifulSoup(rest2.content.decode('gbk'), 'html.parser')
        for l in pl.find_all('h1'):
            ks = 'title: {}  url: {}'.format(l.get_text(), rest2.url)
            print(ks)
            pw = open('exploitcs1.txt', 'a')
            pw.write(ks)
            pw.write('\n')
            pw.close()

def web():
    print('[+] Emptying exploitweb.txt')
    odw = open('exploitweb.txt', 'w')
    odw.close()
    print('[+] Emptying complete')
    print('[+] Start writing the collected web exploits')
    urlsd = []
    # Walk the paginated web-exploit index (88 pages at the time of writing).
    for h in range(88):
        url = 'http://expku.com/web/list_6_{}.html'.format(h)
        reques = requests.get(url=url, headers=HEADERS)
        kw = BeautifulSoup(reques.content.decode('gbk'), 'html.parser')
        for q in kw.find_all('a'):
            pq = q.get('href') or ''
            urls = 'http://expku.com' + pq
            # Keep only links that point at individual web-exploit pages.
            for k in re.findall('http://expku.com/web/.*.html', urls):
                urlsd.append(k)
    kc = list(set(urlsd))
    # Fetch each exploit page and record its <h1> title and URL.
    for b in kc:
        tfs = requests.get(url=b, headers=HEADERS)
        bds = BeautifulSoup(tfs.content.decode('gbk'), 'html.parser')
        for t in bds.find_all('h1'):
            print(t.get_text(), '', tfs.url)
            print(t.get_text(), '', tfs.url, file=open('exploitweb.txt', 'a'))

def searchexploit(searchs):
    print('[+] Search results:')
    jg = []
    rs = []
    urlsh = []
    urls = 'http://expku.com/search.php?keyword={}'.format(searchs)
    resq = requests.get(url=urls, headers=HEADERS)
    weq = BeautifulSoup(resq.content.decode('gbk'), 'html.parser')
    for r in weq.find_all('a'):
        ds = r.get('title')
        durl = r.get('href') or ''
        # Result links look like /<category>/<id>.html.
        op = "".join(re.findall('/.*/.*.html', durl))
        rs.append(op)
        jg.append('{}'.format(ds).replace('None', ''))
    while '' in rs:
        rs.remove('')
    for g in rs:
        urlsh.append('http://expku.com' + g)
    while '' in jg:
        jg.remove('')
    # Pair each title with its URL; zip stops at the shorter list.
    for title, link in zip(jg, urlsh):
        print(title, link)
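# Note: the keyword is interpolated into the query string verbatim; multi-word
# or non-ASCII searches may need urllib.parse.quote() applied first.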

def logins():
    print('[+] Emptying exploitlong.txt')
    lwe = open('exploitlong.txt', 'w')
    lwe.close()
    print('[+] Getting all remote exploits')
    urls = []
    zj = []
    # Walk the paginated remote-exploit index (75 pages at the time of writing).
    for i in range(75):
        url = 'http://expku.com/remote/list_4_{}.html'.format(i)
        regw = requests.get(url=url, headers=HEADERS)
        lvq = BeautifulSoup(regw.content.decode('gbk'), 'html.parser')
        for d in lvq.find_all('a'):
            eq = d.get('href') or ''
            for b in re.findall('/remote/.*.html', eq):
                urls.append('http://expku.com' + b)
    qwe = list(set(urls))
    # Fetch each exploit page and pair its <h1> title with its URL.
    for t in qwe:
        page = requests.get(url=t, headers=HEADERS)
        pq = BeautifulSoup(page.content.decode('gbk'), 'html.parser')
        for h in pq.find_all('h1'):
            zj.append(h.get_text() + ' ' + t)
    jb = list(set(zj))
    for j in jb:
        print(j)
        print(j, file=open('exploitlong.txt', 'a'))

def local():
    print('[+] Emptying exploitlocal.txt')
    wd = open('exploitlocal.txt', 'w')
    wd.close()
    print('[+] Getting local exploits')
    # Walk the paginated local-exploit index (56 pages at the time of writing).
    for j in range(56):
        uk = []
        url = 'http://expku.com/local/list_5_{}.html'.format(j)
        rwqe = requests.get(url=url, headers=HEADERS)
        vdq = BeautifulSoup(rwqe.content.decode('gbk'), 'html.parser')
        for f in vdq.find_all('a'):
            ddo = f.get('href')
            for g in re.findall('/local/.*.html', str(ddo)):
                uk.append('http://expku.com' + g)
        yf = list(set(uk))
        # Fetch each exploit page and record its <h1> title and URL.
        for c in yf:
            rtq = requests.get(url=c, headers=HEADERS)
            vdq = BeautifulSoup(rtq.content.decode('gbk'), 'html.parser')
            for a in vdq.find_all('h1'):
                print(a.get_text(), '', rtq.url)
                print(a.get_text(), '', rtq.url, file=open('exploitlocal.txt', 'a'))


if __name__ == '__main__':
    main()
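Every page is fetched with a bare requests.get call, so a single slow or failing request aborts the whole run. Below is a minimal sketch of a hardened fetch helper, assuming a 10-second timeout and three retries (the helper name fetch and its parameters are assumptions, not part of the original script):

import time

def fetch(url, retries=3, timeout=10):
    """GET a page with a timeout and a simple retry loop; None on failure."""
    for attempt in range(retries):
        try:
            resp = requests.get(url=url, headers=HEADERS, timeout=timeout)
            resp.raise_for_status()   # treat HTTP errors as failures too
            return resp
        except requests.RequestException:
            time.sleep(2 ** attempt)  # back off: 1s, 2s, 4s
    return None

Each requests.get call in the functions above could then be replaced with fetch(...), skipping any page for which it returns None.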
