Python Website Backend Scanner Script

#!/usr/bin/python
#coding=utf-8
import sys
import urllib
import time

url = "http://123.207.123.228/"
txt = r"C:\Users\ww\Desktop\houtaiphp.txt"  # dictionary file: one candidate backend path per line
open_url = []
all_url = []

def search_url(url, txt):
    with open(txt, "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = url + each
            all_url.append(urllist)
            print("checking: " + urllist + '\n')
            try:
                # Python 2's urllib.urlopen; under Python 3 use urllib.request.urlopen
                req = urllib.urlopen(urllist)
                if req.getcode() == 200:
                    open_url.append(urllist)
                if req.getcode() == 301:
                    open_url.append(urllist)
            except:
                pass

def main():
    search_url(url, txt)
    if open_url:
        print("backend addresses:")
        for each in open_url:
            print("[+]" + each)
    else:
        print("no backend page found")

if __name__ == "__main__":
    main()
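For reference, the dictionary file is just one candidate backend path per line. A hypothetical houtaiphp.txt might look like this (sample entries of my own, not from the original post):

admin.php
admin/login.php
manage/login.php
login.php
admin/index.php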

 

 

The same script, refactored so the per-URL request handling lives in its own handle_url function:
#!/usr/bin/python
#coding=utf-8
import sys
import urllib
import time

url = "http://123.207.123.228/"
txt = r"C:\Users\ww\Desktop\houtaiphp.txt"  # dictionary file: one candidate backend path per line
open_url = []
all_url = []

def search_url(url, txt):
    with open(txt, "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = url + each
            all_url.append(urllist)
            handle_url(urllist)

def handle_url(urllist):
    print("checking: " + urllist + '\n')
    try:
        # Python 2's urllib.urlopen; under Python 3 use urllib.request.urlopen
        req = urllib.urlopen(urllist)
        if req.getcode() == 200:
            open_url.append(urllist)
        if req.getcode() == 301:
            open_url.append(urllist)
    except:
        pass

def main():
    search_url(url, txt)
    if open_url:
        print("backend addresses:")
        for each in open_url:
            print("[+]" + each)
    else:
        print("no backend page found")

if __name__ == "__main__":
    main()

 

 

 

My mentor told me to keep studying --> multithreading.

So let's add multithreading here.
#!/usr/bin/python
#coding=utf-8
import sys
import urllib
import time
import threading

url = "http://123.207.123.228/"
txt = r"C:\Users\ww\Desktop\houtaiphp.txt"  # dictionary file: one candidate backend path per line
open_url = []
all_url = []
threads = []

def search_url(url, txt):
    # only build the URL list here; the requests are issued by worker threads
    with open(txt, "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = url + each
            all_url.append(urllist)

def handle_url(urllist):
    print("checking: " + urllist + '\n')
    try:
        req = urllib.urlopen(urllist)
        if req.getcode() == 200:
            open_url.append(urllist)
        if req.getcode() == 301:
            open_url.append(urllist)
    except:
        pass

def main():
    search_url(url, txt)
    # one thread per URL
    for each in all_url:
        t = threading.Thread(target=handle_url, args=(each,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    if open_url:
        print("backend addresses:")
        for each in open_url:
            print("[+]" + each)
    else:
        print("no backend page found")

if __name__ == "__main__":
    start = time.clock()  # time.clock() works on Python 2; use time.time() on Python 3
    main()
    end = time.clock()
    print("spend time is: %.3f seconds" % (end - start))

Time comparison, multithreaded vs. single-threaded: (screenshots omitted)
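One caveat with the threaded version: it starts a thread for every dictionary entry at once, which can overwhelm the target or the local machine on a large wordlist. A minimal sketch of capping concurrency with threading.Semaphore (the handle_url_bounded wrapper and the limit of 20 are my own assumptions, not part of the original script):

import threading

MAX_WORKERS = 20  # assumed cap, tune as needed
sem = threading.Semaphore(MAX_WORKERS)

def handle_url_bounded(urllist):
    # blocks while MAX_WORKERS requests are already in flight
    with sem:
        handle_url(urllist)

# in main(), start the threads against the bounded wrapper instead:
# t = threading.Thread(target=handle_url_bounded, args=(each,))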

 

 

 

--------------------------------------------------------------------------------------------------------------------------------------------------

 

Searching with ZoomEye

Calling the ZoomEye API to retrieve host information.

This mainly involves the requests, json, and os modules.
# coding: utf-8
import os
import requests
import json

access_token = ''
ip_list = []

def login():
    """
        Prompt for a username and password and log in.
    :return: the access token used for later API calls
    """
    user = raw_input('[-] input : username :')
    passwd = raw_input('[-] input : password :')
    data = {
        'username' : user,
        'password' : passwd
    }
    data_encoded = json.dumps(data)  # dumps serializes the Python dict to a JSON string
    try:
        r = requests.post(url = 'https://api.zoomeye.org/user/login', data = data_encoded)
        r_decoded = json.loads(r.text)  # loads parses the JSON string into a Python dict
        global access_token
        access_token = r_decoded['access_token']
    except Exception, e:
        print '[-] info : username or password is wrong, please try again'
        exit()

def saveStrToFile(file, str):
    """
        Write a string to a file.
    """
    with open(file, 'w') as output:
        output.write(str)

def saveListToFile(file, list):
    """
        Write a list to a file, one item per line.
    """
    s = '\n'.join(list)
    with open(file, 'w') as output:
        output.write(s)

def apiTest():
    """
        Query the search API page by page.
    """
    page = 1
    global access_token
    with open('access_token.txt', 'r') as input:
        access_token = input.read()
    # put the token into the HTTP Authorization header
    headers = {
        'Authorization' : 'JWT ' + access_token,
    }
    while True:
        try:
            r = requests.get(url = 'https://api.zoomeye.org/host/search?query="phpmyadmin"&facet=app,os&page=' + str(page),
                             headers = headers)
            r_decoded = json.loads(r.text)
            for x in r_decoded['matches']:
                print x['ip']
                ip_list.append(x['ip'])
            print '[-] info : count ' + str(page * 10)
        except Exception, e:
            # stop once the request exceeds the API's limit or the results run out
            if str(e.message) == 'matches':
                print '[-] info : search stopped, exceeding the max limitations'
                break
            else:
                print '[-] info : ' + str(e.message)
        else:
            if page == 10:
                break
            page += 1

def main():
    # if there is no cached access token, log in first
    if not os.path.isfile('access_token.txt'):
        print '[-] info : access_token file does not exist, please login'
        login()
        saveStrToFile('access_token.txt', access_token)

    apiTest()
    saveListToFile('ip_list.txt', ip_list)

if __name__ == '__main__':
    main()

 

The script above searches for phpmyadmin. The IPs it finds are saved to ip_list.txt in the same directory.
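The query string is hard-coded into apiTest. A minimal sketch of pulling it out into a parameter (the search_zoomeye helper is my own name; the endpoint and headers are the same ones used above):

def search_zoomeye(query, page, headers):
    # same /host/search endpoint as above, with the query passed in
    url = 'https://api.zoomeye.org/host/search?query="%s"&page=%d' % (query, page)
    return json.loads(requests.get(url, headers=headers).text)

# e.g. collect IPs for a different product:
# for x in search_zoomeye('tomcat', 1, headers)['matches']:
#     print x['ip']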

But not every IP returned is actually reachable, so here is a script that probes each one for phpMyAdmin and prints the URL when it responds.
#!/usr/bin/python
#coding=utf-8
import sys
import time
import requests

# browser User-Agent header so the requests look like a normal browser
headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0"}
open_url = []
all_url = []
prefix = 'http://'                  # renamed from "payloa" for clarity
suffix = '/phpmyadmin/index.php'    # renamed from "payload"

def search_url():
    with open(r"C:\Users\ww\Desktop\ip_list.txt", "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = prefix + each + suffix   # http://<ip>/phpmyadmin/index.php
            all_url.append(urllist)
            handle_url(urllist)

def handle_url(urllist):
    try:
        start_htm = requests.get(urllist, headers=headers)
        if start_htm.status_code == 200:
            print '*******************************************'
            print urllist
    except:
        pass

if __name__ == "__main__":
    search_url()

 

 

Add multithreading here too, since the workload is large.
#!/usr/bin/python
#coding=utf-8
import sys
import time
import requests
import threading

# browser User-Agent header so the requests look like a normal browser
headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0"}
open_url = []
all_url = []
threads = []
prefix = 'http://'                  # renamed from "payloa" for clarity
suffix = '/phpmyadmin/index.php'    # renamed from "payload"

def search_url():
    # only build the URL list here; the requests are issued by worker threads
    with open(r"C:\Users\ww\Desktop\ip_list.txt", "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = prefix + each + suffix
            all_url.append(urllist)

def handle_url(urllist):
    try:
        start_htm = requests.get(urllist, headers=headers)
        if start_htm.status_code == 200:
            print '*******************************************'
            print urllist
    except:
        pass

def main():
    search_url()
    for each in all_url:
        t = threading.Thread(target=handle_url, args=(each,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()

if __name__ == "__main__":
    start = time.clock()  # time.clock() works on Python 2; use time.time() on Python 3
    main()
    end = time.clock()
    print("spend time is %.3f seconds" % (end - start))

 

 

This makes things much more convenient.
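One refinement worth considering: HTTP 200 alone is a weak signal, since many hosts answer 200 for any path. A sketch of a stricter handle_url that also inspects the page body (the 'phpMyAdmin' substring test and the 5-second timeout are my own assumptions):

def handle_url(urllist):
    try:
        start_htm = requests.get(urllist, headers=headers, timeout=5)
        # require the body to actually mention phpMyAdmin, not just a 200
        if start_htm.status_code == 200 and 'phpMyAdmin' in start_htm.text:
            print urllist
    except:
        pass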

The road ahead is long!

Reposted from: https://www.cnblogs.com/hnsya/p/9364896.html
