# 趁着程序员节日,发一篇赢取狗头嘻嘻
# 直接上代码了,有时间再来写文字
# 借鉴自猪猪侠的wydomain工具
# encoding: utf-8
"""内网子域名解析,搜索内网需要设置域名解析服务器
author:tomator
1|DNS解析
2|从crossdomain.xml文件中提取
query(self, qname, rdtype=1, rdclass=1, tcp=False, source=None, raise_on_no_answer=True, source_port=0)
"""
import time
import re
import os
import sys
import json
import queue as Queue
import random
import threading
import dns.query
import dns.resolver
import dns.rdatatype
import requests
import re
##### DNS servers used for resolution
# Intranet DNS servers (uncomment when scanning an internal network)
# nameservers =[
# '10.26.30.148'
# ]
# Public DNS servers
nameservers = [
    '119.29.29.29', '182.254.116.116',
    '8.8.8.8', '8.8.4.4',
    '180.76.76.76',
    '1.2.4.8', '210.2.4.8',
    '101.226.4.6', '218.30.118.6',
    '8.26.56.26', '8.20.247.20'
]
class Domain(object):
    """DNS resolution helper for (sub)domain enumeration.

    Record type reference:
      A     - maps a host name to an IP address
      MX    - mail exchanger record
      CNAME - alias record (domain-to-domain mapping)
      NS    - authoritative name server for a zone
      PTR   - reverse lookup (IP -> host name)
      SOA   - start of authority for a zone
    """

    def __init__(self, timeout=5):
        # Reserved for caching recursive lookups (not used by current callers).
        self.recursion = {}
        self.resolver = dns.resolver.Resolver()
        # Resolve through two randomly chosen servers from the module-level pool.
        self.resolver.nameservers = [random.choice(nameservers), random.choice(nameservers)]
        if timeout:
            self.resolver.timeout = timeout
            self.resolver.lifetime = timeout

    def get_type_name(self, typeid):
        """Return the textual name of a numeric rdata type (e.g. 1 -> 'A')."""
        return dns.rdatatype.to_text(typeid)

    def get_type_id(self, name):
        """Return the numeric id of an rdata type name (e.g. 'A' -> 1)."""
        return dns.rdatatype.from_text(name)

    def parser(self, answer):
        """Extract A-record addresses from a dnspython answer.

        @param answer  result object returned by resolver.query()
        @return dict such as {'A': ['10.26.30.133', ...]} (only A records)
        """
        result = {}
        for rrsets in answer.response.answer:
            for item in rrsets.items:
                # item format: newzone.com. 3600 IN A 10.26.30.133
                rdtype = self.get_type_name(item.rdtype)
                if rdtype == "A":
                    # BUGFIX: dict.has_key() was removed in Python 3;
                    # setdefault groups addresses per record type instead.
                    result.setdefault(rdtype, []).append(item.address)
        return result

    def query(self, target, rdtype):
        """Resolve *target* and return the parsed result, or None on failure."""
        try:
            answer = self.resolver.query(target, rdtype)
            return self.parser(answer)
        except dns.resolver.NoAnswer:
            return None  # expected negative answer, nothing to do
        except dns.resolver.NXDOMAIN:
            return None  # domain does not exist, nothing to do
        except dns.resolver.Timeout:
            print(target, rdtype, '<timeout>')
            return None
        except Exception:
            return None

    def brute(self, target, ret=False):
        """Probe *target* for an A record.

        @param target  candidate domain name to test
        @param ret     when True, return the parsed record dict (or None)
                       instead of a boolean
        """
        try:
            if not ret:
                if self.resolver.query(target, 'A'):
                    return True
            else:
                return self.query(target, 'A')
        except dns.resolver.NoAnswer:
            return False
        except dns.resolver.NXDOMAIN:
            return False
        except dns.resolver.Timeout:
            return self.again_brute(target)  # single retry on timeout
        except Exception:
            return False

    def again_brute(self, target):
        """Retry once after a timeout; True when the name resolves."""
        try:
            return bool(self.resolver.query(target, 'A'))
        except Exception:
            return False

    def extensive(self, target):
        """Detect wildcard DNS for *target*.

        Resolves a few names that should not exist; any addresses they
        yield belong to a wildcard record, and matching answers are
        excluded from the final results by the brute-force workers.
        """
        ehost = ['wyspider{0}.{1}'.format(i, target) for i in range(3)]
        esets = []
        for host in ehost:
            try:
                record = self.query(host, 'A')
                if record is not None:
                    # BUGFIX: guard against answers that contain no 'A' key
                    # (previously a silent KeyError).
                    esets.extend(record.get('A', []))
            except Exception:
                pass
        return esets
class DomainFuzzer(object):
    """Brute-force subdomain enumerator driven by a word list."""

    def __init__(self, target, dict_file='wydomain.csv', timeout=5):
        self.target = target
        self.dict_file = dict_file
        self.resolver = Domain(timeout=timeout)
        self.dict = None

    def run(self, thread_cnt=16):
        """Yield every resolvable subdomain of self.target.

        @param thread_cnt  number of worker threads to spawn
        """
        # Load the candidate prefixes from the dictionary file.
        self.dict = self.getLines(self.dict_file)
        # iqueue feeds candidates to the workers; oqueue collects hits.
        iqueue, oqueue = Queue.Queue(), Queue.Queue()
        # Join each prefix with the target to form full candidate names.
        for line in self.dict:
            iqueue.put('.'.join([str(line), str(self.target)]))
        # Wildcard (catch-all) addresses are filtered out of the results.
        extensive, threads = self.resolver.extensive(self.target), []
        for _ in range(thread_cnt):
            threads.append(self.bruteWorker(self, iqueue, oqueue, extensive))
        for t in threads: t.start()
        for t in threads: t.join()
        while not oqueue.empty():
            yield oqueue.get()

    @staticmethod
    def getLines(filename):
        """Yield the stripped lines of *filename* one at a time."""
        with open(filename, 'r') as fd:
            # PERF: iterate the file object directly instead of readlines()
            # so large dictionaries are streamed rather than loaded at once.
            for line in fd:
                yield line.rstrip()

    class bruteWorker(threading.Thread):
        """domain name brute force threading worker class
        @param dfuzzer DomainFuzzer base class
        @param iqueue Subdomain dict Queue()
        @param oqueue Brutefoce result Queue()
        @param extensive Doman extensive record sets
        """

        def __init__(self, dfuzzer, iqueue, oqueue, extensive):
            threading.Thread.__init__(self)
            self.queue = iqueue
            self.output = oqueue
            self.dfuzzer = dfuzzer
            self.extensive = extensive

        def run(self):
            try:
                while not self.queue.empty():
                    sub = self.queue.get_nowait()
                    if len(self.extensive) == 0:
                        # No wildcard DNS: any successful resolution is a hit.
                        if self.dfuzzer.resolver.brute(sub):
                            self.output.put(sub)
                    else:
                        # Wildcard present: only keep names whose addresses
                        # differ from the wildcard answers.
                        print("exit extensive ")
                        rrset = self.dfuzzer.resolver.brute(sub, ret=True)
                        if rrset is not None:
                            # BUGFIX: rrset may lack an 'A' key; previously a
                            # KeyError aborted the whole worker loop.
                            for answer in rrset.get('A', []):
                                if answer not in self.extensive:
                                    self.output.put(sub)
            except Exception as e:
                print(e)
def run(target, dic_file, out_file, timeout=5, thread=5):
    """Brute-force subdomains of *target* and save the results.

    @param target    base domain, e.g. "aliyun.com"
    @param dic_file  dictionary file of subdomain prefixes
    @param out_file  result file name (created under result/<target>/)
    @param timeout   DNS query timeout in seconds
    @param thread    number of worker threads
    """
    domain = target
    thread_cnt = int(thread)
    timeout = int(timeout)
    dict_file = dic_file
    if not domain:
        print('use domain like: aliyun.com')
        sys.exit(1)
    # Directory of this script; results are stored next to it.
    script_path = os.path.dirname(os.path.abspath(__file__))
    # BUGFIX: build the path with os.path.join instead of a hard-coded
    # backslash ('result\{0}') so it also works on non-Windows systems.
    _cache_path = os.path.join(script_path, 'result', domain)
    # One directory per target domain.
    if not os.path.exists(_cache_path):
        os.makedirs(_cache_path)
    # Collected subdomains.
    subdomains = []
    dnsfuzz = DomainFuzzer(target=domain, dict_file=dict_file, timeout=timeout)
    # Drain the generator of confirmed subdomains.
    for subname in dnsfuzz.run(thread_cnt=thread_cnt):
        subdomains.append(subname)
    print("子域名爆破结果:", len(set(subdomains)))
    # Harvest additional names from crossdomain.xml.
    subdomains = crossdomain_xml_find(target, subdomains)
    print("总共获取的子域名个数:", len(set(subdomains)))
    # Persist the de-duplicated results.
    _cache_file = os.path.join(_cache_path, out_file)
    save_result2txt(_cache_file, set(subdomains))
    print("result save in %s" % _cache_file)
# Save the subdomain results to *filename* in JSON format.
def save_result2json(filename, args):
    with open(filename, 'w') as out:
        json.dump(args, out, indent=4)
# Save the subdomain results to *filename* as plain text, one per line.
def save_result2txt(filename, args):
    with open(filename, 'w') as out:
        out.writelines(entry + "\n" for entry in args)
def crossdomain_xml_find(target, subdomains):
    """Append subdomains advertised in the site's crossdomain.xml.

    @param target      base domain, e.g. "example.com"
    @param subdomains  list extended in place (also returned)
    """
    # Matches e.g.: <allow-access-from domain="sports.163.com"
    # PERF: compile once outside the loop.
    pattern = re.compile(r'<allow-access-from domain="(\S*)?"')
    http_target = "http://www." + target
    https_target = "https://www." + target
    for t in [http_target, https_target]:
        cross_url = t + "/crossdomain.xml"
        try:
            # BUGFIX: add a timeout so an unresponsive host cannot hang
            # the whole scan (requests.get blocks forever by default).
            res = requests.get(cross_url, timeout=10)
            for i in pattern.findall(res.text):
                # Wildcard entries are not concrete subdomains.
                if '*' not in i:
                    subdomains.append(i)
                    print("xml:", i)
        except Exception:
            # Best effort: a failed fetch just skips this source.
            pass
    return subdomains
if __name__ == '__main__':
    # Other sample targets: "aliyun.com", "newzone.com", "163.com"
    target = "weibo.com"
    # Other sample dictionaries: "default.csv", "subnames.txt"
    dic_file = "wydomain.csv"
    # ".json" also works as an extension for save_result2json
    out_file = "wytest4" + ".txt"
    thread = 8
    timeout = 10
    # BUGFIX: thread and timeout were defined but never forwarded to run(),
    # so the defaults (5/5) were silently used.
    run(target, dic_file, out_file, timeout=timeout, thread=thread)