Information Gathering: Passive Information Gathering

Passive information gathering: collecting information about the target host without interacting with it directly, typically by way of search engines, social networks, and other third-party sources.

DNS Resolution

1.1 IP Lookup

Query the IP address a domain resolves to:

import socket
import argparse

def domain_parse():
    # Parse the target domain from the command line
    usage = 'usage: python %(prog)s domain'
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument('domain')

    options = parser.parse_args()

    domain = options.domain

    return domain

if __name__ == '__main__':
    domain = domain_parse()
    ip = socket.gethostbyname(domain)  # resolve the domain to a single IPv4 address
    print(ip)
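Running this as, say, python ip_lookup.py baidu.com (the script name here is arbitrary) prints a single resolved address. Note that socket.gethostbyname() returns only one IPv4 address; socket.gethostbyname_ex() returns the full list when a domain has several A records.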

1.2 Whois Lookup

Whois lookups return a domain's registration record: registrant, registrar, name servers, and key dates.

Install the python-whois package first (pip install python-whois):

from whois import whois
import argparse

def domain_parse():
    # Parse the target domain from the command line
    usage = 'usage: python %(prog)s domain'
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument('domain')

    options = parser.parse_args()

    domain = options.domain

    return domain

if __name__ == '__main__':
    domain = domain_parse()
    data = whois(domain)  # query the whois registration record
    print(data)
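Running python whois_lookup.py baidu.com (script name arbitrary) prints the parsed registration record: registrar, creation and expiry dates, name servers, and whatever contact details the registry exposes.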

1.3 Subdomain Enumeration

Third-party tools

OneForAll, Sublist3r, YunXi (云悉), Layer Subdomain Miner (Layer子域名挖掘机), YuJian subdomain scanner (御剑子域名扫描), dnsenum

Using Python to enumerate subdomains via Bing:

import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import sys

def bing_search(site, pages):
    Subdomain = []  # collect discovered subdomains in a list
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Referer': 'https://www.bing.com/',
        'cookie': 'MUID=2D75DFAC11BE6E2C17A5CE1110BF6F5D; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=6E08C7F335954E4A81F836E094D9EE88&dmnchg=1;'
                  ' _UR=cdxcls=0&QS=0&TQS=0; SRCHUSR=DOB=20230208&T=1701833557000; _Rwho=u=d; _SS=SID=0E3DBDDEA58566841768AE01A43A679F&R=0&RB=0&GB=0&RG=200&RP=0; ipv6=hit=1701837160490&t=6;'
    }

    for page in range(1, int(pages) + 1):
        # Bing pagination: "first" is the 1-based index of the first result on the page
        url = "https://www.bing.com/search?q=site%3a" + site + "&go=search&qs=ds&first=" + str((page - 1) * 10) + "&FORM=PERE"
        html = requests.get(url, stream=True, headers=headers)
        soup = BeautifulSoup(html.content, 'html.parser')
        job_bt = soup.find_all('h2')  # result titles live in <h2> tags

        for result in job_bt:
            link = result.find('a')
            if link:
                href = link['href']
                domain = str(urlparse(href).scheme + "://" + urlparse(href).netloc)
                if domain not in Subdomain:
                    Subdomain.append(domain)
                    print(domain)

    return Subdomain

if __name__ == '__main__':
    if len(sys.argv) == 3:
        site = sys.argv[1]
        page = sys.argv[2]
    else:
        print("usage: %s baidu.com 10" % sys.argv[0])
        sys.exit(-1)

    Subdomain = bing_search(site, page)
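Example run: python bing_subdomains.py baidu.com 10 (script name arbitrary) scrapes ten pages of Bing results for site:baidu.com and prints each unique subdomain once. The hardcoded cookie values above are session-specific and may need refreshing if Bing starts rejecting requests.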

1.4 Directory Scanning

dirsearch is the usual third-party tool here; a minimal Python sketch of the same idea follows.
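dirsearch drives a large built-in wordlist against a target; the sketch below only illustrates the core idea. The tiny wordlist and target URL are assumed placeholders, not dirsearch's actual defaults.

import requests

# Minimal sketch of what a directory scanner automates: request each
# candidate path and report anything that is not a 404. The wordlist
# and target URL below are illustrative placeholders.
def dir_scan(base_url, wordlist):
    found = []
    for path in wordlist:
        url = base_url.rstrip('/') + '/' + path
        try:
            r = requests.get(url, timeout=5, allow_redirects=False)
        except requests.RequestException:
            continue  # unreachable host or timeout; skip this path
        if r.status_code != 404:
            found.append((url, r.status_code))
            print(url, r.status_code)
    return found

if __name__ == '__main__':
    dir_scan('http://example.com', ['admin/', 'login.php', 'backup.zip', 'robots.txt'])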

1.5 C-Segment Scanning

nbtscan and netdiscover enumerate live hosts on the surrounding class C subnet; a rough Python sketch of the idea follows.
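nbtscan and netdiscover rely on NetBIOS queries and ARP respectively. As a simplified illustration of sweeping a /24, the sketch below tries a TCP connect to one common port on every host; the subnet prefix and port are assumptions for demonstration, and the real tools use faster, lower-level probes.

import socket

# Rough sketch of a C-segment (/24) sweep: attempt a TCP connection to
# one common port on every host. nbtscan and netdiscover use NetBIOS
# and ARP instead; this TCP probe is only for illustration.
def c_segment_scan(prefix, port=80):
    alive = []
    for host in range(1, 255):
        ip = f"{prefix}.{host}"
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.3)
        try:
            if s.connect_ex((ip, port)) == 0:  # 0 means the connection succeeded
                alive.append(ip)
                print(ip)
        finally:
            s.close()
    return alive

if __name__ == '__main__':
    c_segment_scan('192.168.1')  # sweeps 192.168.1.1 through 192.168.1.254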

1.6 Email Harvesting

#-*- coding:utf-8 -*-
import sys
import getopt
import requests
from bs4 import BeautifulSoup
import re
import time
import threading

# Print the banner
def banner():
    print('\033[1;34m########################################################################################\033[0m\n'
          '\033[1;34m####################################\033[1;32mEmail Crawler\033[1;34m###################################\033[0m\n'
          '\033[1;34m########################################################################################\033[0m\n')

# Print usage and exit
def usage():
    print('-h: --help  show this help;')
    print('-u: --url   target domain;')
    print('-p: --pages number of result pages to search;')
    print('eg: python %s -u "www.baidu.com" -p 100' % sys.argv[0] + '\n')
    sys.exit()

# Main entry: parse the command-line options, then launch the crawl
def start(argv):
    url = ""
    pages = ""
    if len(sys.argv) < 2:
        print("-h for help;\n")
        sys.exit()
    # Guard against malformed options
    try:
        banner()
        opts, args = getopt.getopt(argv, "-u:-p:-h")
    except getopt.GetoptError:
        print('Error in an argument!')
        sys.exit()
    for opt, arg in opts:
        if opt == "-u":
            url = arg
        elif opt == "-p":
            pages = arg
        elif opt == "-h":
            usage()
    threader(url, pages)


class MyThread(threading.Thread):
    def __init__(self, func, args=()):
        super(MyThread, self).__init__()
        self.func = func
        self.args = args
        self.result = []  # default so get_result() is safe even if run() skips

    def run(self):
        if self.args[1] < 1:  # args[1] is the page number; ignore invalid pages
            pass
        else:
            # Store the search result so the caller can fetch it via get_result()
            self.result = self.func(*self.args)

    def get_result(self):
        try:
            return self.result
        except Exception:
            return None


def threader(url, pages):
    launcher(url, pages)

# Walk through the requested number of result pages
def launcher(url, pages):
    if len(pages) < 1:
        pass
    else:
        for page in range(1, int(pages) + 1):
            keyword(url, page)


# Search one result page for every keyword in parallel, then de-duplicate
def keyword(url, page):
    threads = []
    email_sum = []
    email_num = []
    key_words = ['email', 'mail', 'mailbox', '邮件', '邮箱', 'postbox']
    for key_word in key_words:
        t = MyThread(emails, args=(url, page, key_word))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()  # wait for each worker to finish before reading its result
        email_num.append(t.get_result())
    for email_group in email_num:
        for email in email_group or []:
            if email not in email_sum:
                email_sum.append(email)
                print(email)

# Merge results from both search engines for one keyword and page
def emails(url, page, key_word):
    bing_emails = bing_search(url, page, key_word)
    baidu_emails = baidu_search(url, page, key_word)
    sum_emails = bing_emails + baidu_emails
    return sum_emails



# Scrape one page of Bing results and extract addresses from the raw HTML
def bing_search(url, page, key_word):
    referer = "http://cn.bing.com/search?q=email+site%3abaidu.com&qs=n&sp=-1&pq=emailsite%3abaidu.com&first=1&FORM=PERE1"
    conn = requests.session()
    bing_url = "http://cn.bing.com/search?q=" + key_word + "+site%3a" + url + "&qs=n&sp=-1&pq=" + key_word + "site%3a" + url + "&first=" + str(
        (page-1)*10) + "&FORM=PERE1"
    conn.get('http://cn.bing.com', headers=headers(referer))
    r = conn.get(bing_url, stream=True, headers=headers(referer), timeout=8)
    emails = search_email(r.text)
    return emails

# Scrape one page of Baidu results, follow each hit, and extract addresses
def baidu_search(url, page, key_word):
    email_list = []
    referer = "https://www.baidu.com/s?wd=email+site%3Abaidu.com&pn=1"
    baidu_url = "https://www.baidu.com/s?wd=" + key_word + "+site%3A" + url + "&pn=" + str((page - 1) * 10)
    conn = requests.session()
    conn.get(referer, headers=headers(referer))
    r = conn.get(baidu_url, headers=headers(referer))
    soup = BeautifulSoup(r.text, 'lxml')
    tagh3 = soup.find_all('h3')
    for h3 in tagh3:
        link = h3.find('a')
        if link is None:
            continue  # some <h3> tags carry no result link
        href = link.get('href')
        try:
            # Baidu links are redirects; fetch the landing page and scan it
            r = requests.get(href, headers=headers(referer), timeout=8)
            for email in search_email(r.text):
                email_list.append(email)
        except Exception:
            pass
    return email_list

# Pull email-like strings out of the page source
def search_email(html):
    emails = re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+",html,re.I)
    return emails

def headers(referer):
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
               'Accept': '*/*',
               'Accept-Language': 'en-US,en;q=0.5',
               'Accept-Encoding': 'gzip,deflate',
               'Referer': referer
               }
    return headers


if __name__ == '__main__':
    # Allow Ctrl-C to stop the crawl cleanly
    try:
        start(sys.argv[1:])
    except KeyboardInterrupt:
        print("interrupted by user, killing all threads...")
    
