# After crawling, the harvested domains can be probed with httpx; save them to 3.txt.
# Typical command: httpx -l 3.txt -status-code -tech-detect -web-server -title -cdn -mc 200
import asyncio
import re

from pyppeteer import launch

# Anchor-tag href extractor; compiled once since it is applied to a full page dump.
LINK_RE = re.compile(r'<a href="(.+?)"')


async def ziyu(uri):
    """Render https://chaziyu.com/<uri> in headless Chromium and print subdomains.

    Subdomain results on the page are linked through either ipchaxun.com or
    chaolianjie.com; each such link is stripped back to the bare host name and
    printed as an ``http://`` URL (same output and order as before: all
    ipchaxun matches first, then all chaolianjie matches).

    Args:
        uri: The apex domain to query, e.g. ``example.com``.
    """
    browser = await launch()
    try:
        page = await browser.newPage()
        # Spoof a desktop Firefox UA so the site serves the normal page.
        await page.setUserAgent('Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/60.0')
        url = 'https://chaziyu.com/' + uri
        await page.goto(url)
        # Scroll to the bottom so lazily rendered result rows are loaded,
        # then give the page a moment to finish.
        await page.evaluate('window.scrollBy(0, document.body.scrollHeight)')
        await asyncio.sleep(1)
        content = await page.content()
        links = LINK_RE.findall(content)
        # Pass 1: results linked via ipchaxun.com.
        for link in links:
            if 'https://ipchaxun.com/' in link:
                domain = link.replace("https://ipchaxun.com/", "").rstrip("/")
                print('http://' + domain)
        # Pass 2: results linked via chaolianjie.com (host follows a '#').
        for link in links:
            if 'https://chaolianjie.com/' in link:
                domain = link.replace("https://chaolianjie.com/#", "").rstrip("/")
                print('http://' + domain)
    finally:
        # Always release the headless Chromium instance, even when the
        # navigation or scraping above raises (original leaked it on error).
        await browser.close()


if __name__ == '__main__':
    uri = input('+ \033[35m请输入要查询的子域名$:\033[0m')
    asyncio.run(ziyu(uri))