# coding=utf-8
# Author: zijian
# 2023/9/13 15:21
import threading
import time
from queue import Empty, Queue

import requests
from fake_user_agent import user_agent


class DirScan(threading.Thread):
    """Worker thread: pulls candidate URLs off a shared queue and probes them."""

    def __init__(self, queue, name=None):
        """
        :param queue: Queue of full URLs (base url + wordlist path) to request.
        :param name: optional thread name.
        """
        # BUG FIX: threading.Thread's first positional parameter is `group`,
        # not `name` — the original super().__init__(name) passed the thread
        # name as the group argument. Pass it by keyword.
        super().__init__(name=name)
        self.__queue = queue

    def run(self):
        """Drain the queue, requesting each URL and reporting interesting codes."""
        # get_nowait() + Empty avoids the empty()/get() TOCTOU race of the
        # original: another worker could drain the queue between the
        # empty() check and the blocking get(), hanging this thread forever.
        while True:
            try:
                web_dir = self.__queue.get_nowait()
            except Empty:
                break
            headers = {
                # Random UA per request to look less like a scanner.
                "User-Agent": user_agent()
            }
            try:
                # NOTE(review): POST is unusual for directory brute-forcing
                # (GET/HEAD is typical) — kept as-is to preserve behavior.
                res = requests.request(method='post', url=web_dir,
                                       headers=headers, timeout=3)
            except requests.RequestException:
                # Best-effort scan: unreachable/timed-out paths are skipped.
                # Narrowed from the original bare `except Exception: pass`,
                # which also hid programming errors.
                continue
            # 200 = exists, 403 = exists but forbidden, 302 = redirect.
            if res.status_code in [200, 403, 302]:
                print(f"[**]{web_dir}...{res.status_code}")


def start(url, type, count):
    """
    Launch the directory scan.

    :param url: base URL of the target site (expected to end with '/').
    :param type: wordlist key — ./dict/{type}.txt is loaded.
    :param count: number of worker threads.
    :return: None
    """
    # Fill the work queue from the wordlist, one full URL per line.
    queue = Queue()
    dict_path = f"./dict/{type}.txt"
    # Explicit encoding: the original relied on the platform default.
    with open(dict_path, encoding="utf-8") as f:
        for line in f:
            dir_name = line.rstrip("\n")
            queue.put(f'{url}{dir_name}')
    # Spin up the workers, then wait for the queue to be fully drained.
    threads = [DirScan(queue) for _ in range(count)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()


if __name__ == '__main__':
    start_time = time.time()
    url = "http://www.xxx.com/"
    type = "php"
    thread_count = 50
    start(url, type, thread_count)
    end_time = time.time()
    print(f"消耗的时间:{end_time-start_time}")
# 02-17