""" 大部分的scrapy下载中间件都不要 """
import logging
import random
from concurrent.futures import ThreadPoolExecutor

import requests
import scrapy
from requests.adapters import HTTPAdapter
from scrapy.http import TextResponse
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from urllib3.util.ssl_ import create_urllib3_context
# Module-level logger, named after this module per the stdlib convention.
logger = logging.getLogger(__name__)
# NOTE(review): basicConfig at import time configures the process-wide root
# logger at DEBUG. In middleware/library code this is usually better left to
# the application (Scrapy has its own LOG_* settings) — confirm this is wanted.
logging.basicConfig(level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)s [%(name)s:%(module)s:%(funcName)s:%(lineno)d] %(levelname)s: %(message)s')
class DESAdapter(HTTPAdapter):
    """Transport adapter that randomizes the TLS cipher-suite order.

    Shuffling the cipher list changes the ClientHello, so each adapter
    instance presents a different JA3 TLS fingerprint.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shuffle the suite order, then re-append the mandatory exclusions.
        suites = ('ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:'
                  'DH+AES:ECDH+HIGH:DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:'
                  'RSA+AES:RSA+HIGH:RSA+3DES').split(':')
        random.shuffle(suites)
        self.ciphers = ':'.join(suites) + ':!aNULL:!eNULL:!MD5'

    def get_connection(self, *args, **kwargs):
        # Attach an SSL context built from the shuffled cipher string to the
        # connection pool returned by the stock HTTPAdapter.
        pool = super().get_connection(*args, **kwargs)
        pool.conn_kw['ssl_context'] = create_urllib3_context(ciphers=self.ciphers)
        return pool
class RequestsDownloadMiddleware:
    """Scrapy downloader middleware that performs downloads with ``requests``.

    ``process_request`` returns a Deferred, which makes Scrapy skip its own
    downloader; the blocking ``requests`` call runs in a thread pool so the
    reactor thread never blocks, and the JA3-randomizing ``DESAdapter`` is
    mounted on every session.
    """

    def __init__(self):
        # Worker pool for the blocking requests calls.
        # NOTE(review): the pool is never shut down; consider hooking
        # spider_closed to call self.executor.shutdown().
        self.executor = ThreadPoolExecutor(max_workers=7)

    def process_request(self, request, spider):
        """Return a Deferred that will fire with the downloaded response."""
        d = Deferred()
        self.executor.submit(self.defer, d, request, spider)
        return d

    def defer(self, deferred: Deferred, request: scrapy.Request, spider: scrapy.Spider):
        """Download *request* in a worker thread and fire *deferred*.

        Any download error is logged and converted into a synthetic
        status-500 TextResponse so the spider always receives a response.
        """
        try:
            resp = self.download(request)
            headers = dict(resp.headers)
            # BUGFIX: requests already decoded gzip/deflate/br bodies, so the
            # Content-Encoding header must not be forwarded — otherwise
            # Scrapy's HttpCompressionMiddleware tries to decode a second time.
            headers.pop('Content-Encoding', None)
            response = TextResponse(
                url=request.url,
                status=resp.status_code,
                headers=headers,
                body=resp.content,
                request=request
            )
        except Exception:
            # logger.exception keeps the traceback; logger.warning(e) dropped it.
            logger.exception('requests download failed for %s', request.url)
            response = TextResponse(url=request.url, status=500, request=request)
        # BUGFIX: Twisted Deferreds are not thread-safe; callbacks must be
        # fired from the reactor thread, not from this worker thread.
        reactor.callFromThread(deferred.callback, response)

    def download(self, request) -> requests.Response:
        """Perform the blocking HTTP call for a Scrapy request."""
        # BUGFIX: the session was previously never closed (connection leak);
        # requests.Session is a context manager, so close it deterministically.
        with self.create_session({}) as session:
            return session.request(method=request.method, url=request.url, data=request.body,
                                   headers=dict(request.headers.to_unicode_dict()), timeout=10)

    def create_session(self, proxies) -> requests.Session:
        """Build a requests.Session with browser-like headers and DESAdapter.

        :param proxies: mapping merged into the session's proxy settings.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'close',
            # Random major version makes the UA vary between sessions.
            'User-Agent': f'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{random.randint(105, 128)}.0.0.0 Safari/537.36',
        }
        s = requests.Session()
        s.headers.update(headers)
        s.proxies.update(proxies)
        # SECURITY NOTE: certificate verification is disabled on purpose here
        # (custom ssl_context for JA3); this allows MITM — keep deliberate.
        s.verify = False
        s.mount('https://', DESAdapter())
        return s
# Scrapy using a requests-based downloader
# (original article footer: latest recommended article published 2024-10-03 09:02:12)