Switching IP and User-Agent in Scrapy

Set up the Scrapy downloader middleware; remember to configure it in the settings file:

```python
# settings.py
DOWNLOADER_MIDDLEWARES = {
    'your_project.middlewares.RandomUserAgentMiddleware': 543,
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}
```

In middlewares.py, write the RandomUserAgentMiddleware class:


```python
# middlewares.py
from fake_useragent import UserAgent


class RandomUserAgentMiddleware(object):
    """
    Randomly rotate the User-Agent.
    Modeled on (and replacing) the UserAgentMiddleware class in
    site-packages/scrapy/downloadermiddlewares/useragent.py.
    """

    def __init__(self, crawler):
        super(RandomUserAgentMiddleware, self).__init__()
        self.ua = UserAgent()
        # Read RANDOM_UA_TYPE from settings to decide which fake_useragent
        # attribute to use; defaults to "random", but "ie", "firefox", etc. also work.
        self.ua_type = crawler.settings.get("RANDOM_UA_TYPE", "random")

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    # The User-Agent is swapped in this method, which runs for every request.
    def process_request(self, request, spider):
        def get_ua():
            return getattr(self.ua, self.ua_type)

        print(get_ua())  # print the chosen UA for debugging
        request.headers.setdefault('User-Agent', get_ua())
```
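
The `RANDOM_UA_TYPE` key read in `__init__` above is a custom key, not a built-in Scrapy setting, so it has to be added to `settings.py` yourself. A minimal sketch, assuming fake-useragent is installed (`pip install fake-useragent`):

```python
# settings.py -- custom key read by RandomUserAgentMiddleware above
RANDOM_UA_TYPE = "random"   # or "chrome", "firefox", "ie", ...
```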

The same approach works for rotating IPs:


```python
# middlewares.py (continued)
import random
import traceback

import requests


class IpProxyDownloadMiddleware(object):
    """
    Randomly rotate the proxy IP.
    """

    def __init__(self, crawler):
        super(IpProxyDownloadMiddleware, self).__init__()

    def getIP(self):
        # Keep asking the local proxy pool (the service on port 5010)
        # until a proxy passes the validation check.
        num = 1
        while True:
            proxy = requests.get('http://127.0.0.1:5010/get/?type=http').json()['proxy']
            # Verify that the proxy is usable.
            url = "http://ip-api.com/json/" + proxy.split(":")[0] + "?lang=zh-CN"
            try:
                data = requests.get(url, proxies={"http": proxy}, timeout=10,
                                    headers={'Connection': 'close'})
                print('scrapy: attempt %s to validate proxy %s' % (num, proxy))
                num += 1
                if data.status_code == 200:
                    print('scrapy: initialized proxy %s successfully' % proxy)
                    break
            except Exception:
                traceback.print_exc()
                print('retrying')

        return 'http://' + proxy

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    # The proxy is swapped in this method, which runs for every request.
    def process_request(self, request, spider):
        # Hard-coded example proxy; this particular IP will not accept Scrapy connections.
        current_ip = random.choice(['http://45.144.3.208:59785'])
        request.meta['proxy'] = current_ip  # or self.getIP()
        print("scrapy.request:", current_ip)

    def process_response(self, request, response, spider):
        return response
```
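
Like the User-Agent middleware, `IpProxyDownloadMiddleware` only takes effect once it is registered in `settings.py`. A minimal sketch, assuming the project package is named `your_project` (the priority value 544 is an assumed placeholder, not from the original configuration):

```python
# settings.py -- register both custom downloader middlewares
DOWNLOADER_MIDDLEWARES = {
    'your_project.middlewares.RandomUserAgentMiddleware': 543,
    'your_project.middlewares.IpProxyDownloadMiddleware': 544,   # assumed priority
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}
```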
### How Scrapy Deals with Anti-Crawling Measures

#### Set a download delay

To avoid getting banned for requesting too frequently, configure a download delay in `settings.py`. The `DOWNLOAD_DELAY` parameter controls the interval between consecutive requests[^2].

```python
# settings.py
DOWNLOAD_DELAY = 3  # in seconds
```

#### Use a User-Agent pool

Create a custom middleware that randomly switches the browser identifier. This simulates requests coming from different kinds of clients and lowers the risk of being detected[^4].

```python
import random


class RandomUserAgentMiddleware(object):
    def process_request(self, request, spider):
        ua = random.choice(spider.settings.get('USER_AGENT_LIST'))
        if ua:
            request.headers.setdefault('User-Agent', ua)


# Configure USER_AGENT_LIST in settings.py and enable the middleware
USER_AGENT_LIST = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 ...",
    ...
]
DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    'your_project.middlewares.RandomUserAgentMiddleware': 400,
}
```

#### Enable an IP pool

When the target site limits how often a single IP may visit, rotate requests through proxy servers. As before, build the corresponding middleware and add it to the project configuration.

```python
import random

from scrapy.exceptions import NotConfigured


class ProxyMiddleware:
    def __init__(self, proxies=None):
        self.proxies = proxies or []

    @classmethod
    def from_crawler(cls, crawler):
        if not crawler.settings.getbool('HTTP_PROXY_ENABLED'):
            raise NotConfigured
        return cls(crawler.settings.getlist('PROXY_LIST'))

    def process_request(self, request, spider):
        proxy = random.choice(self.proxies)
        request.meta['proxy'] = f"http://{proxy}"


# In settings.py, add PROXY_LIST and the HTTP_PROXY_ENABLED switch
PROXY_LIST = ['ip:port', ]
HTTP_PROXY_ENABLED = True
DOWNLOADER_MIDDLEWARES.update({
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
    'your_project.middlewares.ProxyMiddleware': 100,
})
```

#### Handle JavaScript-rendered pages

For pages whose data is loaded dynamically with JavaScript, a rendering tool such as Splash (or something similar) is required.

```bash
pip install scrapy-splash
```

Then have the spider issue `SplashRequest` objects in place of ordinary `Request` objects:

```python
from scrapy_splash import SplashRequest


def start_requests(self):
    yield SplashRequest(url='http://example.com',
                        callback=self.parse,
                        args={'wait': 0.5},
                        )
```
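
The scrapy-splash snippet above only covers the request side; the library also expects a running Splash instance plus a few project settings. A minimal sketch following the scrapy-splash README, assuming Splash is reachable at localhost:8050 (e.g. started via Docker):

```python
# settings.py -- scrapy-splash wiring (assumes a Splash instance at localhost:8050)
SPLASH_URL = 'http://localhost:8050'

DOWNLOADER_MIDDLEWARES = {
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
```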