CrawlSpider Example and Login

  • Creating a CrawlSpider
scrapy startproject wxapp
cd wxapp
scrapy genspider -t crawl wxappspider www.wxapp-union.com
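
These commands produce the standard Scrapy project layout, roughly as sketched below; `genspider -t crawl` fills in a CrawlSpider template with the rules scaffolding used in the next step:

```
wxapp/
├── scrapy.cfg
└── wxapp/
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        └── wxappspider.py
```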

In wxappspider.py:

# -*- coding: utf-8 -*-

'''Crawl every WeChat mini-program tutorial page on wxapp-union.com'''
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from wxapp.items import WxappItem


class WxappSpiderSpider(CrawlSpider):
    name = 'wxapp_spider'
    allowed_domains = ['wxapp-union.com']
    start_urls = ['http://www.wxapp-union.com/portal.php?mod=list&catid=2&page=1']

    rules = (
        # Follow list pages; no callback is needed, because there is
        # nothing to extract from the list pages themselves
        Rule(LinkExtractor(allow=r'.*?page=\d'), follow=True),
        # Extract article detail pages, parse the fields in the callback,
        # and do not follow links found on the detail pages
        Rule(LinkExtractor(allow=r'.*?article-.*?\.html'), callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        item = WxappItem()
        # extract_first() returns the first matched string instead of a list
        item['title'] = response.xpath('//h1[@class="ph"]/text()').extract_first()
        yield item
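
The spider imports WxappItem from wxapp/items.py, which this walkthrough never shows; a minimal sketch with just the title field used above:

```python
import scrapy


class WxappItem(scrapy.Item):
    # The only field parse_item() fills in; add more fields as needed
    title = scrapy.Field()
```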

In pipelines.py:


from scrapy.exporters import JsonLinesItemExporter


# Save items to a JSON-lines file
class WxappPipeline(object):
    def __init__(self):
        # The exporter writes bytes, so the file must be opened in binary mode
        self.file = open('wxapp.json', 'wb')
        self.exporter = JsonLinesItemExporter(self.file, ensure_ascii=False, encoding='utf-8')

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item

    # Scrapy calls close_spider(spider); the extra item argument was a bug
    def close_spider(self, spider):
        self.file.close()
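
Once the pipeline is enabled in the settings shown below, the spider is run by name from the project directory; each scraped item is then appended to wxapp.json as one JSON object per line:

```
scrapy crawl wxapp_spider
```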

Changes in settings.py (not item.py, as these are project settings):

ROBOTSTXT_OBEY = False
DOWNLOAD_DELAY = 1
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
    'Referer': 'http://www.wxapp-union.com/article-4527-1.html',
}
ITEM_PIPELINES = {
    'wxapp.pipelines.WxappPipeline': 300,
}

Login


'''Log in to renren.com by overriding the
start_requests method to send the login form first'''

import scrapy


class RenrenSpider(scrapy.Spider):
    name = 'renren'
    allowed_domains = ['www.renren.com']
    start_urls = ['http://www.renren.com/']

    def start_requests(self):
        # POST the credentials to the login endpoint before crawling
        url = 'http://www.renren.com/PLogin.do'
        data = {
            'email': '',     # fill in your account email
            'password': '',  # fill in your password
        }
        yield scrapy.FormRequest(url, formdata=data, callback=self.parse)

    def parse(self, response):
        # Requests issued from here carry the login session cookies
        pass
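
When the login form carries hidden fields such as CSRF tokens, an alternative to posting the endpoint directly is to let Scrapy read the form out of the login page with FormRequest.from_response. The sketch below is a hypothetical variant, not part of the original tutorial, and assumes the credential fields keep the same names:

```python
import scrapy


class RenrenFormSpider(scrapy.Spider):
    # Hypothetical variant of the spider above
    name = 'renren_form'
    allowed_domains = ['www.renren.com']
    start_urls = ['http://www.renren.com/']

    def parse(self, response):
        # from_response() pre-fills hidden inputs found in the page's <form>
        yield scrapy.FormRequest.from_response(
            response,
            formdata={'email': '', 'password': ''},  # fill in credentials
            callback=self.after_login,
        )

    def after_login(self, response):
        # The session established by the login applies to later requests
        pass
```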
CrawlSpider is a spider class in the Scrapy framework that extracts links from a site and crawls them recursively according to rules. While crawling, some sites deploy anti-scraping measures such as CAPTCHAs or IP bans, and the crawler has to deal with them. Below is an example of a middleware that handles a CAPTCHA:

```python
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


class CaptchaMiddleware(object):
    def __init__(self, driver_path):
        self.driver = webdriver.Chrome(executable_path=driver_path)
        self.wait = WebDriverWait(self.driver, 10)

    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls(crawler.settings.get('DRIVER_PATH'))
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def process_request(self, request, spider):
        self.driver.get(request.url)
        try:
            # Wait for the CAPTCHA input field to appear
            captcha = self.wait.until(EC.presence_of_element_located(
                (By.XPATH, '//input[@name="captcha"]')))
            captcha_input = input('Please enter the CAPTCHA: ')
            submit_button = self.driver.find_element_by_xpath('//button[@type="submit"]')
            captcha.send_keys(captcha_input)
            submit_button.click()
            return HtmlResponse(url=request.url, body=self.driver.page_source,
                                request=request, encoding='utf-8', status=200)
        except Exception:
            return HtmlResponse(url=request.url, request=request, status=500)

    def spider_closed(self, spider):
        self.driver.quit()
```

This middleware uses Selenium to drive a real browser. When an anti-scraping CAPTCHA is encountered, it prompts on the console for the CAPTCHA text, types it into the field, clicks the submit button, and returns the resulting page as the response. Note that Selenium needs the matching browser driver installed; this example uses the Chrome driver.
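
To take effect, a downloader middleware like this has to be registered in the project settings. A minimal sketch, assuming the middleware lives in wxapp/middlewares.py (the module path and the driver location below are assumptions, not shown above):

```python
# settings.py -- hedged sketch; adjust the module path to your project
DOWNLOADER_MIDDLEWARES = {
    'wxapp.middlewares.CaptchaMiddleware': 543,
}
# Read by CaptchaMiddleware.from_crawler() via crawler.settings.get('DRIVER_PATH')
DRIVER_PATH = '/usr/local/bin/chromedriver'
```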
