The Scrapy Framework
Learning Objectives
- Understand the Scrapy framework.
- Learn to write Spider crawlers.
- Learn to write CrawlSpider crawlers.
- Learn to write middlewares.
- Learn to save data with pipelines.
- Learn to use Scrapy together with Selenium.
- Learn to use IP proxies in Scrapy.
Introduction to the Scrapy Framework
Installing Scrapy
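Scrapy can be installed with pip:

pip install scrapy

(If the install fails on Windows while building Twisted, installing a prebuilt Twisted wheel first usually resolves it.)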
Scrapy Framework Architecture
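At a high level, Scrapy consists of the following components:
- Engine: drives the data flow between all the other components.
- Scheduler: queues the Requests handed to it by the engine.
- Downloader: fetches pages and returns Responses.
- Spiders: parse Responses and yield Items or follow-up Requests.
- Item pipelines: process and persist the yielded Items.
- Downloader middlewares and spider middlewares: hooks for processing Requests and Responses as they pass between the components.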
Creating a Scrapy Project
- Create a project: scrapy startproject <project_name>
- Create a spider: cd into the project, then run scrapy genspider <spider_name> <domain> (a concrete example follows below)
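For the gushiwen project built in the next section (assuming the project is named gsww, which matches the GswwItem and GswwPipeline names used later), the commands would be:

scrapy startproject gsww
cd gsww
scrapy genspider gsww_spider gushiwen.org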
Project Files and Their Roles
- settings.py: configures the crawler.
- middlewares.py: defines the middlewares.
- items.py: declares, up front, the fields of the data to be scraped.
- pipelines.py: saves the scraped data.
- scrapy.cfg: the project configuration file.
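The generated layout looks like this (again assuming the project name gsww):

gsww/
    scrapy.cfg
    gsww/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py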
Crawling gushiwen.org with Scrapy
Create the Scrapy project
Create the spider
settings.py
- Disable the robots.txt protocol (ROBOTSTXT_OBEY = False).
- Add default request headers.
- Enable the item pipeline used to save the data (see the sketch below).
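A minimal sketch of the corresponding settings.py entries; the header values and the gsww module path are illustrative:

ROBOTSTXT_OBEY = False

DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',
}

ITEM_PIPELINES = {
    'gsww.pipelines.GswwPipeline': 300,
}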
gsww_spider.py
# -*- coding: utf-8 -*-
import scrapy
from ..items import GswwItem


class GswwSpiderSpider(scrapy.Spider):
    name = 'gsww_spider'
    allowed_domains = ['gushiwen.org']
    start_urls = ['https://www.gushiwen.org/default_1.aspx']

    def myprint(self, value):
        # small debugging helper
        print('=' * 30)
        print(value)
        print('=' * 30)

    def parse(self, response):
        gsw_divs = response.xpath("//div[@class='left']/div[@class='sons']")  # returns a SelectorList
        for gsw_div in gsw_divs:
            # title = gsw_div.xpath(".//b/text()").getall()  # extract all values from the SelectorList
            title = gsw_div.xpath(".//b/text()").get()  # extract the first value from the SelectorList
            source = gsw_div.xpath(".//p[@class='source']/a/text()").getall()
            # self.myprint(title)
            dynasty = source[0]
            author = source[1]
            content_list = gsw_div.xpath(".//div[@class='contson']//text()").getall()
            content = "".join(content_list).strip()
            # self.myprint(content)
            item = GswwItem(title=title, dynasty=dynasty, author=author, content=content)  # package the data for the pipeline
            yield item  # hand the items to the pipeline one by one for saving
        next_href = response.xpath("//a[@id='amore']/@href").get()  # e.g. /default_2.aspx
        if next_href:
            next_url = response.urljoin(next_href)  # join the href with the domain: https://www.gushiwen.org/default_2.aspx
            request = scrapy.Request(next_url)
            yield request  # hand this Request to the scheduler, which queues the next page
items.py
Add the fields to save (sketch below).
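A sketch of items.py with the four fields the spider fills in:

import scrapy


class GswwItem(scrapy.Item):
    title = scrapy.Field()
    dynasty = scrapy.Field()
    author = scrapy.Field()
    content = scrapy.Field()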
pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import json


class GswwPipeline(object):
    def open_spider(self, spider):  # called when the spider is opened
        self.fp = open("古诗文.txt", 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # serialize each item as one JSON line
        self.fp.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
        return item

    def close_spider(self, spider):  # called when the spider is closed
        self.fp.close()
run.py
To run this Scrapy project, just run this run.py file.
from scrapy import cmdline
cmds = ["scrapy","crawl","gsww_spider"]
cmdline.execute(cmds)
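This is equivalent to running scrapy crawl gsww_spider from the project root; run.py simply makes it convenient to start the crawl from an IDE or debugger.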
CrawlSpider
Purpose: define rules so that Scrapy automatically follows the links you want, instead of manually yielding a Request for each follow-up page as a plain Spider does.
It relies on two classes:
- LinkExtractor: defines the URL patterns to crawl.
- Rule: defines how an extracted URL is handled after being crawled, e.g. whether to follow it and whether to run a callback.
Scrapy Shell
From the command line, cd into the project directory, then run: scrapy shell <url>
There you can experiment with the extraction rules first; once they work, copy the code into the project. This makes writing the extraction code much easier.
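For example, to try out the gushiwen XPath rules from the spider above:

scrapy shell https://www.gushiwen.org/default_1.aspx
>>> response.xpath("//div[@class='left']/div[@class='sons']")
>>> response.xpath("//a[@id='amore']/@href").get()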
Saving Data to MySQL Asynchronously
Crawling lieyunwang.com
lyw_spider.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import LywItem


class LywSpiderSpider(CrawlSpider):
    name = 'lyw_spider'
    allowed_domains = ['lieyunwang.com']
    start_urls = ['https://www.lieyunwang.com/latest/p1.html']

    rules = (
        # matches the list pages; follow them, no callback needed (the detail pages are handled separately)
        Rule(LinkExtractor(allow=r'/latest/p\d+\.html'), follow=True),
        # matches the detail pages; run the callback, do not follow further
        Rule(LinkExtractor(allow=r'/archives/\d+'), callback='parse_detail', follow=False),
    )

    def parse_detail(self, response):
        # print('=' * 30)
        # print(response.url)  # print the URL that was fetched
        # print('=' * 30)
        title_list = response.xpath("//h1[@class='lyw-article-title']/text()").getall()
        title = "".join(title_list).strip()
        pub_time_list = response.xpath("//h1[@class='lyw-article-title']/span//text()").getall()
        pub_time = "".join(pub_time_list).strip()  # join the text nodes so a plain string reaches MySQL
        author = response.xpath("//a[contains(@class,'author-name')]//text()").get()
        content = response.xpath("//div[@class='main-text']").get()
        origin_url = response.url
        item = LywItem(title=title, pub_time=pub_time, author=author, content=content, origin_url=origin_url)
        yield item
Add the database configuration to settings.py (a sketch follows).
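A sketch of the MYSQL_CONFIG entry the pipeline below reads from settings.py; the host, credentials, and database name are placeholders, and the driver module (pymysql here) must be installed:

MYSQL_CONFIG = {
    'DRIVER': 'pymysql',   # DB-API module name handed to adbapi.ConnectionPool
    'HOST': '127.0.0.1',
    'PORT': 3306,
    'USER': 'root',
    'PASSWORD': 'root',
    'DATABASE': 'lyw',
}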
pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from twisted.enterprise import adbapi


class LywPipeline(object):
    # 2: receives the config that from_crawler read from the settings
    def __init__(self, mysql_config):
        # create the Twisted connection pool
        self.dbpool = adbapi.ConnectionPool(
            mysql_config['DRIVER'],
            host=mysql_config['HOST'],
            port=mysql_config['PORT'],
            user=mysql_config['USER'],
            password=mysql_config['PASSWORD'],
            db=mysql_config['DATABASE'],
            charset='utf8'
        )

    # 1: called first
    @classmethod
    def from_crawler(cls, crawler):
        # once from_crawler is overridden, Scrapy calls it to create the pipeline object
        mysql_config = crawler.settings['MYSQL_CONFIG']  # read the config from settings.py
        return cls(mysql_config)  # equivalent to LywPipeline(mysql_config)

    # 3: called for every item that comes through
    def process_item(self, item, spider):
        # runInteraction runs insert_item in a pool thread and passes it a
        # cursor, so the INSERT does not block the crawl
        result = self.dbpool.runInteraction(self.insert_item, item)
        # error listener: if the insert fails, print the failure
        result.addErrback(self.insert_error)
        return item

    # 4: executes the INSERT statement
    def insert_item(self, cursor, item):
        sql = "insert into lyw_data(id,title,author,pub_time,content,origin_url) values(null,%s,%s,%s,%s,%s)"
        args = (item['title'], item['author'], item['pub_time'], item['content'], item['origin_url'])
        cursor.execute(sql, args)

    def insert_error(self, failure):
        print('=' * 30)
        print(failure)
        print('=' * 30)

    # 5: called when the spider is closed
    def close_spider(self, spider):
        self.dbpool.close()
Automating GitHub Login
github_spider.py
# -*- coding: utf-8 -*-
import scrapy
import time


class GithubSpiderSpider(scrapy.Spider):
    name = 'github_spider'
    allowed_domains = ['github.com']
    start_urls = ['https://github.com/login']

    def parse(self, response):
        timestamp = str(int(time.time() * 1000))
        # hidden form fields that must be echoed back with the login POST
        authenticity_token = response.xpath("//input[@name='authenticity_token']/@value").get()
        timestamp_secret = response.xpath("//input[@name='timestamp_secret']/@value").get()
        form_data = {
            'commit': 'Sign in',
            'utf8': '✓',
            'authenticity_token': authenticity_token,
            'ga_id': '1804287830.1582287555',
            'login': 'wu*******',
            'password': 'wuy*******',
            'webauthn-support': 'supported',
            'webauthn-iuvpaa-support': 'supported',
            'timestamp': timestamp,
            'timestamp_secret': timestamp_secret,
        }
        # First way to submit the form: POST to the session URL directly
        yield scrapy.FormRequest("https://github.com/session", formdata=form_data, callback=self.after_login)
        # Second way: let Scrapy pre-fill the hidden fields from the login page's form
        # yield scrapy.FormRequest.from_response(response, formdata={
        #     'login': 'wu******',
        #     'password': 'w*********'
        # }, callback=self.after_login)

    def after_login(self, response):
        print('=' * 30)
        yield scrapy.Request("https://github.com/settings/profile", callback=self.visit_profile)

    def visit_profile(self, response):
        print('-' * 30)
        # dump the profile page to disk to verify the login worked
        with open('github_profile.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
Downloading Image Files
Note: the two item fields image_urls and images are required by ImagesPipeline.
zcool_spider.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from ..items import ZcoolItem


class ZcoolSpiderSpider(CrawlSpider):
    name = 'zcool_spider'
    allowed_domains = ['zcool.com.cn']
    start_urls = ['https://www.zcool.com.cn/discover/0!3!0!0!0!!!!2!-1!1']

    rules = (
        # pagination URLs
        Rule(LinkExtractor(allow=r'.+0!3!0!0!0!!!!2!-1!\d+'), follow=True),
        # detail-page URLs
        Rule(LinkExtractor(allow=r'.+/work/.+html'), follow=False, callback="parse_detail")
    )

    def parse_detail(self, response):
        image_urls = response.xpath("//div[contains(@class,'work-show-box')]//img/@src").getall()
        title_list = response.xpath("//div[@class='details-contitle-box']/h2/text()").getall()
        title = "".join(title_list).strip()
        item = ZcoolItem(title=title, image_urls=image_urls)
        print('=' * 30)
        yield item
items.py
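A sketch of items.py; title is filled in by the spider, while image_urls and images are the field names ImagesPipeline expects by default:

import scrapy


class ZcoolItem(scrapy.Item):
    title = scrapy.Field()
    image_urls = scrapy.Field()  # list of image URLs for ImagesPipeline to download
    images = scrapy.Field()      # populated by ImagesPipeline with the download results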
settings.py
Add the IMAGES_STORE save path (a sketch follows).
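A sketch of the relevant settings.py entries; the path and the zcool module path are illustrative:

import os

# directory where downloaded images are stored
IMAGES_STORE = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'images')

ITEM_PIPELINES = {
    # use the custom ImagesPipeline subclass defined below
    'zcool.pipelines.ZcoolPipeline': 1,
}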
pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.images import ImagesPipeline
from zcool import settings
import os
import re


class ZcoolPipeline(ImagesPipeline):
    # def process_item(self, item, spider):
    #     return item

    # bind the item to each request; called before the images are downloaded
    def get_media_requests(self, item, info):
        media_requests = super(ZcoolPipeline, self).get_media_requests(item, info)
        # print('*' * 30, media_requests)
        # print('item', item)
        # print('item_dict', dict(item))
        for media_request in media_requests:
            media_request.item = item
        return media_requests

    # This method is called once per downloaded item. It returns the download
    # path of the file originating from the specified response.
    def file_path(self, request, response=None, info=None):
        origin_path = super(ZcoolPipeline, self).file_path(request, response, info)
        # print('origin_path:', origin_path)
        # print('request_item_title:', request.item['title'])
        title = request.item['title']
        title = re.sub(r'[\\/:\*\?"<>\|]', "", title)  # strip characters that are illegal in file names
        # print('title', title)
        save_path = os.path.join(settings.IMAGES_STORE, title)  # one subdirectory per work, named after its title
        # print(save_path)
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        image_name = origin_path.replace("full/", "")  # default paths look like full/<hash>.jpg
        # print('-' * 30, image_name)
        # print('save_path:', os.path.join(save_path, image_name))
        return os.path.join(save_path, image_name)