Python Crawler Framework: Scrapy

Performance
When writing a crawler, most of the cost goes into IO: fetching URLs from a single-process, single-threaded program inevitably blocks on every request and slows the whole job down.

So how do we write an efficient program?

import requests

def fetch_async(url):
    response = requests.get(url)
    return response

url_list = ['http://www.github.com', 'http://www.bing.com']

for url in url_list:
    fetch_async(url)

1. Synchronous execution

from concurrent.futures import ThreadPoolExecutor
import requests

def fetch_async(url):
    response = requests.get(url)
    return response

url_list = ['http://www.github.com', 'http://www.bing.com']
pool = ThreadPoolExecutor(5)
for url in url_list:
    pool.submit(fetch_async, url)
pool.shutdown(wait=True)

2. Multi-threaded execution

from concurrent.futures import ThreadPoolExecutor
import requests

def fetch_async(url):
    response = requests.get(url)
    return response

def callback(future):
    print(future.result())

url_list = ['http://www.github.com', 'http://www.bing.com']
pool = ThreadPoolExecutor(5)
for url in url_list:
    v = pool.submit(fetch_async, url)
    v.add_done_callback(callback)
pool.shutdown(wait=True)

2. Multi-threaded execution with a callback

from concurrent.futures import ProcessPoolExecutor
import requests

def fetch_async(url):
    response = requests.get(url)
    return response

url_list = ['http://www.github.com', 'http://www.bing.com']
# Note: on Windows, process-pool code should be guarded by if __name__ == '__main__':
pool = ProcessPoolExecutor(5)
for url in url_list:
    pool.submit(fetch_async, url)
pool.shutdown(wait=True)

3. Multi-process execution

from concurrent.futures import ProcessPoolExecutor
import requests

def fetch_async(url):
    response = requests.get(url)
    return response

def callback(future):
    print(future.result())

url_list = ['http://www.github.com', 'http://www.bing.com']
pool = ProcessPoolExecutor(5)
for url in url_list:
    v = pool.submit(fetch_async, url)
    v.add_done_callback(callback)
pool.shutdown(wait=True)

3. Multi-process execution with a callback
Any of the snippets above improves request throughput. The drawback of multi-threading and multi-processing is that threads and processes sit idle while blocked on IO, so asynchronous IO is usually the better choice:

import asyncio

@asyncio.coroutine
def func1():
    print('before...func1...')
    yield from asyncio.sleep(5)
    print('end...func1...')

tasks = [func1(), func1()]

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
loop.close()

1. asyncio, example 1
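The @asyncio.coroutine / yield from style used above is the legacy generator-based coroutine API (deprecated since Python 3.8 and removed in 3.11). On Python 3.5+ the same example can be written with native async/await; a minimal sketch:

import asyncio

async def func1():
    print('before...func1...')
    await asyncio.sleep(5)
    print('end...func1...')

async def main():
    # run both coroutines concurrently
    await asyncio.gather(func1(), func1())

asyncio.run(main())  # Python 3.7+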

import asyncio

@asyncio.coroutine
def fetch_async(host, url='/'):
    print(host, url)
    reader, writer = yield from asyncio.open_connection(host, 80)

    request_header_content = "GET %s HTTP/1.0\r\nHost: %s\r\n\r\n" % (url, host,)
    request_header_content = bytes(request_header_content, encoding='utf-8')

    writer.write(request_header_content)
    yield from writer.drain()
    text = yield from reader.read()
    print(host, url, text)
    writer.close()

tasks = [
    fetch_async('www.cnblogs.com', '/wupeiqi/'),
    fetch_async('dig.chouti.com', '/pic/show?nid=4073644713430508&lid=10273091')
]

loop = asyncio.get_event_loop()
results = loop.run_until_complete(asyncio.gather(*tasks))
loop.close()

1. asyncio, example 2

import aiohttp
import asyncio

@asyncio.coroutine
def fetch_async(url):
    print(url)
    # note: recent aiohttp versions use aiohttp.ClientSession() instead of aiohttp.request()
    response = yield from aiohttp.request('GET', url)
    data = yield from response.read()
    print(url, data)
    print(url, response)
    response.close()

tasks = [fetch_async('http://www.google.com/'), fetch_async('http://www.chouti.com/')]

event_loop = asyncio.get_event_loop()
results = event_loop.run_until_complete(asyncio.gather(*tasks))
event_loop.close()

2. asyncio + aiohttp

import asyncio
import requests

@asyncio.coroutine
def fetch_async(func, *args):
    loop = asyncio.get_event_loop()
    future = loop.run_in_executor(None, func, *args)
    response = yield from future
    print(response.url, response.content)

tasks = [
    fetch_async(requests.get, 'http://www.cnblogs.com/wupeiqi/'),
    fetch_async(requests.get, 'http://dig.chouti.com/pic/show?nid=4073644713430508&lid=10273091')
]

loop = asyncio.get_event_loop()
results = loop.run_until_complete(asyncio.gather(*tasks))
loop.close()

3. asyncio + requests

import gevent
import requests
from gevent import monkey

monkey.patch_all()

def fetch_async(method, url, req_kwargs):
    print(method, url, req_kwargs)
    response = requests.request(method=method, url=url, **req_kwargs)
    print(response.url, response.content)

# ##### Send requests #####
gevent.joinall([
    gevent.spawn(fetch_async, method='get', url='https://www.python.org/', req_kwargs={}),
    gevent.spawn(fetch_async, method='get', url='https://www.yahoo.com/', req_kwargs={}),
    gevent.spawn(fetch_async, method='get', url='https://github.com/', req_kwargs={}),
])

# ##### Send requests (a pool caps the number of concurrent greenlets) #####
from gevent.pool import Pool

pool = Pool(None)

gevent.joinall([
    pool.spawn(fetch_async, method='get', url='https://www.python.org/', req_kwargs={}),
    pool.spawn(fetch_async, method='get', url='https://www.yahoo.com/', req_kwargs={}),
    pool.spawn(fetch_async, method='get', url='https://www.github.com/', req_kwargs={}),
])

4. gevent + requests

import grequests

request_list = [
    grequests.get('http://httpbin.org/delay/1', timeout=0.001),
    grequests.get('http://fakedomain/'),
    grequests.get('http://httpbin.org/status/500')
]

# ##### Execute and collect the responses #####
response_list = grequests.map(request_list)
print(response_list)

# ##### Execute and collect the responses (with exception handling) #####
def exception_handler(request, exception):
    print(request, exception)
    print("Request failed")

response_list = grequests.map(request_list, exception_handler=exception_handler)
print(response_list)

5. grequests

from twisted.web.client import getPage
from twisted.internet import reactor

REV_COUNTER = 0
REQ_COUNTER = 0

def callback(contents):
    print(contents,)

    global REV_COUNTER
    REV_COUNTER += 1
    if REV_COUNTER == REQ_COUNTER:
        reactor.stop()

url_list = ['http://www.bing.com', 'http://www.baidu.com', ]
REQ_COUNTER = len(url_list)
for url in url_list:
    deferred = getPage(bytes(url, encoding='utf8'))
    deferred.addCallback(callback)
reactor.run()

6. twisted, example 1

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from twisted.internet import defer
from twisted.web.client import getPage
from twisted.internet import reactor

@defer.inlineCallbacks
def task(url):
    while url:
        ret = getPage(bytes(url, encoding='utf8'))
        ret.addCallback(one_done)
        url = yield ret

i = 0

def one_done(arg):
    global i
    i += 1
    if i == 10:
        return
    print('one', arg)
    return 'http://www.cnblogs.com'

@defer.inlineCallbacks
def task_list():
    start_url_list = [
        'http://www.cnblogs.com',
    ]
    defer_list = []
    for url in start_url_list:
        deferObj = task(url)
        defer_list.append(deferObj)
    yield defer.DeferredList(defer_list)

def all_done(arg):
    print('done', arg)
    reactor.stop()

if __name__ == '__main__':
    d = task_list()
    print(type(d))
    d.addBoth(all_done)
    reactor.run()

6. twisted, example 2

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPRequest
from tornado import ioloop

def handle_response(response):
    if response.error:
        print("Error:", response.error)
    else:
        print(response.body)
    # Same idea as the twisted example: keep a counter and stop the loop
    # once every response has arrived:
    # ioloop.IOLoop.current().stop()

def func():
    url_list = [
        'http://www.google.com',
        'http://127.0.0.1:8000/test2/',
    ]
    for url in url_list:
        print(url)
        http_client = AsyncHTTPClient()
        http_client.fetch(HTTPRequest(url), handle_response)

ioloop.IOLoop.current().add_callback(func)
ioloop.IOLoop.current().start()

7. tornado example
All of the above are asynchronous IO modules, either built into Python or provided by third-party packages. They are easy to use and bring large efficiency gains. Under the hood, asynchronous IO boils down to [non-blocking sockets] + [IO multiplexing]:

import select
import socket

class HttpRequest(object):
    def __init__(self, sk, host, callback):
        self.sk = sk
        self.host = host
        self.callback = callback

    def fileno(self):
        return self.sk.fileno()

class AsyncRequest(object):
    def __init__(self):
        self.conn = []
        self.connection = []

    def add_request(self, host, callback):
        try:
            sk = socket.socket()
            sk.setblocking(False)
            sk.connect((host, 80, ))
        except BlockingIOError as e:
            pass

        request = HttpRequest(sk, host, callback)
        self.conn.append(request)
        self.connection.append(request)

    def running(self):
        while True:
            # select() is not limited to raw sockets: any object that implements
            # fileno() and returns a socket file descriptor can be monitored.
            r_list, w_list, e_list = select.select(self.conn, self.connection, self.conn, 0.05)
            for w in w_list:
                # w_list: connections that have been established and are writable
                http_msg = "GET / HTTP/1.0\r\nHost:%s\r\n\r\n" % (w.host,)
                w.sk.send(bytes(http_msg, encoding='utf-8'))
                self.connection.remove(w)
            for r in r_list:
                # r_list: connections that have data ready to read
                data = bytes()
                while True:
                    try:
                        recv = r.sk.recv(8096)
                        if not recv:
                            break
                        data += recv
                    except Exception as e:
                        break
                r.callback(data)
                r.sk.close()
                self.conn.remove(r)
            if len(self.conn) == 0:
                break

def func1(data):
    print(data)

def func2(data):
    print(data)

url_list = [
    {'host': 'www.baidu.com', 'callback': func1},
    {'host': 'www.cnblogs.com', 'callback': func2},
    {'host': 'www.huaban.com', 'callback': func2},
]

async_obj = AsyncRequest()
for item in url_list:
    async_obj.add_request(item['host'], item['callback'])

async_obj.running()

A homemade async framework, version 1.1
Scrapy: Introduction and Installation
Scrapy is an application framework written for crawling websites and extracting structured data. It can be used in a wide range of programs, from data mining and information processing to archiving historical data.
It was originally designed for web scraping (more precisely, screen scraping), but it can also be used to fetch data returned by APIs (such as Amazon Associates Web Services) or as a general-purpose web crawler. Scrapy is widely used for data mining, monitoring, and automated testing.

Scrapy consists of the following components:

Engine (Scrapy)
Handles the data flow of the whole system and triggers events (the core of the framework).
Scheduler
Accepts requests from the engine, pushes them onto a queue, and hands them back when the engine asks again. Think of it as a priority queue of URLs to crawl: it decides which URL to fetch next and removes duplicate URLs.
Downloader
Downloads page content and returns it to the spiders. (The downloader is built on twisted, an efficient asynchronous model.)
Spiders
Where the real work happens: spiders extract the information they need, the so-called items, from specific pages. They can also extract links and let Scrapy follow them to the next pages.
Item Pipeline
Processes the items extracted by spiders; its main jobs are persisting items, validating them, and dropping unwanted data. After a page has been parsed by a spider, the items are sent to the pipeline and processed in a fixed sequence of steps.
Downloader Middlewares
A hook framework between the Scrapy engine and the downloader that processes the requests and responses passing between them.
Spider Middlewares
A hook framework between the Scrapy engine and the spiders that processes spider input (responses) and output (items and requests).
Scheduler Middlewares
Middleware between the Scrapy engine and the scheduler that processes the requests and responses passing between them.
Roughly, Scrapy runs as follows (a minimal spider illustrating the flow is sketched after this list):

The engine takes a URL from the scheduler for the next crawl
The engine wraps the URL in a Request and passes it to the downloader
The downloader fetches the resource and wraps it in a Response
The spider parses the Response
Parsed items are handed to the item pipeline for further processing
Parsed links (URLs) are handed back to the scheduler, waiting to be crawled
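As a rough sketch of that flow (the spider name and start URL below are made up for illustration), a spider whose parse() yields both items and new Requests exercises every step above:

import scrapy

class FlowDemoSpider(scrapy.Spider):
    name = "flow_demo"                     # hypothetical spider name
    start_urls = ["http://example.com/"]   # hypothetical start URL

    def parse(self, response):
        # Items are sent to the item pipeline...
        yield {"title": response.xpath("//title/text()").get()}
        # ...while new Requests go back to the scheduler and then to the downloader.
        for href in response.xpath("//a/@href").getall():
            yield scrapy.Request(response.urljoin(href), callback=self.parse)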
Installing Scrapy

Linux
    pip3 install scrapy
Windows
    a. pip3 install wheel
    b. Download twisted from http://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted
    c. In the download directory, run: pip3 install Twisted‑17.1.0‑cp35‑cp35m‑win_amd64.whl
    d. pip3 install scrapy
    e. Download and install pywin32: https://sourceforge.net/projects/pywin32/files/
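To quickly confirm that the installation worked, you can import Scrapy and print its version (running scrapy version on the command line does the same thing):

import scrapy
print(scrapy.__version__)   # prints the installed Scrapy version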
Scrapy Commands and Project Structure
1. Basic commands

  1. scrapy startproject <project name>
     Create a project in the current directory (similar to Django).
  2. scrapy genspider [-t template] <name> <domain>
     Create a spider, e.g.:
       scrapy genspider -t basic oldboy oldboy.com
       scrapy genspider -t xmlfeed autohome autohome.com.cn
     PS:
       List the available templates:   scrapy genspider -l
       Show a template's contents:     scrapy genspider -d <template name>
  3. scrapy list
     List the spiders in the project.
  4. scrapy crawl <spider name>
     Run a single spider.
2. Project structure and files

project_name/
    scrapy.cfg
    project_name/
        __init__.py
        items.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            spider1.py
            spider2.py
            spider3.py

File overview:

scrapy.cfg    The project's top-level configuration. (The real crawler settings live in settings.py.)
items.py      Data models for structured data, similar to Django's Model.
pipelines.py  Data-processing behaviour, e.g. persisting the structured data.
settings.py   Configuration such as recursion depth, concurrency, download delay, and so on.
spiders/      The spiders directory: create files here and write the crawling rules.
Note: spider files are usually named after the target site's domain.

import scrapy

class XiaoHuarSpider(scrapy.spiders.Spider):
    name = "xiaohuar"                     # spider name *****
    allowed_domains = ["xiaohuar.com"]    # allowed domains
    start_urls = [
        "http://www.xiaohuar.com/hua/",   # start URL
    ]

    def parse(self, response):
        # callback invoked once the start URL has been fetched
        pass

spider1.py
A First Taste of Scrapy
Write the spiders/spider_name.py file:

import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request

class DigSpider(scrapy.Spider):
    # Spider name; used to launch the crawl from the command line
    name = "dig"

    # Allowed domains
    allowed_domains = ["chouti.com"]

    # Start URLs
    start_urls = [
        'http://dig.chouti.com/',
    ]

    has_request_set = {}

    def parse(self, response):
        print(response.url)

        hxs = HtmlXPathSelector(response)
        page_list = hxs.select('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
        for page in page_list:
            page_url = 'http://dig.chouti.com%s' % page
            key = self.md5(page_url)
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = page_url
                obj = Request(url=page_url, method='GET', callback=self.parse)
                yield obj

    @staticmethod
    def md5(val):
        import hashlib
        ha = hashlib.md5()
        ha.update(bytes(val, encoding='utf-8'))
        key = ha.hexdigest()
        return key

DEMO
To run this spider:

scrapy crawl dig --nolog
The important points in the code above are:

Request is a class that wraps a user request; yielding a Request object from a callback tells Scrapy to keep crawling that URL
HtmlXPathSelector structures the HTML and provides selector functionality
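HtmlXPathSelector comes from older Scrapy releases; in current versions the same queries are usually written with response.xpath() or the Selector class shown in the next section. A minimal sketch of an equivalent parse method under that assumption (drop-in replacement for the parse shown above):

def parse(self, response):
    # response.xpath() returns a SelectorList; .getall() extracts the matched strings
    page_list = response.xpath(
        r'//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href'
    ).getall()
    for page in page_list:
        yield scrapy.Request('http://dig.chouti.com%s' % page, callback=self.parse)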
Selectors

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from scrapy.selector import Selector, HtmlXPathSelector
from scrapy.http import HtmlResponse

html = """<!DOCTYPE html>
<html>
    <head lang="en">
        <meta charset="UTF-8">
        <title></title>
    </head>
    <body>
        <ul>
            <li class="item-"><a id="i1" href="link.html">first item</a></li>
            <li class="item-0"><a id="i2" href="llink.html">first item</a></li>
            <li class="item-1"><a href="llink2.html">second item<span>vv</span></a></li>
        </ul>
        <div><a href="llink2.html">second item</a></div>
    </body>
</html>
"""

response = HtmlResponse(url='http://example.com', body=html, encoding='utf-8')
# hxs = HtmlXPathSelector(response)
# print(hxs)
# hxs = Selector(response=response).xpath('//a')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[2]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@id]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@id="i1"]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@href="link.html"][@id="i1"]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[contains(@href, "link")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[starts-with(@href, "link")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/text()').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/@href').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('/html/body/ul/li/a/@href').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('//body/ul/li/a/@href').extract_first()
# print(hxs)

# ul_list = Selector(response=response).xpath('//body/ul/li')
# for item in ul_list:
#     v = item.xpath('./a/span')
#     # or
#     # v = item.xpath('a/span')
#     # or
#     # v = item.xpath('*/a/span')
#     print(v)

HTML selector examples
Example

# -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request
from scrapy.http.cookies import CookieJar
from scrapy import FormRequest

class ChouTiSpider(scrapy.Spider):
    # Spider name; used to launch the crawl from the command line
    name = "chouti"

    # Allowed domains
    allowed_domains = ["chouti.com"]

    cookie_dict = {}
    has_request_set = {}

    def start_requests(self):
        url = 'http://dig.chouti.com/'
        # return [Request(url=url, callback=self.login)]
        yield Request(url=url, callback=self.login)

    def login(self, response):
        cookie_jar = CookieJar()
        cookie_jar.extract_cookies(response, response.request)
        for k, v in cookie_jar._cookies.items():
            for i, j in v.items():
                for m, n in j.items():
                    self.cookie_dict[m] = n.value

        req = Request(
            url='http://dig.chouti.com/login',
            method='POST',
            headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
            body='phone=8615131255089&password=pppppppp&oneMonth=1',
            cookies=self.cookie_dict,
            callback=self.check_login
        )
        yield req

    def check_login(self, response):
        req = Request(
            url='http://dig.chouti.com/',
            method='GET',
            callback=self.show,
            cookies=self.cookie_dict,
            dont_filter=True
        )
        yield req

    def show(self, response):
        # print(response)
        hxs = HtmlXPathSelector(response)
        news_list = hxs.select('//div[@id="content-list"]/div[@class="item"]')
        for new in news_list:
            # temp = new.xpath('div/div[@class="part2"]/@share-linkid').extract()
            link_id = new.xpath('*/div[@class="part2"]/@share-linkid').extract_first()
            yield Request(
                url='http://dig.chouti.com/link/vote?linksId=%s' % (link_id,),
                method='POST',
                cookies=self.cookie_dict,
                callback=self.do_favor
            )

        page_list = hxs.select('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
        for page in page_list:
            page_url = 'http://dig.chouti.com%s' % page
            import hashlib
            hash = hashlib.md5()
            hash.update(bytes(page_url, encoding='utf-8'))
            key = hash.hexdigest()
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = page_url
                yield Request(
                    url=page_url,
                    method='GET',
                    callback=self.show
                )

    def do_favor(self, response):
        print(response.text)

Log in to chouti.com automatically and upvote posts
Note: set DEPTH_LIMIT = 1 in settings.py to limit the "recursion" depth.

Structured Data Processing in Scrapy
The example above did its simple processing directly in the parse method. For richer data handling, use Scrapy's items to structure the data and then hand everything over to pipelines for unified processing. The following DEMO downloads the images from the xiaohuar site.

# -*- coding: utf-8 -*-
import scrapy
import sys
import io
from xiaohua.items import XiaohuaItem
from scrapy.selector import Selector, HtmlXPathSelector
from scrapy.http.request import Request

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')

class XiaohuarSpider(scrapy.Spider):
    # Spider name; used to launch the crawl from the command line
    name = "xiaohuar"

    # Allowed domains
    allowed_domains = ["xiaohuar.com"]
    start_urls = ['http://www.xiaohuar.com/hua/']
    visited_urls = set()

    def parse(self, response):
        # Analyse the page:
        # find the content matching our rules (the images) and save it,
        # then follow the <a> tags and keep going, level by level
        hxs1 = Selector(response=response).xpath('//div[@id="list_img"]')  # list of selector objects
        for obj in hxs1:
            img_list = obj.xpath('.//div[@class="img"]')
            for img in img_list:
                img_url = img.xpath('.//a/img/@src').extract_first().strip()
                img_title = img.xpath('.//span[@class="price"]/text()').extract_first().strip()
                img_obj = XiaohuaItem(title=img_title, href=img_url)
                yield img_obj

        hxs = Selector(response=response).xpath(
            '//a[re:test(@href, "http://www.xiaohuar.com/list-1-\d+.html")]/@href'
        ).extract()
        for url in hxs:
            md5_url = self.encrypt(url)
            if md5_url not in self.visited_urls:
                self.visited_urls.add(md5_url)
                yield Request(url=url, callback=self.parse)

    def encrypt(self, url):
        import hashlib
        hash_obj = hashlib.md5()
        hash_obj.update(bytes(url, encoding='utf-8'))
        return hash_obj.hexdigest()

spiders/xiaohuar.py

import scrapy

class XiaohuaItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    href = scrapy.Field()

items.py

from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy.http.request import Request

class XiaohuaPipeline(ImagesPipeline):
    """
    Scrapy ships with an ImagesPipeline, so we just subclass it and override
    two methods (their names must not be changed).

    If you would rather not use it, uncomment the code below instead.
    """

    # def process_item(self, item, spider):
    #     img_url = 'http://www.xiaohuar.com{}'.format(item['href'])
    #     print(img_url)
    #     img_name = '{}.jpg'.format(item['title'])
    #     response = requests.get(url=img_url)
    #     with open(os.path.join('image', img_name), 'wb') as f:
    #         f.write(response.content)
    #     # return item

    def get_media_requests(self, item, info):
        img_url = 'http://www.xiaohuar.com{}'.format(item['href'])
        yield Request(img_url)

    def item_completed(self, results, item, info):
        # print(results)
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item

pipelines.py

ITEM_PIPELINES = {
    'xiaohua.pipelines.XiaohuaPipeline': 300,
    'scrapy.contrib.pipeline.images.ImagesPipeline': 1
}
# The integer after each entry determines the order in which items pass through
# the pipelines, from low to high; the numbers are conventionally kept in the
# 0-1000 range.

# Recursion depth
DEPTH_LIMIT = 1

# Where downloaded images are stored
IMAGES_STORE = r'F:\python\spiderTest\xiaohua\image'

settings.py
Pipelines can do quite a bit more, for example:

from scrapy.exceptions import DropItem

class CustomPipeline(object):
    def __init__(self, v):
        self.value = v

    def process_item(self, item, spider):
        # process and persist the item

        # returning the item passes it on to the next pipeline
        return item

        # raising DropItem discards the item so later pipelines never see it
        # raise DropItem()

    @classmethod
    def from_crawler(cls, crawler):
        """
        Called at start-up to create the pipeline object.
        :param crawler:
        :return:
        """
        val = crawler.settings.getint('MMMM')
        return cls(val)

    def open_spider(self, spider):
        """
        Called when the spider starts.
        :param spider:
        :return:
        """
        print('000000')

    def close_spider(self, spider):
        """
        Called when the spider is closed.
        :param spider:
        :return:
        """
        print('111111')

Custom pipeline
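For this pipeline to run it still has to be registered in settings.py. A minimal sketch, assuming the project is called myproject and reusing the MMMM setting read in from_crawler above (both names are placeholders):

# settings.py (sketch; 'myproject' and MMMM are placeholder names)
ITEM_PIPELINES = {
    'myproject.pipelines.CustomPipeline': 300,
}
MMMM = 123   # read by CustomPipeline.from_crawler via crawler.settings.getint('MMMM')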
Scrapy Middleware
Spider middleware

Spider middleware is a hook framework that plugs into Scrapy's spider-processing machinery; you can add code there to process the responses sent to the Spiders as well as the items and requests the spiders produce.

Enabling spider middleware

To enable a spider middleware, add it to the SPIDER_MIDDLEWARES setting, a dict whose keys are the middleware paths and whose values are the middleware orders.

Configure it in the settings file as follows; a value of None disables the middleware:

SPIDER_MIDDLEWARES = {
    'myproject.middlewares.CustomSpiderMiddleware': 543,
    'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': None,
}
Writing your own spider middleware

class SpiderMiddleware(object):
    def process_spider_input(self, response, spider):
        """
        Called after the download finishes, before the response is handed to parse.
        :param response:
        :param spider:
        :return:
        """
        pass

    def process_spider_output(self, response, result, spider):
        """
        Called with what the spider returned once it has processed the response.
        :param response:
        :param result:
        :param spider:
        :return: must return an iterable containing Request or Item objects
        """
        return result

    def process_spider_exception(self, response, exception, spider):
        """
        Called when an exception is raised.
        :param response:
        :param exception:
        :param spider:
        :return: None to let later middlewares keep handling the exception, or an
                 iterable of Response or Item objects handed to the scheduler or pipeline
        """
        return None

    def process_start_requests(self, start_requests, spider):
        """
        Called when the spider starts.
        :param start_requests:
        :param spider:
        :return: an iterable of Request objects
        """
        return start_requests

Downloader middleware

Downloader middleware is a hook framework into Scrapy's request/response processing: a lightweight, low-level system for globally modifying Scrapy's requests and responses.

Enabling downloader middleware

To activate a downloader middleware component, add it to the DOWNLOADER_MIDDLEWARES setting, a dict whose keys are the middleware class paths and whose values are the middleware orders.

Configure it in the settings file; a value of None disables the middleware:

DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.CustomDownloaderMiddleware': 543,
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
}
Writing your own downloader middleware

class DownMiddleware1(object):
    def process_request(self, request, spider):
        """
        Called, through every downloader middleware, for each request that needs to be downloaded.
        :param request:
        :param spider:
        :return:
            None: continue to the next middleware and download the request
            Response object: stop calling process_request and start calling process_response
            Request object: stop the middleware chain and send the Request back to the scheduler
            raise IgnoreRequest: stop calling process_request and start calling process_exception
        """
        pass

    def process_response(self, request, response, spider):
        """
        Called with the downloaded response on its way back.
        :param request:
        :param response:
        :param spider:
        :return:
            Response object: passed on to the remaining middlewares' process_response
            Request object: stop the middleware chain; the request is rescheduled for download
            raise IgnoreRequest: Request.errback is called
        """
        print('response1')
        return response

    def process_exception(self, request, exception, spider):
        """
        Called when the download handler or a process_request() (downloader middleware) raises an exception.
        :param request:
        :param exception:
        :param spider:
        :return:
            None: let later middlewares keep handling the exception
            Response object: stop calling the remaining process_exception methods
            Request object: stop the middleware chain; the request is rescheduled for download
        """
        return None
Custom Scrapy Commands
Create a directory (any name, e.g. commands) at the same level as spiders.
Inside it, create a crawlall.py file (the file name becomes the custom command).

from scrapy.commands import ScrapyCommand
from scrapy.utils.project import get_project_settings

class Command(ScrapyCommand):

    requires_project = True

    def syntax(self):
        return '[options]'

    def short_desc(self):
        return 'Runs all of the spiders'

    def run(self, args, opts):
        spider_list = self.crawler_process.spiders.list()
        for name in spider_list:
            self.crawler_process.crawl(name, **opts.__dict__)
        self.crawler_process.start()

crawlall.py
Add COMMANDS_MODULE = '<project name>.<directory name>' to settings.py.
Run the command from the project directory: scrapy crawlall
Avoiding Duplicate Requests in Scrapy
By default Scrapy deduplicates requests with scrapy.dupefilter.RFPDupeFilter; the related settings are:

DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
DUPEFILTER_DEBUG = False
JOBDIR = "directory where the seen-requests log is kept, e.g. /root/"  # the final path is /root/requests.seen

class RepeatUrl:
    def __init__(self):
        self.visited_url = set()

    @classmethod
    def from_settings(cls, settings):
        """
        Called at initialisation time.
        :param settings:
        :return:
        """
        return cls()

    def request_seen(self, request):
        """
        Check whether the current request has already been seen.
        :param request:
        :return: True if it has already been visited; False otherwise
        """
        if request.url in self.visited_url:
            return True
        self.visited_url.add(request.url)
        return False

    def open(self):
        """
        Called when crawling starts.
        :return:
        """
        print('open replication')

    def close(self, reason):
        """
        Called when crawling finishes.
        :param reason:
        :return:
        """
        print('close replication')

    def log(self, request, spider):
        """
        Log duplicate requests.
        :param request:
        :param spider:
        :return:
        """
        print('repeat', request.url)

Custom URL deduplication
Scrapy Settings

# -*- coding: utf-8 -*-

# Scrapy settings for the step8_king project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

# 1. Crawler name
BOT_NAME = 'step8_king'

# 2. Spider module paths
SPIDER_MODULES = ['step8_king.spiders']
NEWSPIDER_MODULE = 'step8_king.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# 3. Client User-Agent request header
USER_AGENT = 'step8_king (+http://www.yourdomain.com)'

# Obey robots.txt rules
# 4. Whether to honour the site's robots.txt
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# 5. Number of concurrent requests
CONCURRENT_REQUESTS = 4

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# 6. Download delay, in seconds
DOWNLOAD_DELAY = 2

# The download delay setting will honor only one of:
# 7. Concurrent requests per domain; the download delay is also applied per domain
CONCURRENT_REQUESTS_PER_DOMAIN = 2
# Concurrent requests per IP; if set, CONCURRENT_REQUESTS_PER_DOMAIN is ignored
# and the download delay is applied per IP instead
CONCURRENT_REQUESTS_PER_IP = 3

# Disable cookies (enabled by default)
# 8. Whether cookies are enabled (cookiejar-based cookie handling)
COOKIES_ENABLED = True
COOKIES_DEBUG = True

# Disable Telnet Console (enabled by default)
# 9. The Telnet console lets you inspect and control the running crawler:
#    connect with "telnet <ip> <port>" and issue commands
TELNETCONSOLE_ENABLED = True
TELNETCONSOLE_HOST = '127.0.0.1'
TELNETCONSOLE_PORT = [6023,]

# 10. Default request headers
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# 11. Item pipelines
ITEM_PIPELINES = {
    'step8_king.pipelines.JsonPipeline': 700,
    'step8_king.pipelines.FilePipeline': 500,
}

# 12. Custom extensions, driven by signals
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
EXTENSIONS = {
    # 'step8_king.extensions.MyExtension': 500,
}

# 13. Maximum crawl depth (the current depth is available via meta); 0 means unlimited
DEPTH_LIMIT = 3

# 14. Crawl order: 0 means depth-first, LIFO (the default); 1 means breadth-first, FIFO

# Last in, first out: depth-first
# DEPTH_PRIORITY = 0
# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'

# First in, first out: breadth-first
DEPTH_PRIORITY = 1
SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue'

# 15. Scheduler queue
SCHEDULER = 'scrapy.core.scheduler.Scheduler'
# from scrapy.core.scheduler import Scheduler

# 16. URL deduplication
DUPEFILTER_CLASS = 'step8_king.duplication.RepeatUrl'

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html

"""
17. The auto-throttling algorithm
    from scrapy.contrib.throttle import AutoThrottle
    How the automatic delay is calculated:

    1. Take the minimum delay: DOWNLOAD_DELAY
    2. Take the maximum delay: AUTOTHROTTLE_MAX_DELAY
    3. Set the initial download delay: AUTOTHROTTLE_START_DELAY
    4. When a request finishes, take its "connection" latency, i.e. the time
       from sending the request to receiving the response headers
    5. Combine it with AUTOTHROTTLE_TARGET_CONCURRENCY:
        target_delay = latency / self.target_concurrency
        new_delay = (slot.delay + target_delay) / 2.0  # slot.delay is the previous delay
        new_delay = max(target_delay, new_delay)
        new_delay = min(max(self.mindelay, new_delay), self.maxdelay)
        slot.delay = new_delay
"""

# Enable auto-throttling
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 10
# The average number of requests Scrapy should be sending in parallel to each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = True

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings

"""
18. HTTP caching
    Caches requests and responses that have already been sent so they can be reused later.

    from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware
    from scrapy.extensions.httpcache import DummyPolicy
    from scrapy.extensions.httpcache import FilesystemCacheStorage
"""

# Whether caching is enabled
HTTPCACHE_ENABLED = True

# Cache policy: cache every request; later identical requests are served from the cache
HTTPCACHE_POLICY = "scrapy.extensions.httpcache.DummyPolicy"

# Cache policy: honour HTTP response headers such as Cache-Control and Last-Modified
# HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"

# Cache expiration time
HTTPCACHE_EXPIRATION_SECS = 0

# Cache storage directory
HTTPCACHE_DIR = 'httpcache'

# HTTP status codes that are never cached
HTTPCACHE_IGNORE_HTTP_CODES = []

# Cache storage backend
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

"""
19. Proxies; these need to be set via environment variables
    from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware

    Option 1: use the default middleware
        os.environ
        {
            http_proxy: http://root:woshiniba@192.168.11.11:9999/
            https_proxy: http://192.168.11.11:9999/
        }

    Option 2: use a custom downloader middleware

    def to_bytes(text, encoding=None, errors='strict'):
        if isinstance(text, bytes):
            return text
        if not isinstance(text, six.string_types):
            raise TypeError('to_bytes must receive a unicode, str or bytes '
                            'object, got %s' % type(text).__name__)
        if encoding is None:
            encoding = 'utf-8'
        return text.encode(encoding, errors)

    class ProxyMiddleware(object):
        def process_request(self, request, spider):
            PROXIES = [
                {'ip_port': '111.11.228.75:80', 'user_pass': ''},
                {'ip_port': '120.198.243.22:80', 'user_pass': ''},
                {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
                {'ip_port': '101.71.27.120:80', 'user_pass': ''},
                {'ip_port': '122.96.59.104:80', 'user_pass': ''},
                {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
            ]
            proxy = random.choice(PROXIES)
            if proxy['user_pass'] is not None:
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
                encoded_user_pass = base64.encodestring(to_bytes(proxy['user_pass']))
                request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
                print("**ProxyMiddleware have pass " + proxy['ip_port'])
            else:
                print("**ProxyMiddleware no pass " + proxy['ip_port'])
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])

    DOWNLOADER_MIDDLEWARES = {
        'step8_king.middlewares.ProxyMiddleware': 500,
    }
"""

"""
20. HTTPS access
    There are two cases:

    1. The target site uses a trusted certificate (supported by default)
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"

    2. The target site uses a custom (e.g. self-signed) certificate
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"

        https.py

        from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
        from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)

        class MySSLFactory(ScrapyClientContextFactory):
            def getCertificateOptions(self):
                from OpenSSL import crypto
                v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
                v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
                return CertificateOptions(
                    privateKey=v1,      # pKey object
                    certificate=v2,     # X509 object
                    verify=False,
                    method=getattr(self, 'method', getattr(self, '_ssl_method', None))
                )

    Related classes:
        scrapy.core.downloader.handlers.http.HttpDownloadHandler
        scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
        scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
    Related settings:
        DOWNLOADER_HTTPCLIENTFACTORY
        DOWNLOADER_CLIENTCONTEXTFACTORY
"""

"""
21. Spider middleware
    class SpiderMiddleware(object):

        def process_spider_input(self, response, spider):
            '''
            Called after the download finishes, before the response is handed to parse.
            :param response:
            :param spider:
            :return:
            '''
            pass

        def process_spider_output(self, response, result, spider):
            '''
            Called with what the spider returned once it has processed the response.
            :param response:
            :param result:
            :param spider:
            :return: must return an iterable containing Request or Item objects
            '''
            return result

        def process_spider_exception(self, response, exception, spider):
            '''
            Called when an exception is raised.
            :param response:
            :param exception:
            :param spider:
            :return: None to let later middlewares keep handling the exception, or an
                     iterable of Response or Item objects handed to the scheduler or pipeline
            '''
            return None

        def process_start_requests(self, start_requests, spider):
            '''
            Called when the spider starts.
            :param start_requests:
            :param spider:
            :return: an iterable of Request objects
            '''
            return start_requests

    Built-in spider middlewares:
        'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
        'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
        'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
        'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
        'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
"""

# from scrapy.contrib.spidermiddleware.referer import RefererMiddleware
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    # 'step8_king.middlewares.SpiderMiddleware': 543,
}

"""
22. Downloader middleware
    class DownMiddleware1(object):
        def process_request(self, request, spider):
            '''
            Called, through every downloader middleware, for each request that needs to be downloaded.
            :param request:
            :param spider:
            :return:
                None: continue to the next middleware and download the request
                Response object: stop calling process_request and start calling process_response
                Request object: stop the middleware chain and send the Request back to the scheduler
                raise IgnoreRequest: stop calling process_request and start calling process_exception
            '''
            pass

        def process_response(self, request, response, spider):
            '''
            Called with the downloaded response on its way back.
            :param request:
            :param response:
            :param spider:
            :return:
                Response object: passed on to the remaining middlewares' process_response
                Request object: stop the middleware chain; the request is rescheduled for download
                raise IgnoreRequest: Request.errback is called
            '''
            print('response1')
            return response

        def process_exception(self, request, exception, spider):
            '''
            Called when the download handler or a process_request() (downloader middleware) raises an exception.
            :param request:
            :param exception:
            :param spider:
            :return:
                None: let later middlewares keep handling the exception
                Response object: stop calling the remaining process_exception methods
                Request object: stop the middleware chain; the request is rescheduled for download
            '''
            return None

    Default downloader middlewares:
    {
        'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
        'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
        'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
        'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
        'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
        'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
        'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
        'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
        'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
        'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
        'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
        'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
        'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
        'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
    }
"""

# from scrapy.contrib.downloadermiddleware.httpauth import HttpAuthMiddleware
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    # 'step8_king.middlewares.DownMiddleware1': 100,
    # 'step8_king.middlewares.DownMiddleware2': 500,
}

settings.py
Custom Scrapy Extensions
Scrapy uses signals to announce when certain things happen. You can catch some of those signals in your project (with an extension) to perform extra work or add extra functionality to Scrapy.

from scrapy import signals

class MyExtension(object):
    def __init__(self, value):
        self.value = value

    @classmethod
    def from_crawler(cls, crawler):
        val = crawler.settings.getint('MMMM')
        ext = cls(val)

        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)

        return ext

    def spider_opened(self, spider):
        print('open')

    def spider_closed(self, spider):
        print('close')

Custom extension
More on the signals API: http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/api.html#module-scrapy.signalmanager
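The extension is only loaded if it appears in EXTENSIONS in settings.py; a minimal sketch, again treating myproject and MMMM as placeholder names:

# settings.py (sketch; 'myproject' and MMMM are placeholder names)
EXTENSIONS = {
    'myproject.extensions.MyExtension': 500,
}
MMMM = 123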

Scrapy Source-Code Walkthrough and Building Your Own Framework
For details, see >>> http://www.cnblogs.com/leguan1314/articles/6892040.html
