When writing a crawler, most of the cost is in I/O: in single-process, single-thread mode every URL request blocks while waiting for the response, which slows down the whole job.
1. Synchronous
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests

def fetch_async(url):
    response = requests.get(url)
    return response

url_list = ['http://www.github.com', 'http://www.bing.com']
for url in url_list:
    fetch_async(url)
2. Multithreading
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
import requests

def fetch_async(url):
    response = requests.get(url)
    return response

url_list = ['http://www.github.com', 'http://www.bing.com']
pool = ThreadPoolExecutor(5)  # create a thread pool
for url in url_list:
    pool.submit(fetch_async, url)
pool.shutdown(wait=True)
3. Multithreading + callbacks
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
import requests

def fetch_async(url):
    response = requests.get(url)
    return response

def callback(future):
    print(future.result())

url_list = ['http://www.github.com', 'http://www.bing.com']
pool = ThreadPoolExecutor(5)  # create a thread pool
for url in url_list:
    v = pool.submit(fetch_async, url)
    v.add_done_callback(callback)
pool.shutdown(wait=True)
4. Multiprocessing
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from concurrent.futures import ProcessPoolExecutor
import requests

def fetch_async(url):
    response = requests.get(url)
    return response

# the __main__ guard is required on platforms that spawn worker processes (e.g. Windows)
if __name__ == '__main__':
    url_list = ['http://www.github.com', 'http://www.bing.com']
    pool = ProcessPoolExecutor(5)  # create a process pool
    for url in url_list:
        pool.submit(fetch_async, url)
    pool.shutdown(wait=True)
5. Multiprocessing + callbacks
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from concurrent.futures import ProcessPoolExecutor
import requests

def fetch_async(url):
    response = requests.get(url)
    return response

def callback(future):
    print(future.result())

# the __main__ guard is required on platforms that spawn worker processes (e.g. Windows)
if __name__ == '__main__':
    url_list = ['http://www.github.com', 'http://www.bing.com']
    pool = ProcessPoolExecutor(5)  # create a process pool
    for url in url_list:
        v = pool.submit(fetch_async, url)
        v.add_done_callback(callback)
    pool.shutdown(wait=True)
All of the above improve request throughput, but threads and processes are wasted whenever they sit blocked on I/O, so asynchronous I/O is the preferred approach:
1. asyncio example
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import asyncio

@asyncio.coroutine
def func1():
    print('before...func1......')
    yield from asyncio.sleep(5)
    print('end...func1......')

tasks = [func1(), func1()]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
loop.close()
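The @asyncio.coroutine / yield from style above matches the Python 3.5 era this post targets; for reference, here is the same example in the async/await syntax, a sketch assuming Python 3.7+ for asyncio.run:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import asyncio

async def func1():
    print('before...func1......')
    await asyncio.sleep(5)  # await replaces yield from
    print('end...func1......')

async def main():
    # run both coroutines concurrently and wait for them to finish
    await asyncio.gather(func1(), func1())

asyncio.run(main())  # creates and closes the event loop for you (Python 3.7+)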
2. asyncio example
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import asyncio

@asyncio.coroutine
def fetch_async(host, url='/'):
    print(host, url)
    # talk HTTP by hand over an asyncio TCP connection
    reader, writer = yield from asyncio.open_connection(host, 80)
    request_header_content = """GET %s HTTP/1.0\r\nHost: %s\r\n\r\n""" % (url, host,)
    request_header_content = bytes(request_header_content, encoding='utf-8')
    writer.write(request_header_content)
    yield from writer.drain()
    text = yield from reader.read()
    print(host, url, text)
    writer.close()

tasks = [
    fetch_async('blog.csdn.net', '/m0_37886429/'),
    fetch_async('dig.chouti.com', '/pic/show?nid=4073644713430508&lid=10273091')
]

# run the tasks to completion
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
loop.close()
3. asyncio + aiohttp
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import aiohttp
import asyncio

@asyncio.coroutine
def fetch_async(url):
    print(url)
    # aiohttp.request('GET', url) is the old (pre-1.0) aiohttp API;
    # newer versions use aiohttp.ClientSession (see the sketch below)
    response = yield from aiohttp.request('GET', url)
    # data = yield from response.read()
    # print(url, data)
    print(url, response)
    response.close()

tasks = [fetch_async('http://www.google.com/'), fetch_async('http://www.chouti.com/')]

event_loop = asyncio.get_event_loop()
results = event_loop.run_until_complete(asyncio.gather(*tasks))
event_loop.close()
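Newer aiohttp removed the module-level aiohttp.request coroutine in favor of ClientSession; a minimal sketch of the same fetch, assuming aiohttp 3.x and Python 3.7+:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import asyncio
import aiohttp

async def fetch_async(url):
    # one session per request keeps the sketch short; real crawlers share a session
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            data = await response.read()
            print(url, response.status, len(data))

async def main():
    await asyncio.gather(
        fetch_async('http://www.google.com/'),
        fetch_async('http://www.chouti.com/'),
    )

asyncio.run(main())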
4. asyncio + requests
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import asyncio
import requests

@asyncio.coroutine
def fetch_async(func, *args):
    loop = asyncio.get_event_loop()
    # run the blocking requests call in the loop's default thread pool
    future = loop.run_in_executor(None, func, *args)
    response = yield from future
    print(response.url, response.content)

tasks = [
    fetch_async(requests.get, 'https://blog.csdn.net/m0_37886429'),
    fetch_async(requests.get, 'http://dig.chouti.com/pic/show?nid=4073644713430508&lid=10273091')
]

loop = asyncio.get_event_loop()
results = loop.run_until_complete(asyncio.gather(*tasks))
loop.close()
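Passing None to run_in_executor uses the event loop's default thread pool, so requests still blocks a thread rather than doing true async I/O; a sketch of the same idea with an explicit executor and async/await syntax (Python 3.7+ assumed):
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import asyncio
from concurrent.futures import ThreadPoolExecutor

import requests

async def fetch_async(executor, url):
    loop = asyncio.get_running_loop()
    # requests.get still blocks, but it blocks a pool thread, not the event loop
    response = await loop.run_in_executor(executor, requests.get, url)
    print(response.url, len(response.content))

async def main():
    executor = ThreadPoolExecutor(5)
    await asyncio.gather(
        fetch_async(executor, 'http://www.bing.com'),
        fetch_async(executor, 'http://www.baidu.com'),
    )

asyncio.run(main())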
5. gevent + requests
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from gevent import monkey

# patch the standard library first, so requests' sockets become cooperative
monkey.patch_all()

import gevent
import requests

def fetch_async(method, url, req_kwargs):
    print(method, url, req_kwargs)
    response = requests.request(method=method, url=url, **req_kwargs)
    print(response.url, response.content)

# ##### send the requests #####
gevent.joinall([
    gevent.spawn(fetch_async, method='get', url='https://www.python.org/', req_kwargs={}),
    gevent.spawn(fetch_async, method='get', url='https://www.baidu.com/', req_kwargs={}),
    gevent.spawn(fetch_async, method='get', url='https://github.com/', req_kwargs={}),
])

# ##### send the requests (a pool caps the number of concurrent greenlets) #####
# from gevent.pool import Pool
# pool = Pool(None)
# gevent.joinall([
#     pool.spawn(fetch_async, method='get', url='https://www.python.org/', req_kwargs={}),
#     pool.spawn(fetch_async, method='get', url='https://www.yahoo.com/', req_kwargs={}),
#     pool.spawn(fetch_async, method='get', url='https://www.github.com/', req_kwargs={}),
# ])
6. grequests
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import grequests

request_list = [
    grequests.get('http://httpbin.org/delay/1', timeout=0.001),
    grequests.get('http://fakedomain/'),
    grequests.get('http://httpbin.org/status/500')
]

# ##### run and collect the responses #####
response_list = grequests.map(request_list)
print(response_list)

# ##### run and collect the responses (with an exception handler) #####
# def exception_handler(request, exception):
#     print(request, exception)
#     print("Request failed")
# response_list = grequests.map(request_list, exception_handler=exception_handler)
# print(response_list)
7. Twisted example
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from twisted.internet import defer, reactor
from twisted.web.client import getPage  # deprecated since Twisted 16.4; see the Agent sketch below

def all_done(arg):
    reactor.stop()

def callback(contents):
    print(contents)

deferred_list = []
url_list = ['http://www.bing.com', 'http://www.baidu.com', ]
for url in url_list:
    deferred = getPage(bytes(url, encoding='utf8'))
    deferred.addCallback(callback)
    deferred_list.append(deferred)

dlist = defer.DeferredList(deferred_list)
dlist.addBoth(all_done)
reactor.run()
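getPage has since been deprecated and removed from newer Twisted releases; a rough equivalent using the Agent API, a sketch rather than part of the original post:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from twisted.internet import defer, reactor
from twisted.web.client import Agent, readBody

def all_done(arg):
    reactor.stop()

@defer.inlineCallbacks
def fetch(url):
    agent = Agent(reactor)
    # Agent.request takes bytes for both the method and the URI
    response = yield agent.request(b'GET', url.encode('utf-8'))
    body = yield readBody(response)
    print(url, len(body))

dlist = defer.DeferredList([fetch('http://www.bing.com'),
                            fetch('http://www.baidu.com')])
dlist.addBoth(all_done)
reactor.run()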
8. Tornado
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPRequest
from tornado import ioloop

COUNT = 0  # pending-request counter used to decide when to stop the IO loop

def handle_response(response):
    """
    Handle the response body; a counter is maintained so that
    ioloop.IOLoop.current().stop() can be called once every request is done.
    :param response:
    :return:
    """
    global COUNT
    if response.error:
        print("Error:", response.error)
    else:
        print(response.body)
    COUNT -= 1
    if COUNT == 0:
        ioloop.IOLoop.current().stop()

def func():
    global COUNT
    url_list = [
        'http://www.baidu.com',
        'http://www.bing.com',
    ]
    COUNT = len(url_list)
    for url in url_list:
        print(url)
        http_client = AsyncHTTPClient()
        http_client.fetch(HTTPRequest(url), handle_response)

ioloop.IOLoop.current().add_callback(func)
ioloop.IOLoop.current().start()
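The callback argument to fetch was removed in Tornado 6; a sketch of the same two fetches on Tornado 6.x, where fetch returns an awaitable:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from tornado import gen, ioloop
from tornado.httpclient import AsyncHTTPClient

async def fetch(url):
    http_client = AsyncHTTPClient()
    response = await http_client.fetch(url)
    print(url, len(response.body))

async def func():
    # run both fetches concurrently and wait for all of them
    await gen.multi([fetch('http://www.baidu.com'),
                     fetch('http://www.bing.com')])

# run_sync starts the loop, runs func to completion, then stops the loop
ioloop.IOLoop.current().run_sync(func)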
9. More Twisted
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from twisted.internet import reactor
from twisted.web.client import getPage
import urllib.parse

def one_done(arg):
    print(arg)
    reactor.stop()

post_data = urllib.parse.urlencode({'check_data': 'adf'})
post_data = bytes(post_data, encoding='utf8')
headers = {b'Content-Type': b'application/x-www-form-urlencoded'}
response = getPage(bytes('http://dig.chouti.com/login', encoding='utf8'),
                   method=bytes('POST', encoding='utf8'),
                   postdata=post_data,
                   cookies={},
                   headers=headers)
response.addBoth(one_done)
reactor.run()
All of the modules above, standard library and third party alike, make asynchronous I/O requests easy to issue and greatly improve efficiency. Under the hood, asynchronous I/O boils down to non-blocking sockets plus I/O multiplexing.
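To make that concrete, here is a minimal sketch of what those libraries do internally: non-blocking sockets driven by select(); HTTP/1.0 is used so the server closes the connection once the response is complete, and error handling is omitted:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import select
import socket

hosts = ['www.baidu.com', 'www.bing.com']
socks = {}       # socket -> host, for every connection still open
connecting = {}  # sockets whose connect() has not finished yet

for host in hosts:
    s = socket.socket()
    s.setblocking(False)       # all calls on this socket now return immediately
    try:
        s.connect((host, 80))  # raises BlockingIOError; the handshake continues in the background
    except BlockingIOError:
        pass
    socks[s] = host
    connecting[s] = host

while socks:
    # select() waits until some socket is readable or writable
    rlist, wlist, _ = select.select(list(socks), list(connecting), [], 5)
    if not rlist and not wlist:
        break  # timed out with nothing ready
    for s in wlist:            # writable: the connection is established, send the request
        req = 'GET / HTTP/1.0\r\nHost: %s\r\n\r\n' % socks[s]
        s.sendall(req.encode('utf-8'))
        del connecting[s]
    for s in rlist:            # readable: response data (or EOF) has arrived
        data = s.recv(8192)
        if data:
            print(socks[s], len(data), 'bytes')
        else:                  # empty read means the server closed the connection
            s.close()
            del socks[s]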
Notes:
1. Installing Twisted on Windows
   1. Go to https://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted and download Twisted-18.7.0-cp35-cp35m-win_amd64.whl (my Python is 3.5; pick the file matching your version)
   2. Change into the directory containing the file
   3. pip install Twisted-18.7.0-cp35-cp35m-win_amd64.whl
2. Installing Scrapy on Windows
   1. pip install wheel
   2. pip install Twisted-18.7.0-cp35-cp35m-win_amd64.whl
   3. pip install pywin32
   4. pip install scrapy
Installing Scrapy on Windows also depends on pywin32: https://sourceforge.net/projects/pywin32/files/