# Asynchronously scrape the detail pages
import asyncio
import time

import aiohttp
from lxml import etree

t1 = time.time()
template = 'https://ssr4.scrape.center/detail/{page}'

async def get(session, queue):
    # Each worker keeps pulling page numbers off the queue until it is empty.
    while True:
        try:
            page = queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        url = template.format(page=page)
        resp = await session.get(url, timeout=60, ssl=False)  # ssl=False replaces the deprecated verify_ssl=False
        r = await resp.text(encoding='utf-8')
        selector = etree.HTML(r)
        x1 = '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()'
        s2 = selector.xpath(x1)[0]
        print(page, s2)
async def main():
    async with aiohttp.ClientSession() as session:
        queue = asyncio.Queue()
        for page in range(1, 101):
            queue.put_nowait(page)
        tasks = []
        for _ in range(10):
            # Wrap each coroutine in a Task; asyncio.wait() no longer
            # accepts bare coroutines on Python 3.11+.
            task = asyncio.create_task(get(session, queue))
            tasks.append(task)
        await asyncio.gather(*tasks)

asyncio.run(main())
print(time.time() - t1)
I've run the async version N times and it keeps throwing:
aiohttp.client_exceptions.ClientOSError: [WinError 64] The specified network name is no longer available.
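
That error usually means the server (or something in between) dropped the connection, which tends to happen when too many requests are in flight at once. A minimal sketch of one common mitigation, assuming that is the cause here: cap the number of simultaneous connections with aiohttp's TCPConnector and retry a page a few times before giving up. The MAX_RETRIES constant and fetch_with_retry helper below are illustrative names I made up, not part of the original code.

import asyncio

import aiohttp

MAX_RETRIES = 3  # hypothetical retry budget; tune as needed

async def fetch_with_retry(session, url):
    # Retry transient connection drops (e.g. ClientOSError / WinError 64)
    # a few times with a growing back-off before giving up.
    for attempt in range(1, MAX_RETRIES + 1):
        try:
            async with session.get(url, timeout=60, ssl=False) as resp:
                return await resp.text(encoding='utf-8')
        except aiohttp.ClientOSError:
            if attempt == MAX_RETRIES:
                raise
            await asyncio.sleep(attempt)  # back off 1s, 2s, ...

async def main():
    # limit=10 caps simultaneous TCP connections, so the server
    # never sees more than 10 requests in flight at any moment.
    connector = aiohttp.TCPConnector(limit=10)
    async with aiohttp.ClientSession(connector=connector) as session:
        html = await fetch_with_retry(session, 'https://ssr4.scrape.center/detail/1')
        print(html[:100])

asyncio.run(main())

The same idea carries over to the worker-queue version: keep the workers, but route every request through a retry helper and a connection-limited session.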
# Multithreaded version
import time
from concurrent.futures import ThreadPoolExecutor

import requests
from lxml import etree

start_time = time.time()
def download_detail(p):
    try:
        url = f'https://ssr4.scrape.center/detail/{p}'
        r = requests.get(url, timeout=60)
        r.encoding = 'utf-8'
        selector = etree.HTML(r.text)
        x1 = '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()'
        a = selector.xpath(x1)[0]
        print(p, a)
    except Exception:
        print('error', p)

if __name__ == '__main__':
    # Leaving the with-block waits for every submitted task to finish.
    with ThreadPoolExecutor(16) as t:
        for p in range(1, 101):
            t.submit(download_detail, p)
    print('done')
    print('Download took:', time.time() - start_time)
# Asynchronously scrape the detail pages -- improved version
import asyncio
import time

import aiohttp
from lxml import etree

t1 = time.time()
template = 'https://ssr4.scrape.center/detail/{page}'
async def get(session, queue):
    while True:
        try:
            page = queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        try:
            url = template.format(page=page)
            resp = await session.get(url, timeout=120)
            r = await resp.text(encoding='utf-8')
            selector = etree.HTML(r)
            x1 = '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()'
            s2 = selector.xpath(x1)[0]
            print(page, s2)
        except Exception:
            # Swallow per-page failures so one bad page does not kill the worker.
            print('error:', page)
async def main():
    async with aiohttp.ClientSession() as session:
        queue = asyncio.Queue()
        for page in range(1, 101):
            queue.put_nowait(page)
        tasks = []
        # Roughly one worker per page; any extra worker finds the queue empty and exits.
        for _ in range(101):
            task = asyncio.create_task(get(session, queue))
            tasks.append(task)
        await asyncio.gather(*tasks)

asyncio.run(main())
print(time.time() - t1)