from bs4 import BeautifulSoup
import asyncio,aiohttp
from lxml import etree
# Shared accumulator intended to collect each bestseller's info (see the
# comment in parse()); note the visible code never actually appends to it.
table = []
async def fetch(session, url):
    """GET *url* via *session* and return the body decoded as text.

    Parameters
    ----------
    session : aiohttp.ClientSession
        Open HTTP session to issue the request on.
    url : str
        Page to download.

    Returns
    -------
    str
        Response body decoded with GB18030 (the encoding used by the
        Simplified-Chinese dangdang.com pages).
    """
    # `async with` guarantees the response is released even on error.
    async with session.get(url) as response:
        return await response.text(encoding='gb18030')
async def parse(html):
    """Extract bestseller titles from one ranking page.

    Parameters
    ----------
    html : str
        Raw HTML of a dangdang bestseller-list page.

    Side effects: prints each title list found and appends the title
    string to the module-level ``table``.
    """
    doc = etree.HTML(html)  # separate name: don't shadow the raw-HTML argument
    items = doc.xpath('//ul[contains(@class,"bang_list")]/li')
    for item in items:
        title = item.xpath('./div[@class="name"]/a/@title')
        if not title:
            # Some <li> entries (ads/separators) have no name div — skip them.
            continue
        print(title)
        # Add each bestseller's info to `table` (the original comment
        # promised this but the code never did it).
        table.append(title[0])
# URLs of the 25 "recent 7 days" bestseller ranking pages on dangdang.com.
urls = [
    f'http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-recent7-0-0-1-{page}'
    for page in range(1, 26)
]
async def download(url):
    """Fetch one bestseller page and parse it.

    Opens a dedicated HTTP session per page (simple, though a shared
    session would be cheaper), downloads *url*, then hands the HTML to
    ``parse`` which records the titles.
    """
    async with aiohttp.ClientSession() as session:
        html = await fetch(session, url)
        await parse(html)
if __name__ == "__main__":
    # Guard the scrape so importing this module no longer fires 25 HTTP
    # requests as a side effect.
    loop = asyncio.get_event_loop()
    # Schedule all page downloads concurrently and wait for all of them.
    tasks = [asyncio.ensure_future(download(url)) for url in urls]
    loop.run_until_complete(asyncio.gather(*tasks))