import asyncio
import re

import aiohttp
import aiomysql
from pyquery import PyQuery

stopping = False
start_url = "http://www.jobbole.com/"
waiting_urls = []
seen_urls = set()
# Limit concurrent fetches to 3.
sem = asyncio.Semaphore(3)


async def fetch(url, session):
    # Throttle requests with the semaphore plus a short delay.
    async with sem:
        await asyncio.sleep(1)
        try:
            async with session.get(url) as response:
                print("Status:", response.status)
                print("Content-type:", response.headers['content-type'])
                if response.status in [200, 201]:
                    content = await response.text()
                    return content
        except Exception as e:
            print(e)


def extract_urls(html):
    pq = PyQuery(html)
    for link in pq.items("a"):
        url = link.attr("href")
        # Collect in-site links we have not crawled yet.
        if url and url.startswith("/caijing") and url not in seen_urls:
            waiting_urls.append("http://www.jobbole.com{}".format(url))


async def init_urls(url, session):
    html = await fetch(url, session)
    seen_urls.add(url)
    # fetch() returns None on failure, so guard before parsing.
    if html:
        extract_urls(html)


async def article_handler(url, session, pool):
    # Fetch the article detail page, parse it, and store it in the database.
    html = await fetch(url, session)
    seen_urls.add(url)
    if not html:
        return
    extract_urls(html)
    pq = PyQuery(html)
    title = pq("title").text()
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            # Parameterized query instead of string formatting, to avoid SQL injection.
            await cur.execute("insert into artest(title) values (%s)", (title,))


async def consumer(pool):
    async with aiohttp.ClientSession() as session:
        while not stopping:
            if len(waiting_urls) == 0:
                await asyncio.sleep(0.5)
                continue
            url = waiting_urls.pop()
            print("start get url: {}".format(url))
            if re.match(r"http://.*?jobbole\.com/.*\.html", url):
                # Article detail pages get parsed and stored.
                if url not in seen_urls:
                    asyncio.ensure_future(article_handler(url, session, pool))
                    await asyncio.sleep(0.5)
            else:
                # Anything else is treated as a listing page to extract more links from.
                if url not in seen_urls:
                    asyncio.ensure_future(init_urls(url, session))


async def main(loop):
    # Fill in your own MySQL connection details; the values below are left
    # as placeholders (3306 is the default MySQL port).
    pool = await aiomysql.create_pool(host='', port=3306,
                                      user='', password='',
                                      db='', loop=loop,
                                      charset="utf8", autocommit=True)
    # Seed the crawl with the start page, then hand off to the consumer loop.
    async with aiohttp.ClientSession() as session:
        html = await fetch(start_url, session)
        seen_urls.add(start_url)
        extract_urls(html)
    asyncio.ensure_future(consumer(pool))


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    asyncio.ensure_future(main(loop))
    loop.run_forever()