官方教程:
【1】https://compiletoi.net/fast-scraping-in-python-with-asyncio/
【2】https://docs.python.org/3.4/library/asyncio.html
【3】http://aiohttp.readthedocs.io/en/stable/
我用的 Python 版本是 3.5.2。官方示例中的 body = yield from response.read_and_close() 已不可用(read_and_close 在新版 aiohttp 中被移除),因此我把它拆成下面先 read() 再 close() 的写法,以后会继续学习异步编程。
import asyncio
import aiohttp
async def wget(host):
    """Fetch and print the HTTP response headers of *host*'s root page.

    Opens a plain TCP connection to port 80, sends a minimal HTTP/1.0
    GET request, then prints each response-header line until the blank
    CRLF line that terminates the header block.

    Uses ``async def`` / ``await`` instead of the generator-based
    ``@asyncio.coroutine`` / ``yield from`` form, which is deprecated
    and was removed in Python 3.11 (``async``/``await`` is available
    since 3.5).

    :param host: hostname to connect to over plain HTTP on port 80.
    """
    print('wget %s' % host)
    # open_connection resolves and connects, yielding a
    # (StreamReader, StreamWriter) pair.
    reader, writer = await asyncio.open_connection(host, 80)
    request = 'GET / HTTP/1.0\r\nHost: %s\r\n\r\n' % host
    writer.write(request.encode('utf-8'))
    await writer.drain()
    while True:
        line = await reader.readline()
        # An empty CRLF line marks the end of the header section.
        if line == b'\r\n':
            break
        print('%s header > %s' % (host, line.decode('utf-8').rstrip()))
    writer.close()
async def print_page(url):
    """Download *url* with aiohttp and print the raw response body.

    Rewritten as a native coroutine using a ``ClientSession`` — the
    supported aiohttp client API.  The bare ``aiohttp.request`` call
    with a manual ``response.close()`` (as in the original) leaks the
    connection if ``read()`` raises; the ``async with`` context
    managers below release both the response and the session on any
    exit path.

    :param url: absolute URL to fetch with an HTTP GET.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            body = await response.read()
    print(body)
def main():
    """Fetch two pages concurrently and print their bodies.

    Drives the event loop until both ``print_page`` coroutines finish;
    ``finally`` guarantees the loop is closed even if a fetch raises.
    (``asyncio.run`` is only available from 3.7, so the explicit
    get_event_loop / run_until_complete pattern is kept for 3.5.)
    """
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(asyncio.wait([
            print_page('http://tieba.baidu.com/i/357696678/concern'),
            print_page('http://www.baidu.com'),
        ]))
    finally:
        loop.close()


if __name__ == '__main__':
    main()