Yesterday we went over the rules of concurrency in Python; today we'll put them to work in a hands-on example: a multithreaded crawler for the cnblogs front page, built as a producer-consumer pipeline on queue.Queue.
import threading
import time  # only needed if you enable the optional throttling below
import queue

import requests
from bs4 import BeautifulSoup

# Listing pages of the cnblogs front page, e.g. https://www.cnblogs.com/#p2
urls = [
    "https://www.cnblogs.com/#p%d" % page for page in range(1, 200)
]


def craw(url):
    """Producer step: download one page and return its HTML."""
    r = requests.get(url)
    return r.text


def parse(html):
    """Consumer step: extract (href, title) pairs from the post links."""
    # each post title is an <a class="post-item-title"> element
    soup = BeautifulSoup(html, 'lxml')
    links = soup.find_all('a', class_='post-item-title')
    return [(link['href'], link.get_text()) for link in links]


def do_craw(url_queue: queue.Queue, html_queue: queue.Queue):
    while True:
        try:
            # Use a timeout instead of checking url_queue.empty() after get():
            # with several workers, the queue can drain between the two calls
            # and leave a thread blocked on get() forever.
            url = url_queue.get(timeout=3)
        except queue.Empty:
            break
        html = craw(url)
        html_queue.put(html)
        print(threading.current_thread().name,
              f'crawling {url}', 'queue size =', url_queue.qsize())
        # time.sleep(random.randint(1, 2))  # optional throttle (also import random)


def do_parse(html_queue: queue.Queue, fout):
    while True:
        try:
            html = html_queue.get(timeout=3)
        except queue.Empty:
            break
        results = parse(html)
        for result in results:
            fout.write(str(result) + '\n')
        print(threading.current_thread().name,
              f'results.size={len(results)}', 'queue size =', html_queue.qsize())


if __name__ == '__main__':
    url_queue = queue.Queue()
    html_queue = queue.Queue()
    for url in urls:
        url_queue.put(url)

    threads = []
    # 8 producer threads download pages...
    for i in range(8):
        t = threading.Thread(target=do_craw, args=(url_queue, html_queue),
                             name=f'craw-{i}')
        t.start()
        threads.append(t)

    # ...and 6 consumer threads parse them and write out the results.
    fout = open('text.txt', 'w', encoding='utf-8')
    for i in range(6):
        t = threading.Thread(target=do_parse, args=(html_queue, fout),
                             name=f'parse-{i}')
        t.start()
        threads.append(t)

    for t in threads:
        t.join()
    fout.close()
In a side-by-side comparison, the multithreaded crawler ran nearly 40x faster than a single-threaded one.
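If you want to reproduce that comparison yourself, a minimal timing sketch is below. It assumes the craw function and urls list from the listing above can be imported (the module name spider is a placeholder for wherever you saved the code), and it spawns one thread per URL, which is fine for ~200 I/O-bound requests. The exact speedup will vary with your network and thread count.

import threading
import time

from spider import craw, urls  # hypothetical module holding the code above


def single_thread():
    # Fetch every page sequentially.
    for url in urls:
        craw(url)


def multi_thread():
    # One thread per URL; start them all, then wait for all to finish.
    threads = [threading.Thread(target=craw, args=(url,)) for url in urls]
    for t in threads:
        t.start()
    for t in threads:
        t.join()


if __name__ == '__main__':
    start = time.time()
    single_thread()
    print('single thread cost:', time.time() - start, 'seconds')

    start = time.time()
    multi_thread()
    print('multi thread cost:', time.time() - start, 'seconds')

Because the work here is network-bound, the threads spend most of their time waiting on I/O, which releases the GIL, so the speedup is roughly proportional to how many requests can be in flight at once.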