Continuing from the previous project, this time we speed up the crawl by adding a worker pool. Note that multiprocessing.dummy.Pool, used below, is actually a pool of threads that exposes the same interface as a process pool.
You can make the crawler faster by raising the number of workers, but be aware that crawling too fast will get your IP banned.
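If you are worried about tripping the rate limit, one simple mitigation is to pause briefly before each request. The helper below is only a sketch of that idea and is not part of the crawler itself; the polite_get name and the one-second base delay are made up for illustration.

import random
import time

import requests

def polite_get(url, headers=None, base_delay=1.0):
    # Sleep a randomized interval first so concurrent workers do not
    # hit the site in lock-step, then issue the real request.
    time.sleep(base_delay * (0.5 + random.random()))
    return requests.get(url, headers=headers, timeout=10)

You could swap this in for the requests.get calls inside inner_url_list and get_detail if bans become a problem.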
import html
from lxml import etree
from multiprocessing.dummy import Pool
from multiprocessing import JoinableQueue
import requests
class Top_250_detail():
    def __init__(self):
        self.count = 0
        self.request_p = Pool(10)  # change this number to set how many workers can run at the same time
        self.extract_p = Pool(10)
        self.extract_p2 = Pool(10)
        self.inner_url_q = JoinableQueue()
        self.outter_url_q = JoinableQueue()
        self.d_q = JoinableQueue()
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36"
        }
    def outer_url_list(self):  # urls of the ten list pages
        for i in range(0, 226, 25):
            url_raw = 'https://movie.douban.com/top250?start={}'.format(i)
            self.outter_url_q.put(url_raw)
        print('outer page urls generated')
    def inner_url_list(self):  # collect the urls of the 25 movies on each list page
        while True:
            outter_url = self.outter_url_q.get()
            response = requests.get(outter_url, headers=self.headers)
            eroot = etree.HTML(response.text)
            element_list = eroot.xpath('//div[@class="item"]')
            for i in element_list:
                detail_link = i.xpath('./div/a/@href')[0]
                self.inner_url_q.put(detail_link)
            # call task_done() only after the page has been handled, so that
            # outter_url_q.join() really means every page has been processed
            self.outter_url_q.task_done()
    def get_detail(self):  # fetch the detailed synopsis of each movie
        while True:
            inner_url = self.inner_url_q.get()
            d_response = requests.get(inner_url, headers=self.headers)
            d_eroot = etree.HTML(d_response.text)
            detail_summary_list = d_eroot.xpath('//span[@class="all hidden"]/text()')
            if not detail_summary_list:
                detail_summary_list = d_eroot.xpath('//span[@property="v:summary"]/text()')
            for paragraph in detail_summary_list:
                self.d_q.put(paragraph)
            # task_done() is what lets inner_url_q.join() know this url is finished
            self.inner_url_q.task_done()
    def get_detail2(self):  # clean and print each paragraph of the synopsis
        while True:
            paragraph = self.d_q.get()
            clean_para = html.unescape(paragraph).strip()  # decode HTML entities and strip surrounding whitespace
            print(self.count, clean_para)
            print()
            self.count += 1
            self.d_q.task_done()
    def muti_task(self):
        for i in range(3):  # change this number to adjust how many workers run per stage, i.e. the crawl speed
            self.request_p.apply_async(self.inner_url_list)
            self.extract_p.apply_async(self.get_detail)
            self.extract_p2.apply_async(self.get_detail2)
        # The workers loop forever, so joining the pools would block forever.
        # Instead wait for each queue to be fully processed; the pool workers
        # are daemon threads, so the program exits once the last queue drains.
        self.outter_url_q.join()
        self.inner_url_q.join()
        self.d_q.join()
if __name__ == '__main__':
    spider = Top_250_detail()
    spider.outer_url_list()
    spider.muti_task()
You can check a queue's length at any time with queue.qsize().
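To actually watch the pipeline drain while it runs, you could poll those sizes from a separate thread. This is only a rough sketch, not part of the original script; note that qsize() is approximate while workers are running and raises NotImplementedError on macOS for multiprocessing queues.

import threading
import time

def monitor(spider, interval=2):
    # Hypothetical progress monitor: report how many items are still
    # waiting in each queue every couple of seconds.
    while True:
        print('pages:', spider.outter_url_q.qsize(),
              'movies:', spider.inner_url_q.qsize(),
              'paragraphs:', spider.d_q.qsize())
        time.sleep(interval)

threading.Thread(target=monitor, args=(spider,), daemon=True).start()  # start before spider.muti_task()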