1.本例子为一个单/双/四进程爬虫耗时对比
2.顺带练习了爬取糗事百科生成字典写入文件中的方法
3.练习正则表达式的使用
4.代码如下:
import re
import time
import requests
import json
from multiprocessing import Pool
# Browser-like request headers so the site serves the normal HTML page.
# BUGFIX: the key was originally 'User - Agent' (with spaces), which is not a
# valid HTTP header name — the server would never see a real User-Agent and
# could reject the request as coming from a bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36'
}
def re_scraper(url, out_path=r"D:\Python_Test\text.txt"):
    """Scrape one Qiushibaike listing page and append each item as a JSON line.

    Args:
        url: listing-page URL to fetch.
        out_path: output file; one JSON object per line is appended.
          Defaults to the original hard-coded path for backward compatibility.
    """
    res = requests.get(url, headers=headers)
    # re.S makes '.' match newlines so multi-line post bodies are captured.
    # Raw strings avoid the invalid-escape DeprecationWarning for \d.
    names = re.findall(r'<h2>(.*?)</h2>', res.text, re.S)
    contents = re.findall(r'<div class="content">.*?<span>(.*?)</span>', res.text, re.S)
    laughs = re.findall(r'<i class="number">(\d+)</i> 好笑', res.text, re.S)
    comments = re.findall(r'<i class="number">(\d+)</i> 评论', res.text, re.S)
    infos = [
        {'name': name, 'content': content, 'laugh': laugh, 'comment': comment}
        for name, content, laugh, comment in zip(names, contents, laughs, comments)
    ]
    # BUGFIX: append ('a') instead of 'w' — this function runs once per page,
    # and 'w' truncated the file, keeping only the last page's records.
    # BUGFIX: write '\n', not '\r\n' — text mode already translates newlines
    # on Windows, so '\r\n' produced '\r\r\n' in the output.
    with open(out_path, "a", encoding='utf-8') as filewriter:
        for item in infos:
            filewriter.write(json.dumps(item, ensure_ascii=False))
            filewriter.write('\n')
if __name__ == '__main__':
    # Listing pages 1..25 of the site.
    urls = ["https://www.qiushibaike.com/8hr/page/{}/".format(i) for i in range(1, 26)]

    # Baseline: serial scraping.
    start_1 = time.time()
    for url in urls:
        re_scraper(url)
    end_1 = time.time()
    print("串行型爬虫耗时:", end_1 - start_1)

    # 2-process pool. BUGFIX: close()+join() so the worker processes are
    # reaped before the next measurement instead of being leaked — the
    # original never shut down either pool.
    start_2 = time.time()
    pool = Pool(processes=2)
    pool.map(re_scraper, urls)
    pool.close()
    pool.join()
    end_2 = time.time()
    print('2进程爬虫耗时:', end_2 - start_2)

    # 4-process pool.
    start_3 = time.time()
    pool = Pool(processes=4)
    pool.map(re_scraper, urls)
    pool.close()
    pool.join()
    end_3 = time.time()
    print('4进程爬虫耗时:', end_3 - start_3)
学习总结,高手勿拍砖