QiushibaikeComparation.py:
# Import the required libraries
import requests
import re
from bs4 import BeautifulSoup
from lxml import etree
import time
# Add request headers (browser User-Agent)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
# Build the list of URLs: pages 1-35 of the text channel
urls = ['https://www.qiushibaike.com/text/page/{}/'.format(str(i)) for i in range(1, 36)]
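# The three scrapers below fetch and parse the same pages with different
# parsing tools, so their elapsed times can be compared directly.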
# Scraper 1: regular expressions
def re_scraper(url):
    res = requests.get(url, headers=headers)
    ids = re.findall('<h2>(.*?)</h2>', res.text, re.S)
    contents = re.findall('<div class="content">.*?<span>(.*?)</span>', res.text, re.S)
    laughs = re.findall(r'<span class="stats-vote"><i class="number">(\d+)</i>', res.text, re.S)
    comments = re.findall(r'<i class="number">(\d+)</i> 评论', res.text, re.S)
    for id, content, laugh, comment in zip(ids, contents, laughs, comments):
        info = {
            'id': id,
            'content': content,
            'laugh': laugh,
            'comment': comment
        }
        return info  # return the first item only; parsing the page is what gets timed
# Scraper 2: BeautifulSoup (CSS selectors)
def bs_scraper(url):
    res = requests.get(url, headers=headers)
    soup = BeautifulSoup(res.text, 'lxml')
    ids = soup.select('a > h2')
    contents = soup.select('div > span')
    laughs = soup.select('span.stats-vote > i')
    comments = soup.select('i.number')
    for id, content, laugh, comment in zip(ids, contents, laughs, comments):
        info = {
            'id': id.get_text(),
            'content': content.get_text(),
            'laugh': laugh.get_text(),
            'comment': comment.get_text()
        }
        return info  # return the first item only, as above
# Scraper 3: lxml (XPath)
def lxml_scraper(url):
    res = requests.get(url, headers=headers)
    selector = etree.HTML(res.text)
    url_infos = selector.xpath('//div[@class="article block untagged mb15"]')
    try:
        for url_info in url_infos:
            id = url_info.xpath('div[1]/a[2]/h2/text()')[0]
            content = url_info.xpath('a[1]/div/span/text()')[0]
            laugh = url_info.xpath('div[2]/span[1]/i/text()')[0]
            comment = url_info.xpath('div[2]/span[2]/a/i/text()')[0]
            info = {
                'id': id,
                'content': content,
                'laugh': laugh,
                'comment': comment
            }
            return info  # return the first item only, as above
    except IndexError:
        # Some posts are missing a field, so an XPath lookup may return an empty list
        pass
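# Note: each timing below covers 35 HTTP requests plus parsing, so network
# latency is included equally in all three measurements.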
# Main entry point: time each scraper over the same set of pages
if __name__ == '__main__':
    for name, scraper in [('Regular expressions', re_scraper),
                          ('BeautifulSoup', bs_scraper),
                          ('Lxml', lxml_scraper)]:
        start = time.time()
        for url in urls:
            scraper(url)
        end = time.time()
        print(name, end - start)