A Python spider for Qiushibaike (糗事百科)
Target URL: https://www.qiushibaike.com/hot/
The scrape picks specific elements out of each page, so it requires a working knowledge of XPath and the lxml library.
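Before the full spider, a minimal warm-up sketch (the HTML snippet is made up purely for illustration) of the two lxml calls the code below relies on: etree.HTML() parses a string into an element tree, and .xpath() always returns a list, which is why the spider guards every lookup with a length check.

# Warm-up: etree.HTML + xpath on a made-up snippet (illustration only)
from lxml import etree

html = etree.HTML('<div id="post"><h2>author</h2><span class="content">text</span></div>')
print(html.xpath("//div[@id='post']/h2/text()"))      # ['author']
print(html.xpath("//span[@class='content']/text()"))  # ['text']
print(html.xpath("//span[@class='missing']/text()"))  # [] -> empty list, never an exception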
# coding=utf-8
from lxml import etree
import requests
import json


class QiubaiSpider:
    def __init__(self):
        self.temp_url = "https://www.qiushibaike.com/hot/page/{}/"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"}

    def get_url_list(self):  # build url_list from the pattern of the page URLs
        url_list = [self.temp_url.format(i) for i in range(1, 14)]
        return url_list

    def parse_url(self, url):
        print("now parsing:", url)
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def get_content_list(self, html_str):
        html = etree.HTML(html_str)  # parse the response string into an element tree
        # 1. group: one div per post
        div_list = html.xpath("//div[@id='content-left']/div")
        content_list = []
        for div in div_list:
            item = {}
            # author name
            item["author_name"] = div.xpath(".//h2/text()")[0].replace("\n", "") if len(div.xpath(".//h2/text()")) > 0 else None
            # post content
            item["content"] = div.xpath(".//div[@class='content']/span/text()")
            item["content"] = [i.strip() for i in item["content"]]
            # number of "funny" votes
            item["stats-vote"] = div.xpath(".//span[@class='stats-vote']/i/text()")
            item["stats-vote"] = item["stats-vote"][0] if len(item["stats-vote"]) > 0 else None
            # number of comments
            item["stats-comments"] = div.xpath(".//span[@class='stats-comments']/i/text()")
            item["stats-comments"] = item["stats-comments"][0] if len(item["stats-comments"]) > 0 else None
            # image URL (scheme-relative on the page, so prepend "https:")
            item["img"] = div.xpath(".//div[@class='thumb']/img/@src")
            item["img"] = "https:" + item["img"][0] if len(item["img"]) > 0 else None
            print(item["author_name"])
            content_list.append(item)
        return content_list

    def save_content_list(self, content_list):
        with open("qiubai.txt", "a", encoding="utf-8") as f:
            for content in content_list:
                f.write(json.dumps(content, ensure_ascii=False))
                f.write("\n")
        print("save success!")

    def run(self):  # main logic
        # 1. build url_list from the URL pattern
        url_list = self.get_url_list()
        # 2. send requests, get responses
        for url in url_list:
            html_str = self.parse_url(url)
            # 3. extract the data
            content_list = self.get_content_list(html_str)
            # 4. save
            self.save_content_list(content_list)


if __name__ == '__main__':
    qiubai = QiubaiSpider()
    qiubai.run()
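Since save_content_list writes one JSON object per line, the results load back easily as a list of dicts. A short usage sketch, assuming qiubai.txt exists from a previous run:

# Usage sketch: load the scraped items back from qiubai.txt
# (one JSON object per line, as written by save_content_list)
import json

with open("qiubai.txt", encoding="utf-8") as f:
    items = [json.loads(line) for line in f if line.strip()]
print(len(items), "items loaded")

Note that parse_url as written has no timeout or status check; passing timeout=10 to requests.get and calling response.raise_for_status() would be a reasonable hardening for real runs.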