#!/usr/bin/python3
import requests
import json
from lxml import etree  # requires: pip3 install lxml

# Crawler - Qiushibaike (qiushibaike.com) text-post listing
class QiushiSpider:
    def __init__(self):
        self.url_temp = "https://www.qiushibaike.com/text/page/{}/"
        self.headers = {
            'Referer': "https://www.qiushibaike.com/text/page/{}/",
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
        }
    def get_url_list(self):  # build the list of page URLs from the known URL pattern
        url_list = [self.url_temp.format(i) for i in range(1, 13)]
        return url_list

    def parse_url(self, url):  # request one page
        self.headers['Referer'] = url
        response = requests.get(url, headers=self.headers)
        return response.content.decode()
    def get_content_list(self, html_str):  # extract data from one page
        html = etree.HTML(html_str)
        div_list = html.xpath("//div[@id='content-left']/div")
        content_list = []
        for div in div_list:
            item = {}
            user = div.xpath(".//h2/text()")
            item['user'] = user[0].strip() if len(user) > 0 else None  # conditional (ternary) expression
            portrait = div.xpath(".//div[@class='author clearfix']/a/img/@src")
            item['portrait'] = "https://" + portrait[0] if len(portrait) > 0 else None
            item['number'] = div.xpath(".//i[@class='number']/text()")
            item['content'] = [i.strip() for i in div.xpath(".//div[@class='content']/span/text()")]
            content_list.append(item)
            print(item)
        return content_list
    def save_content_list(self, content_list):  # save: one JSON object per line (JSON Lines)
        with open("qiushibaike.txt", "a", encoding="utf-8") as f:
            for content in content_list:
                f.write(json.dumps(content, ensure_ascii=False))
                f.write("\n")
        print("Saved successfully")
    def run(self):  # main logic
        # 1. build the URL list
        url_list = self.get_url_list()
        for url in url_list:
            # 2. send the request
            html_str = self.parse_url(url)
            # 3. extract the data
            content_list = self.get_content_list(html_str)
            # 4. save it
            self.save_content_list(content_list)

# entry point
if __name__ == '__main__':
    qiushi = QiushiSpider()
    qiushi.run()
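
The spider's parse_url makes a bare requests.get call with no timeout and no status check, so a slow or dead page can stall the whole run or hand an error page to the XPath parser. A minimal hardened variant, as a sketch (the name parse_url_safe and the 10-second timeout are illustrative choices, not part of the original):

import requests

def parse_url_safe(url, headers, timeout=10):
    # Variant of QiushiSpider.parse_url with basic error handling (illustrative sketch)
    response = requests.get(url, headers=headers, timeout=timeout)
    response.raise_for_status()  # raise on 4xx/5xx instead of parsing an error page
    return response.content.decode()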
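
To see what etree.HTML and xpath do in get_content_list without hitting the network, here is a self-contained sketch; the HTML snippet is invented to mirror the structure the spider's XPath expressions expect:

from lxml import etree

# invented snippet mimicking the page layout the spider parses
snippet = """
<div id="content-left">
  <div>
    <div class="author clearfix"><a><img src="//example.com/avatar.jpg"/></a>
      <h2> someuser </h2>
    </div>
    <div class="content"><span> first line of the post </span></div>
    <i class="number">123</i>
  </div>
</div>
"""

html = etree.HTML(snippet)
for div in html.xpath("//div[@id='content-left']/div"):
    user = div.xpath(".//h2/text()")
    print(user[0].strip() if user else None)                         # someuser
    print(div.xpath(".//div[@class='author clearfix']/a/img/@src"))  # ['//example.com/avatar.jpg']
    print(div.xpath(".//i[@class='number']/text()"))                 # ['123']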
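
Because save_content_list appends one json.dumps object per line, the output file is in JSON Lines format and can be read back line by line. A minimal sketch, assuming qiushibaike.txt exists from a previous run:

import json

# assumes qiushibaike.txt was produced by a previous run of the spider
with open("qiushibaike.txt", encoding="utf-8") as f:
    items = [json.loads(line) for line in f if line.strip()]

print(len(items), "items loaded")
print(items[0]['user'] if items else "file was empty")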