import requests
from lxml import etree
class Qiubai:
    """Scraper for the text section of qiushibaike.com.

    Builds the listing-page URLs, fetches each page with a browser-like
    User-Agent, extracts every post's author name and content lines via
    XPath, and prints the extracted items.
    """

    def __init__(self, pages=2):
        """
        :param pages: number of listing pages to crawl, starting at page 1.
            Defaults to 2, matching the original hard-coded ``range(1, 3)``.
        """
        self.temp_url = "https://www.qiushibaike.com/text/page/{}"
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}
        self.pages = pages

    def get_url_list(self):
        """Return the list of listing-page URLs for pages 1..self.pages."""
        return [self.temp_url.format(i) for i in range(1, self.pages + 1)]

    def parse_url(self, url):
        """GET *url* with the spoofed headers and return the decoded body text."""
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def get_content_list(self, html_str):
        """Extract post data from one listing page.

        :param html_str: raw HTML of a listing page.
        :return: list of dicts, each with ``user_name`` (str) and
            ``content`` (list of stripped text lines of the post).
        """
        # Build an element tree so we can run XPath queries on it.
        html = etree.HTML(html_str)
        div_list = html.xpath("//div[@id='content-left']/div")
        content_list = []
        for div in div_list:
            item = {}
            # xpath always returns a list; the single expected match is [0].
            item["user_name"] = div.xpath(".//h2/text()")[0].strip()
            item["content"] = [i.strip() for i in div.xpath(".//div[@class='content']/span/text()")]
            content_list.append(item)
        return content_list

    def save_content_list(self, content_list):
        """Persist the extracted items; currently just prints each one."""
        for content in content_list:
            print(content)

    def run(self):
        """Main flow: build the URL list, then fetch, extract, and save each page."""
        # 1. Prepare the URL list (page count and pattern are known up front).
        url_list = self.get_url_list()
        # 2. Request each page and get its response.
        for url in url_list:
            html_str = self.parse_url(url)
            # 3. Extract the data.
            content_list = self.get_content_list(html_str)
            # 4. Save it.
            self.save_content_list(content_list)
if __name__ == "__main__":
qiubai = Qiubai()
qiubai.run()
# 多线程糗百爬虫 (multithreaded Qiushibaike crawler) — original article title
# Latest recommended article published 2021-01-28 16:19:19 (blog-platform footer residue)