"""
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2022/7/17 4:14
# @Author : allen
"""
from lxml import etree
import requests
from bs4 import BeautifulSoup
import re
if __name__ == "__main__":
    # Scrape chapters of one novel from biqugebang.com: XPath pulls the chapter
    # list from the index page, BeautifulSoup extracts each chapter's body text,
    # and the result is appended to a.txt one sentence per line.
    url = "https://www.biqugebang.com/book/4475/"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36"}
    # timeout added: without it a stalled server hangs the script forever.
    response = requests.get(url=url, headers=headers, timeout=30)
    # Let requests sniff the real charset (these sites are typically GBK).
    response.encoding = response.apparent_encoding
    page_text = response.text
    tree = etree.HTML(page_text)
    div_list = tree.xpath("//div[@class='listmain']/dl/dd")
    # NOTE(review): hard-coded slice keeps only the 19th <dd> (a single chapter).
    # Widen this slice (e.g. div_list[18:]) to download more chapters.
    div_list1 = div_list[18:19]
    for div in div_list1:
        # Chapter hrefs appear to be relative to the book index page.
        text_url = "https://www.biqugebang.com/book/4475/" + div.xpath("./a/@href")[0]
        text_name = div.xpath("./a/text()")[0]  # no-op encode/decode round-trip removed
        chapter_resp = requests.get(url=text_url, headers=headers, timeout=30)
        # Bug fix: the chapter page was previously read without charset sniffing,
        # producing mojibake whenever the page is not already UTF-8.
        chapter_resp.encoding = chapter_resp.apparent_encoding
        soup1 = BeautifulSoup(chapter_resp.text, 'html.parser')
        div_tag = soup1.find("div", id="content")
        if div_tag is None:
            # Layout change or bad fetch: skip instead of crashing on .text.
            print(text_name, "获取失败")
            continue
        # Split on the Chinese full stop so each sentence lands on its own line.
        data_list = re.split(r'。', div_tag.text)
        # Bug fix: original leaked the file handle (never closed) and relied on
        # the platform default encoding; 'with' + explicit UTF-8 fixes both.
        with open("a.txt", "a+", encoding="utf-8") as file:
            file.write(text_name + "\n")
            for data in data_list:
                file.write(data + "\n")
        print(text_name, "获取成功!!!")
# 使用etree xpath爬虫某网站的小说。写了好几天了,不想写注释,尝试看懂吧,反正也不难。实在不懂,想学的可以私我,有时间就帮你解答下困惑,没时间就算了。## python3.7 xpath爬取小说
# (Trailing README note kept verbatim but prefixed with '#': bare prose at
#  module level is a SyntaxError and made the file unrunnable.)