Scraping Biquge (笔趣阁) Novels with Python


# -*- coding: utf-8 -*-

# https://www.biquge.info/wanjiexiaoshuo/  Biquge completed-novel crawler
import time
import requests
import os
import random
from lxml import etree
import webbrowser
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36 Edg/89.0.774.77"
}
noName = ['#', '/', '\\', ':', '*', '?', '"', '<', '>', '|']  # characters not allowed in Windows filenames
filePath = './saved_novels'  # root folder for downloaded novels
def strZ(_str):  # replace illegal filename characters with spaces
    ret = ''
    for _ in _str:
        if _ in noName:
            ret += " "
        else:
            ret += _
    return ret
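# Example (hypothetical title, not taken from the site): strZ('Chapter 1: "The End?"')
# replaces the ':', '?' and '"' characters with spaces, so the result is safe to
# use as a Windows filename (none of \ / : * ? " < > | survive).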
def main():
    webbrowser.open('https://www.biquwx.la/')
    if not os.path.exists(filePath):
        os.mkdir(filePath)
    print('1. Download one specific novel')
    print('2. Crawl the whole site')
    if input('Which mode do you want to use? ') == '1':
        appintDown()
    else:
        allDown()
    input('Press any key to exit')
def appintDown():  # download one specific novel; assumes the index URL is valid
    page_url = input('Enter the novel index URL (e.g. https://www.biquwx.la/10_10240/): ')
    page = requests.get(url=page_url, headers=header)
    if page.status_code == 200:  # only parse a successful response
        page.encoding = 'utf-8'
        page_tree = etree.HTML(page.text)
        page_title = page_tree.xpath('//div[@id="info"]/h1/text()')[0]
        _filePath = filePath + '/' + page_title
        if not os.path.exists(_filePath):
            os.mkdir(_filePath)
        page_dl_list = page_tree.xpath('//div[@class="box_con"]/div[@id="list"]/dl/dd')
        for dd in page_dl_list:
            _page_url = page_url + dd.xpath('./a/@href')[0]  # chapter URL = index URL + relative href
            _page_title = _filePath + '/' + strZ(dd.xpath('./a/@title')[0]) + '.txt'
            _page = requests.get(_page_url, headers=header)
            if _page.status_code == 200:
                _page.encoding = 'utf-8'
                _tree = etree.HTML(_page.text)
                _page_content = _tree.xpath('//div[@id="content"]/text()')
                fileContent = ''
                for line in _page_content:
                    fileContent += line + '\n'
                with open(_page_title, 'w', encoding='utf-8') as fp:
                    fp.write(fileContent)
                print('%s saved to disk' % _page_title)
            time.sleep(random.uniform(0.05, 0.2))  # short random pause between chapters
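# Note on the URL arithmetic above: the <a> elements under //div[@id="list"]/dl/dd
# carry hrefs relative to the novel's index page (something like '123456.html',
# an assumed example rather than a real link), which is why the chapter URL is
# built by appending the href to page_url.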
def allDown():  # crawl every completed novel on the site
    url = 'https://www.biquge.info/wanjiexiaoshuo/'  # index of completed novels
    page = requests.get(url=url, headers=header)
    if page.status_code == 200:  # only parse a successful response
        page.encoding = 'utf-8'
        tree = etree.HTML(page.text)
        page_last = tree.xpath('//div[@class="pagelink"]/a[@class="last"]/text()')[0]
        for page_i in range(1, int(page_last)):  # walk the index pages
            url = 'https://www.biquge.info/wanjiexiaoshuo/' + str(page_i)
            page = requests.get(url=url, headers=header)
            if page.status_code == 200:
                page.encoding = 'utf-8'
                tree = etree.HTML(page.text)
                li_list = tree.xpath('//div[@class="novelslistss"]/ul/li')
                for li in li_list:
                    page_url = li.xpath('./span[@class="s2"]/a/@href')[0]  # link to the novel's index page
                    page_title = strZ(li.xpath('./span[@class="s2"]/a/text()')[0])
                    page = requests.get(url=page_url, headers=header)
                    if page.status_code == 200:
                        page.encoding = 'utf-8'
                        page_tree = etree.HTML(page.text)
                        _filePath = filePath + '/' + page_title
                        if not os.path.exists(_filePath):
                            os.mkdir(_filePath)
                        page_dl_list = page_tree.xpath('//div[@class="box_con"]/div[@id="list"]/dl/dd')
                        for dd in page_dl_list:
                            _page_url = page_url + dd.xpath('./a/@href')[0]
                            _page_title = _filePath + '/' + strZ(dd.xpath('./a/@title')[0]) + '.txt'
                            _page = requests.get(_page_url, headers=header)
                            if _page.status_code == 200:
                                _page.encoding = 'utf-8'
                                _tree = etree.HTML(_page.text)
                                _page_content = _tree.xpath('//div[@id="content"]/text()')
                                fileContent = ''
                                for line in _page_content:
                                    fileContent += line + '\n'
                                with open(_page_title, 'w', encoding='utf-8') as fp:
                                    fp.write(fileContent)
                                print('%s saved to disk' % _page_title)
                            time.sleep(random.uniform(0.05, 0.2))  # short random pause between chapters

if __name__ == '__main__':
    main()
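
Every request above is a bare requests.get with no timeout, so a single stalled connection or transient network error stops the whole run. Below is a minimal sketch of a retrying wrapper; the name safe_get and the retry/timeout values are assumptions of this note, not part of the original script, and it reuses the header dict and imports defined above.

def safe_get(url, retries=3, timeout=10):
    # Hypothetical helper: GET a page with a timeout and a few retries.
    # Returns the Response on HTTP 200, or None if every attempt fails.
    for attempt in range(retries):
        try:
            page = requests.get(url, headers=header, timeout=timeout)
            if page.status_code == 200:
                page.encoding = 'utf-8'
                return page  # caller can use page.text directly
        except requests.RequestException:
            pass  # network error: back off and retry
        time.sleep(random.uniform(0.5, 1.5) * (attempt + 1))  # grow the wait on each retry
    return None  # every attempt failed

Each requests.get(...) call in appintDown() and allDown() could then be replaced by safe_get(...), with a check for None instead of the status_code test.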
