爬取飞卢小说网的任意小说
需要的库就三个
import requests
import re
import os
飞卢小说网的 URL 规则很简单:每一章的内容页就是小说主页地址后面加上下划线和章节号(第 1 章即加 _1)
爬虫函数,id在主函数会输入
def bug(id):
    """Crawl every chapter of a Faloo novel given its numeric id string.

    Creates a directory named after the novel title under the current
    working directory and writes one UTF-8 ``.txt`` file per chapter.

    Returns the novel title so the caller can run the cleanup pass
    (``GL``) on the same directory afterwards.
    """
    # Table of contents lives at <id>.html; chapter n at <id>_<n>.html.
    url1 = "https://b.faloo.com/" + id + ".html"
    html1 = requests.get(url1)
    # Chapter count = number of chapter links on the index page.
    length_ml = len(re.findall(r'<a href=.+? target="_self" title=.+?', html1.text))
    # Capture the title with a group: the original used
    # strip("<title></title>"), which removes *characters* (<, t, i, ...)
    # from both ends and can eat legitimate title text.
    match = re.search(r'<title>(.+?)</title>', html1.text)
    title = match.group(1) if match else id
    print(title + ":共" + str(length_ml) + '章')
    try:
        # os.path.join instead of '\\' concatenation — portable across OSes.
        os.mkdir(os.path.join(os.getcwd(), title))
    except FileExistsError:
        # Directory already exists: assume a previous run fetched the book
        # and hand the title back for cleanup (preserves original behaviour).
        return title
    chapter = 1  # renamed from `sum` — don't shadow the builtin
    # `<=` so the last chapter is fetched too (the original `<` skipped it).
    while chapter <= length_ml:
        url2 = "https://b.faloo.com/" + id + "_" + str(chapter) + ".html"
        html2 = requests.get(url2)
        m2 = re.search(r"<title>(.+?)</title>", html2.text)
        titles = m2.group(1) if m2 else str(chapter)
        print("正在爬取第" + str(chapter) + "章:" + titles)
        # NOTE(review): `titles` may contain characters that are illegal in
        # filenames on some platforms — confirm against real pages.
        tt = os.path.join(os.getcwd(), title, titles + ".txt")
        # Each story line sits in its own <p>...</p>; capture the content
        # directly instead of strip("<p></p>") (same character-strip pitfall).
        txt = re.findall(r'<p>(.+?)</p>', html2.text)
        # Original compared the list against the string "[]" — always False,
        # so the failure branch was dead code. An empty list is the real test.
        # Checking BEFORE opening also avoids leaving an empty junk file.
        if not txt:
            print('第' + str(chapter) + "章:" + titles + "爬取失败")
            break
        with open(tt, 'w', encoding="utf-8") as fp:
            for line in txt:
                fp.write(line + '\n')
        print('第' + str(chapter) + "章:" + titles + "爬取成功")
        chapter += 1
    return title
而每行小说正文都在一个<p></p>标签里,所以可以直接用正则表达式的findall函数匹配出每行小说的内容。但是爬取下来后会发现一个问题:多出了许多大小约2kb的无效文件,所以我们可以再定义一个清理无效文件的函数
def GL(title):
    """Delete undersized (failed-download) chapter files from the book folder.

    Scans the directory named ``title`` under the current working directory
    and removes every ``.txt`` file smaller than 1000 bytes — the near-empty
    files a failed or truncated chapter fetch leaves behind.
    """
    path = os.path.join(os.getcwd(), title)  # portable, was '\\' concat
    removed = 0
    # Iterate names directly — the original's index-while over listdir()
    # with a `debug` flag raised NameError when the folder held no .txt
    # files (flag never assigned), and its final message depended on the
    # *last* file scanned rather than the overall outcome.
    for name in os.listdir(path):
        if not name.endswith(".txt"):
            continue
        full = os.path.join(path, name)
        # getsize() alone is enough; the original opened each file with
        # `with open(...)` just to measure it and never used the handle.
        if os.path.getsize(full) < 1000:
            os.remove(full)
            print("无效文件" + name + "已删除")
            removed += 1
    # After the loop every remaining .txt is >= 1000 bytes, so cleanup is
    # complete by construction.
    print("无效文件已全部删除")
然后接上主函数
def main():
    """Entry point: read a book id, crawl the novel, then purge junk files."""
    # Fix: the original prompt said 漫画 (comic), but this crawler fetches
    # novels from a novel site — ask for a 小说 id. Also avoid shadowing
    # the builtin `id` with the local name.
    book_id = input("请输入小说id>>>")
    title = bug(book_id)
    GL(title)
最后主函数入口
# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
ok,完事儿了,如果喜欢本文章请麻烦点一个爱心留言加收藏
爱你你么么哒(づ ̄ 3 ̄)づ