# Scrape the full text of the novel "Gone with the Wind" (乱世佳人)
# chapter by chapter and save it to a local text file.
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
# Accumulators filled by the scraping loop below:
# one chapter title and one chapter body text per index entry.
titles = []
texts = []
def updatepages(url, header, encoding='gbk'):
    """Fetch *url* and return the parsed BeautifulSoup document.

    Parameters:
        url: page address to fetch.
        header: dict of HTTP request headers (used for the User-Agent).
        encoding: charset used to decode the response body. The target
            site serves GBK pages, so the default is ``'gbk'``; decoding
            with the wrong charset produces mojibake.

    Returns:
        A ``BeautifulSoup`` object built with the stdlib
        ``'html.parser'`` backend.
    """
    req = Request(url, headers=header)
    # Context manager closes the HTTP connection promptly instead of
    # leaking the socket until garbage collection.
    with urlopen(req) as resp:
        html = resp.read().decode(encoding)
    return BeautifulSoup(html, 'html.parser')
# Index page that lists every chapter of the novel.
url = 'https://www.uuzuowen.com/mingzhu/luanshijiaren/'
# Browser-like User-Agent so the site serves the page normally.
header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
# Fetch and parse the chapter index with the helper defined above.
bs = updatepages(url, header)
# The chapter links sit in the page's class-less <ul>; each <li> holds
# one <a> whose href is relative to the site root.
link = bs.find('ul', {'class': ''})
for into in link.find_all('li'):
    link1 = into.find('a')['href']
    fnlink = 'https://www.uuzuowen.com/' + link1
    # Fetch each chapter page exactly once (the original code fetched
    # every chapter twice — once for the title, once for the body).
    chapter = updatepages(fnlink, header)
    # <h1> carries the chapter title.
    titles.append(chapter.find('h1').get_text())
    # The chapter body lives in <div class="articleContent">.
    text = chapter.find('div', {'class': 'articleContent'})
    texts.append(text.get_text())
# Write every chapter (title followed by its text) to lsjr.txt,
# echoing each pair to the console as it goes.
with open('lsjr.txt', 'w', encoding='utf-8') as file:
    for title, text in zip(titles, texts):
        # chr(12288) is the full-width (CJK) space, used here as the
        # padding character for aligned console output.
        print('{0:{2}<30}{1:{2}^12}'.format(title, text, chr(12288)))
        file.write(title)  # write() only accepts str
        # Original line was missing the closing parenthesis here
        # (``file.write(texts[i]``) — a SyntaxError; fixed.
        file.write(text)
# After the script runs, the scraped novel text is saved in lsjr.txt.