Using the BeautifulSoup library, I want to scrape information for the top 250 books: the title, the title's URL, the author, the publisher and publication date, the price, the rating, and the one-line review quote, and save it all to a txt file with the fields aligned in neat columns. (I'm just starting to learn web scraping; if there are errors in the code, please point them out.)
The target URL is: https://book.douban.com/top250
The code is as follows:
import requests
from bs4 import BeautifulSoup
import time

headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.106 Safari/537.36"}
book_list = []

def book_info(url):
    book_data = requests.get(url, headers=headers, timeout=10)
    soup = BeautifulSoup(book_data.text, "lxml")
    # Parse one book at a time: each book sits in its own <table> under
    # div.indent. Zipping four parallel select() lists, as I did at first,
    # silently misaligns the data whenever a book has no quote, because
    # span.inq is missing for some entries and zip pairs by position.
    for item in soup.select("div.indent > table"):
        book_i = item.select_one("div.pl2 > a")
        line = item.select_one("p.pl").get_text().split("/")
        mark = item.select_one("span.rating_nums")
        content = item.select_one("span.inq")  # may be None for some books
        data = {
            "book_name": book_i.get_text().replace('\n', '').replace(' ', ''),
            "book_url": book_i['href'],
            "line": ' '.join(line),
            "mark": mark.get_text(),
            "content": content.get_text() if content else ''
        }
        book_list.append(data)

if __name__ == '__main__':
    # Ten pages of 25 books each: start=0, 25, ..., 225.
    urls = ["https://book.douban.com/top250?start={}".format(i) for i in range(0, 250, 25)]
    for url in urls:
        book_info(url)
        time.sleep(1)  # pause between requests so as not to hammer the site
    # Open the file once with a context manager instead of reopening it on
    # every record; writing utf-8 also makes the old except UnicodeDecodeError
    # guard unnecessary.
    with open(r'D:\Python爬虫\doubanbook.txt', "a+", encoding='utf-8') as fo:
        for word in book_list:
            fo.write(word["book_name"] + " " + word["book_url"] + " " + word["line"] + " "
                     + word["mark"] + "分" + " " + word["content"] + "\n")
            fo.write("\n")
Partial screenshot of the results:
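On the alignment requirement: joining fields with single spaces, as above, won't actually line the columns up, because titles and quotes vary in length and CJK characters render twice as wide as ASCII, so str.ljust alone is not enough. Below is a minimal sketch of one way to pad by display width using unicodedata.east_asian_width; the column widths (30, 45, 60, 8) are illustrative assumptions, not values from the original code.

import unicodedata

def display_width(s):
    # Full-width ('F') and wide ('W') characters occupy two columns on screen.
    return sum(2 if unicodedata.east_asian_width(ch) in ('F', 'W') else 1 for ch in s)

def pad(s, width):
    # Left-justify by display width rather than by character count,
    # so Chinese titles and ASCII URLs end in the same column.
    return s + ' ' * max(0, width - display_width(s))

with open(r'D:\Python爬虫\doubanbook.txt', 'a+', encoding='utf-8') as fo:
    for word in book_list:  # book_list as built by the scraper above
        fo.write(pad(word["book_name"], 30)
                 + pad(word["book_url"], 45)
                 + pad(word["line"], 60)
                 + pad(word["mark"] + "分", 8)
                 + word["content"] + "\n")

Note this only lines up in a fixed-width font whose CJK glyphs are exactly double width; if you need truly tabular output, writing a CSV and opening it in a spreadsheet is the simpler route.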