import requests
from lxml import etree
import json
class BookSpider(object):
    """Scrape book listings (title, cover image, author, summary) from
    allitebooks.org and save them to a JSON file.

    Typical use: ``BookSpider().start()``.
    """

    def __init__(self, max_pages=3):
        """Set up request defaults.

        :param max_pages: number of listing pages to crawl (pages 1..max_pages).
                          Defaults to 3, matching the original behavior.
        """
        self.base_url = "http://www.allitebooks.org/page/{}/"
        # Browser-like UA so the site does not reject the request as a bot.
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"}
        # Accumulates one dict per scraped book across all pages.
        self.data_list = []
        self.max_pages = max_pages

    # 1. Build all listing-page URLs.
    def get_url_list(self):
        """Return the listing-page URLs to crawl.

        BUG FIX: the original used ``range(3)`` (pages 0, 1, 2), but the
        site's pagination is 1-indexed, so page 0 is not a real listing page.
        """
        return [self.base_url.format(i) for i in range(1, self.max_pages + 1)]

    # 2. Fetch one page.
    def send_request(self, url):
        """GET *url* and return the response body decoded as text (utf-8)."""
        return requests.get(url, headers=self.headers).content.decode()

    # 3. Parse one page of HTML.
    def parse_xpath_data(self, data):
        """Extract every book on the page into ``self.data_list``.

        Each entry is a dict with keys: book_name, book_img_url,
        book_author, book_info.
        """
        parse_data = etree.HTML(data)
        # One <article> per book inside the main content column.
        book_list = parse_data.xpath('//div[@class="main-content-inner clearfix"]/article')
        for book in book_list:
            book_dict = {}
            # Book title.
            book_dict["book_name"] = book.xpath('.//h2[@class="entry-title"]//text()')[0]
            # Cover image URL.
            book_dict["book_img_url"] = book.xpath('div[@class="entry-thumbnail hover-thumb"]/a/img/@src')[0]
            # Author name.
            book_dict["book_author"] = book.xpath('.//h5[@class="entry-author"]/a/text()')[0]
            # Short summary paragraph.
            book_dict["book_info"] = book.xpath('.//div[@class="entry-summary"]/p/text()')[0]
            self.data_list.append(book_dict)

    # 4. Persist the collected data.
    def save_data(self):
        """Write all scraped books to 04book.json.

        FIX: use a context manager so the file handle is always closed
        (the original leaked it), and keep non-ASCII text readable.
        """
        with open("04book.json", "w", encoding="utf-8") as f:
            json.dump(self.data_list, f, ensure_ascii=False)

    # Orchestrate the full crawl.
    def start(self):
        """Crawl every listing page, parse it, then save the results."""
        for url in self.get_url_list():
            page_html = self.send_request(url)
            self.parse_xpath_data(page_html)
        self.save_data()
# Run the crawl only when executed as a script, not when imported.
if __name__ == "__main__":
    BookSpider().start()
# Python web scraper — allitebooks.org — xpath
# (Originally published as a blog post; latest recommended revision 2024-03-29 09:47:06.)