import requests
from lxml import etree
import json
import csv
class BookSpider(object):
    def __init__(self):
        self.base_url = "http://www.allitebooks.org/page/{}/"
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"}
        self.data_list = []

    # 1. Build all the page URLs
    def get_url_list(self):
        url_list = []
        for i in range(1, 4):  # pagination starts at page 1; crawl the first 3 pages
            url = self.base_url.format(i)
            url_list.append(url)
        return url_list
    # 2. Send the request
    def send_request(self, url):
        data = requests.get(url, headers=self.headers).content.decode()
        return data
    # 3. Parse the data
    def parse_xpath_data(self, data):
        parse_data = etree.HTML(data)
        # Extract every book node on the page
        book_list = parse_data.xpath('//div[@class="main-content-inner clearfix"]/article')
        # Extract each book's details
        for book in book_list:
            book_dict = {}
            # 1. Book title
            book_dict["book_name"] = book.xpath('.//h2[@class="entry-title"]//text()')[0]
            # 2. Cover image
            book_dict["book_img_url"] = book.xpath('div[@class="entry-thumbnail hover-thumb"]/a/img/@src')[0]
            # 3. Author
            book_dict["book_author"] = book.xpath('.//h5[@class="entry-author"]/a/text()')[0]
            # 4. Summary
            book_dict["book_info"] = book.xpath('.//div[@class="entry-summary"]/p/text()')[0]
            # print(book_dict["book_info"])
            self.data_list.append(book_dict)
    # 4. Save the data
    def save_data(self):
        # 1. Dump the scraped list to JSON, then re-read it to build the CSV
        with open("04book.json", "w", encoding="utf-8") as fd:
            json.dump(self.data_list, fd)
        json_fd = open("04book.json", "r", encoding="utf-8")
        csv_fd = open("04.csv", "w", encoding="utf-8", newline="")  # newline="" avoids blank rows on Windows
        # 2. Extract the header row and the table body
        # (json.load turns the JSON text back into a list of dicts)
        data_list = json.load(json_fd)
        sheet_title = data_list[0].keys()
        sheet_data = []
        for data in data_list:
            sheet_data.append(data.values())
        # 3. CSV writer
        writer = csv.writer(csv_fd)
        # 4. Write the header row
        writer.writerow(sheet_title)
        # 5. Write the table body
        writer.writerows(sheet_data)
        # 6. Close both files
        json_fd.close()
        csv_fd.close()
    # Orchestrate the whole crawl
    def start(self):
        url_list = self.get_url_list()
        for url in url_list:
            data = self.send_request(url)
            self.parse_xpath_data(data)
        self.save_data()


BookSpider().start()
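If the intermediate 04book.json file is not actually needed, the CSV can also be written straight from self.data_list with csv.DictWriter, skipping the JSON round trip. A minimal sketch (the method name save_data_csv_only is illustrative, not part of the original class):

    # Alternative sketch: write the CSV directly from the scraped dicts.
    # Hypothetical helper method, not part of the original BookSpider.
    def save_data_csv_only(self):
        with open("04.csv", "w", encoding="utf-8", newline="") as csv_fd:
            fieldnames = ["book_name", "book_img_url", "book_author", "book_info"]
            writer = csv.DictWriter(csv_fd, fieldnames=fieldnames)
            writer.writeheader()               # header row from the field names
            writer.writerows(self.data_list)   # one row per book dict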