Scraping Xiangha recipes: no more worries about what to cook
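The spider below walks the hot pages of the home-cooking (家常菜) category on xiangha.com, follows each recipe link, extracts the dish name, effect, ingredients, steps, view count and favourite count, and writes everything into xiangha.xlsx with openpyxl.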
import requests
from lxml import etree
import re
import openpyxl
import os

class XiangHaSpider:

    def __init__(self):
        # Listing-page template for the hot home-cooking category; {} is the page number.
        self.url = 'https://www.xiangha.com/caipu/c-jiachang/hot-{}/'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
        }
        self.name = 'xiangha.xlsx'
        # Column headers: dish name, effect, ingredients, steps, views, favourites.
        self.excel_headers = ['菜名', '功效', '作料', '步骤', '浏览量', '收藏量']
        # Create the workbook on first run, then open it and write the header row.
        if not os.path.exists(self.name):
            wb = openpyxl.Workbook()
            wb.save(self.name)
        self.open_excel(self.name)
        self.write_headers()
        # Data rows start below the header row.
        self.row = 2
    def open_excel(self, name):
        self.wb = openpyxl.load_workbook(name)

    def write_headers(self):
        # openpyxl names the default worksheet 'Sheet'.
        self.sh = self.wb['Sheet']
        for index, header in enumerate(self.excel_headers):
            self.write_content(self.sh, row=1, column=index + 1, value=header)

    def write_content(self, sh, row, column, value):
        sh.cell(row=row, column=column, value=value)
        # Save after every write so partial results survive an interrupted crawl.
        self.wb.save(self.name)
    def send_request(self, full_url):
        response = requests.get(url=full_url, headers=self.headers)
        if response.status_code == 200:
            return response
        else:
            # Implicitly returns None on any non-200 status.
            print('Request failed:', full_url)
    def parse_content(self, response):
        # Parse one listing page and follow every recipe link on it.
        datas = []
        html = etree.HTML(response.text)
        li_list = html.xpath('//div[@class="s_list"]//li')
        for li in li_list:
            href = li.xpath('./a/@href')[0]
            detail_response = self.send_request(href)
            if detail_response:
                datas.append(self.parse_detail_content(detail_response))
        return datas
    def parse_detail_content(self, response):
        # Extract name, effect, ingredients, steps, view count and favourite count from a recipe page.
        html = etree.HTML(response.text)
        title = "".join(html.xpath('//h2[@class="dish-title"]/text()'))
        hea = "".join(html.xpath('//div[@class="rec_hea"]//p//text()'))
        # Ingredients sit in a table; linked ingredients are wrapped in <a class="link">.
        td_list = html.xpath('//table//tr/td')
        all_zl = ''
        book_make = ''
        for td in td_list:
            a = td.xpath('.//div[@class="cell"]/a[@class="link"]')
            if not a:
                zl_content = td.xpath('.//div[@class="cell"]//text()')
            else:
                zl_content = a[0].xpath('.//text()')
            all_zl += "".join(zl_content) + '\n'
        # Each cooking step is an <li> whose id contains "make".
        li_list = html.xpath('//li[contains(@id,"make")]')
        for li in li_list:
            step = "".join(li.xpath('.//text()'))
            book_make += step + '\n'
        # The view count and favourite count are the two numbers in the info block.
        vnum = coll = 0
        nums = "".join(html.xpath('//div[@class="info"]//text()'))
        nums = re.findall(r'(\d+).*?(\d+)', nums)
        if nums:
            vnum, coll = nums[0]
        return title, hea, all_zl, book_make, vnum, coll
    def save_content(self, data):
        # Write one row per recipe, one column per field.
        for item in data:
            for col, value in enumerate(item):
                self.write_content(self.sh, row=self.row, column=col + 1, value=value)
            self.row += 1
    def start(self):
        # Crawl listing pages 1-9, parse every recipe on each page, and save the results.
        for i in range(1, 10):
            full_url = self.url.format(i)
            response = self.send_request(full_url)
            if response:
                data = self.parse_content(response)
                self.save_content(data)

if __name__ == '__main__':
    xhs = XiangHaSpider()
    xhs.start()
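A note on the write path: write_content saves the workbook after every single cell, so anything already scraped stays on disk even if a later page fails; the extra saves are acceptable for a crawl of only nine listing pages.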