# -*- coding: utf-8 -*-
import scrapy
from copy import deepcopy
import re
# https://list.jd.com/list.html?cat=1713%2C3258%2C3297&page=1&s=1&click=0
# https://list.jd.com/list.html?cat=1713%2C3258%2C3297&page=3&s=53&click=0
# https://list.jd.com/list.html?cat=1713%2C3258%2C3297&page=5&s=105&click=0
# https://list.jd.com/list.html?cat=1713%2C3258%2C3297&page=7&s=157&click=0
class JdlianxiSpider(scrapy.Spider):
    """Crawl JD (jd.com) book categories and their paginated listing pages.

    Flow: ``parse`` (category tree) -> ``fanye`` (pull the listing base URL
    out of the page script and fan out page requests) -> ``get_next_page``
    (scrape image / title / price for each book on one listing page).
    """
    name = 'jdlianxi'
    allowed_domains = ['jd.com']
    start_urls = ['https://book.jd.com/booksort.html/']

    def parse(self, response):
        """Parse the category page.

        Each <dt> is a top-level category; the <dd> at the same position
        holds its sub-categories as <em><a> links.  dt/dd occur pairwise
        inside each <dl>, so pair them directly with zip instead of the
        original quadratic enumerate/if scan.
        """
        dt_list = response.xpath('//div[@class="mc"]/dl/dt')
        dd_list = response.xpath('//div[@class="mc"]/dl/dd')
        for dt, dd in zip(dt_list, dd_list):
            b_case = dt.xpath('./a/text()').extract_first()
            for em in dd.xpath('./em'):
                item = {
                    'b_case': b_case,
                    'm_case': em.xpath('./a/text()').extract_first(),
                    # hrefs come back protocol-relative (e.g. "//list.jd.com/...")
                    'url': 'https://' + em.xpath('./a/@href').extract_first().strip('/'),
                }
                yield scrapy.Request(url=item['url'],
                                     callback=self.fanye,
                                     meta={'item': deepcopy(item)})

    def fanye(self, response):
        """Extract ``SEARCH.base_url`` from the page script and emit the
        first listing-page requests for this sub-category."""
        item = response.meta.get('item')
        match = re.search(r'SEARCH\.base_url="(.*?)";', response.text)
        if match is None:
            return  # page layout changed or category has no listing
        base_query = match.group(1)
        for i in range(1, 10):
            page = 2 * i - 1  # JD listing pages use odd page numbers
            # s is the 1-based index of the first item on the page.  Per the
            # sample URLs (page=1 s=1, page=3 s=53, page=5 s=105, page=7
            # s=157) each page step of 2 advances s by 52, i.e.
            # s = 26 * (page - 1) + 1.  The original concatenated
            # str(5*i) + str(2*i-1), which does not match that pattern.
            s = 26 * (page - 1) + 1
            url = ('https://list.jd.com/list.html?{}&page={}&s={}&click=0'
                   .format(base_query, page, s))
            item['n_url'] = url
            yield scrapy.Request(url=url,
                                 callback=self.get_next_page,
                                 meta={'item': deepcopy(item)})

    def get_next_page(self, response):
        """Scrape one listing page: image, title and price for each book."""
        item = response.meta.get('item')
        for li in response.xpath('//ul[@data-tpl="2"]/li'):
            item['img'] = 'https:' + li.xpath('.//div[@class="p-img"]/a/img/@src').extract_first()
            item['title'] = li.xpath('.//div[@class="p-name"]/a/em/text()').extract_first()
            # price text is split across nested nodes; strip and join pieces
            price_parts = li.xpath('.//div[@class="p-price"]/strong//text()').extract()
            item['price'] = ''.join(p.strip() for p in price_parts if p.strip())
            print(item)
# NOTE: the pagination part above could be improved further.
# Purpose: crawl JD (jd.com) books.
# Source: blog post, last recommended/published 2021-12-15 10:09:32.