import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from BWM3.items import Bwm3Item
class Bwm3GtSpider(CrawlSpider):
    """Crawl the BMW 3 GT image gallery on car.autohome.com.cn.

    Starts at series 2963's gallery index, follows every paginated
    gallery page matched by ``rules``, and yields one ``Bwm3Item`` per
    page containing the section title and full-resolution image URLs.
    """

    # NOTE: the original used curly Unicode quotes here (a SyntaxError);
    # replaced with plain ASCII string literals.
    name = 'bwm3_gt'
    allowed_domains = ['car.autohome.com.cn']
    start_urls = ['https://car.autohome.com.cn/pic/series/2963.html']

    # Follow paginated gallery pages such as:
    #   https://car.autohome.com.cn/pic/series/2963-1.html
    #   https://car.autohome.com.cn/pic/series/2963-12.html
    rules = (
        Rule(
            LinkExtractor(allow=r'https://car.autohome.com.cn/pic/series/2963.+'),
            callback='parse_item',
            follow=True,
        ),
    )

    def parse_item(self, response):
        """Extract the gallery category and image URLs from one page.

        Args:
            response: the Scrapy ``Response`` for a gallery page.

        Yields:
            Bwm3Item: with ``catagory`` (the page section title, possibly
            empty) and ``img_urls`` (absolute full-resolution image URLs).
            (The field name ``catagory`` is a historical typo that must
            match the ``Bwm3Item`` declaration in items.py.)
        """
        # Section title. ``get()`` returns None when the node is absent,
        # so coalesce to '' before stripping to avoid an AttributeError.
        catagory = (
            response.xpath('//div[@class="uibox"]/div[1]/text()').get() or ''
        ).strip()

        # Thumbnail srcs are protocol-relative and point at a resized
        # variant; dropping the '240x180_0_q95_c42_' prefix yields the
        # full-resolution image, and urljoin makes the URL absolute.
        thumb_srcs = response.xpath(
            '//div[@class="uibox"]/div[2]/ul/li/a/img/@src'
        ).getall()
        img_urls = [
            response.urljoin(src.replace('240x180_0_q95_c42_', ''))
            for src in thumb_srcs
        ]

        yield Bwm3Item(catagory=catagory, img_urls=img_urls)