A Scrapy spider that can genuinely crawl every image across the whole nipic.com (昵图网) site
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from nipic.items import NipicItem
class NipiSpider(scrapy.Spider):
    name = "nipi"
    allowed_domains = ["nipic.com"]
    start_urls = ['http://nipic.com/']

    def parse(self, response):
        # Layer 1: collect the top navigation links from the home page
        urldata = response.xpath("//div[@class='fl nav-item-wrap']//a/@href").extract()
        print("Layer 1: " + urldata[1])
        print("Layer 1: " + urldata[2])
        print("Layer 1: " + urldata[3])
        # Only indexes 1 to 3 are the "设计" (design), "摄影" (photography) and "多媒体" (multimedia) tabs
        urldata = urldata[1:4]
        for i in urldata:
            urlnew = response.urljoin(i)
            yield Request(url=urlnew, callback=self.next)

    def next(self, response):
        # Layer 2: follow every sub-category link in the category menu
        print("Layer 2 =============")
        url2 = response.xpath('//dd[@class="menu-item-list clearfix"]//a/@href').extract()
        for j in url2:
            url2new = response.urljoin(j)
            print(url2new)
            yield Request(url=url2new, callback=self.next2)

    def next2(self, response):
        # Layer 3: find out how many listing pages this sub-category has
        print("Layer 3 =============")
        # Get the total number of pages from the pagination links
        pages = response.xpath('//div[@class="common-page-box mt10 align-center"]//a/@href').extract()
        pageslast = response.urljoin(pages[-1])
        # Split the last pagination URL on '=' into the URL prefix (page1) and the last page number (page2)
        pagenumber = pageslast.split('=')
        page1 = pagenumber[0]
        page2 = pagenumber[1]
        # Construct the URLs for every listing page
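
The listing stops right after the page count is extracted. What follows is a minimal sketch of how the rest of next2 and the next callback could look, assuming the last pagination URL really does split into a prefix and a page number as above. The callback name next3, its XPath, and the NipicItem field name url are hypothetical, since the original post does not show items.py or the remainder of this method.

        # Hedged sketch, not the original author's code: rebuild every page URL
        # from the URL prefix (page1) and the last page number (page2)
        for page in range(1, int(page2) + 1):
            pageurl = page1 + '=' + str(page)
            yield Request(url=pageurl, callback=self.next3)

    def next3(self, response):
        # Hypothetical layer 4: collect the image URLs found on one listing page
        item = NipicItem()
        # The XPath below is a guess at the thumbnail markup and may need adjusting
        item['url'] = response.xpath('//img/@src').extract()
        yield item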