import scrapy
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from myscrapys.items import imgsPipeline
from scrapy.http import Request
class JandanSpider(CrawlSpider):
    """Crawl jandan.net/ooxx listing pages and yield items of image URLs."""

    name = "jandan"
    allowed_domains = ["jandan.net"]
    start_urls = (
        'http://jandan.net/ooxx',
    )
    rules = (
        # Follow pagination links; each matched page goes to parse_new_url.
        # Raw string avoids the invalid-escape warning on \d.
        Rule(LinkExtractor(allow=r'jandan.net/ooxx/page-\d+\S+'),
             'parse_new_url', follow=True),
    )

    def parse_new_url(self, response):
        """Extract all image src URLs from one listing page.

        Yields a single imgsPipeline item carrying every URL found on
        the page (the images pipeline downloads them from `image_urls`).
        """
        self.log("parse_new_url url %s" % response.url)
        imgs = response.xpath("//div[1]/div/div[2]/p/img")
        # BUG FIX: the original looped `for sel in imgs` but extracted from
        # `imgs` (the whole SelectorList) on every iteration, yielding the
        # complete URL list once per image — N duplicate items. Extract the
        # list once and yield one item instead.
        img_urls = imgs.xpath("@src").extract()
        for img_url in img_urls:
            self.log("find img url : %s" % img_url)
        yield imgsPipeline(image_urls=img_urls)
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from myscrapys.items import imgsPipeline
from scrapy.http import Request
class JandanSpider(CrawlSpider):
    """Crawl jandan.net/ooxx listing pages and yield items of image URLs."""

    name = "jandan"
    allowed_domains = ["jandan.net"]
    start_urls = (
        'http://jandan.net/ooxx',
    )
    rules = (
        # Follow pagination links; each matched page goes to parse_new_url.
        # Raw string avoids the invalid-escape warning on \d.
        Rule(LinkExtractor(allow=r'jandan.net/ooxx/page-\d+\S+'),
             'parse_new_url', follow=True),
    )

    def parse_new_url(self, response):
        """Extract all image src URLs from one listing page.

        Yields a single imgsPipeline item carrying every URL found on
        the page (the images pipeline downloads them from `image_urls`).
        """
        self.log("parse_new_url url %s" % response.url)
        imgs = response.xpath("//div[1]/div/div[2]/p/img")
        # BUG FIX: the original looped `for sel in imgs` but extracted from
        # `imgs` (the whole SelectorList) on every iteration, yielding the
        # complete URL list once per image — N duplicate items. Extract the
        # list once and yield one item instead. The trailing no-op `pass`
        # from the original is dropped.
        img_urls = imgs.xpath("@src").extract()
        for img_url in img_urls:
            self.log("find img url : %s" % img_url)
        yield imgsPipeline(image_urls=img_urls)
通过阅读 scrapy 的文档(PDF)发现,URL 的提取已经有现成的 LinkExtractor 可用,代码因此更少了。
尝试了一下,总共爬了 11932 个图片文件出来,也不知道是否爬完了。