# -*- coding: utf-8 -*-
# Spider file under the spiders/ folder (create it yourself under spiders/).
import scrapy
import json

from Douyu.items import DouyuItem


class DouyuspiderSpider(scrapy.Spider):
    """Crawl Douyu's vertical-room JSON API, 20 rooms per page.

    Yields one DouyuItem (nickname + cover-image URL) per room, then
    schedules the next page until the API returns an empty "data" list.
    """

    name = 'douyuspider'
    allowed_domains = ['douyucdn.cn']
    # Paged API: append the numeric offset to fetch the next page of 20 rooms.
    basicUrl = "http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset="
    offset = 0
    start_urls = [basicUrl + str(offset)]

    def parse(self, response):
        """Extract the rooms of one page and request the following page."""
        data_list = json.loads(response.body)["data"]
        # An empty "data" array means we paged past the last room: stop here.
        if not data_list:
            return
        for data in data_list:
            item = DouyuItem()
            item["nickname"] = data["nickname"]
            item["imagelink"] = data["vertical_src"]
            yield item
        # Derive the next offset from the URL of the response we just handled
        # instead of mutating shared spider state (the original
        # ``self.offset += 20``): with several responses in flight at once,
        # shared-counter mutation skips or duplicates pages.
        current_offset = int(response.url.rsplit("=", 1)[1])
        yield scrapy.Request(self.basicUrl + str(current_offset + 20),
                             callback=self.parse)
items 文件（Douyu/items.py，定义爬取结果的字段）
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class DouyuItem(scrapy.Item):
    """One Douyu room: the anchor's nickname and the room's cover-image URL."""

    nickname = scrapy.Field()   # anchor nickname; the pipeline reuses it as the image file name
    imagelink = scrapy.Field()  # URL of the room's vertical cover image
pipelines 文件（Douyu/pipelines.py，下载封面图片并按主播昵称重命名）
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import os  # used to rename the downloaded image files
import json
import scrapy
# Absolute import: a bare "from settings import ..." is not resolvable when the
# project runs as a package under Python 3's absolute-import rules.
from Douyu.settings import IMAGES_STORE as images_store
from scrapy.pipelines.images import ImagesPipeline  # pipeline specialised for downloading images


class DouyuPipeline(ImagesPipeline):
    """Download each item's cover image, then rename it to <nickname>.jpg."""

    def get_media_requests(self, item, info):
        # Schedule the image download for this item's cover URL.
        yield scrapy.Request(item["imagelink"])

    def item_completed(self, results, item, info):
        """Rename the stored image after ImagesPipeline finishes downloading.

        ``results`` is a list of ``(ok, info_dict)`` pairs; keep only the
        relative storage paths of the successful downloads.
        """
        image_paths = [x["path"] for ok, x in results if ok]
        # Guard against a failed download: indexing an empty list would raise.
        if image_paths:
            # os.path.join supplies the path separator that plain string
            # concatenation (images_store + path) silently dropped.
            os.rename(os.path.join(images_store, image_paths[0]),
                      os.path.join(images_store, item["nickname"] + ".jpg"))
        return item
settings 文件（Douyu/settings.py，项目配置）
# -*- coding: utf-8 -*-

# Scrapy settings for Douyu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Douyu'

SPIDER_MODULES = ['Douyu.spiders']
NEWSPIDER_MODULE = 'Douyu.spiders'

# Raw string: a plain "D:\..." literal only works because \P, \D and \i happen
# not to be escape sequences, which already raises DeprecationWarning and will
# become a SyntaxError in future Python versions.
IMAGES_STORE = r"D:\PycharmProjects\Douyu\images"  # set your own image save path

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Douyu (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Douyu.middlewares.DouyuSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'Douyu.middlewares.DouyuDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'Douyu.pipelines.DouyuPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'